Merge "Add N2VC support"
author: tierno <alfonso.tiernosepulveda@telefonica.com>
Tue, 3 Apr 2018 14:16:10 +0000 (16:16 +0200)
committer: Gerrit Code Review <root@osm.etsi.org>
Tue, 3 Apr 2018 14:16:10 +0000 (16:16 +0200)
22 files changed:
Dockerfile
database_utils/migrate_mano_db.sh
docker/Dockerfile-local
docker/scripts/start.sh
openmanod
osm_ro/nfvo.py
osm_ro/nfvo_db.py
osm_ro/openmano_schemas.py
osm_ro/vim_thread.py
osm_ro/vimconn.py
osm_ro/vimconn_openstack.py
osm_ro/vimconn_openvim.py
osm_ro/vimconn_vmware.py
requirements.txt
scenarios/examples/v3_3vnf_2vdu_1vnffg_nsd.yaml [new file with mode: 0644]
scripts/install-openmano.sh
scripts/python-osm-ro.postinst
test/RO_tests/v3_2vdu_set_ip_mac/scenario_2vdu_set_ip_mac.yaml [new file with mode: 0644]
test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac.yaml [new file with mode: 0644]
test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac2.yaml [new file with mode: 0644]
test/test_RO.py
vnfs/examples/v3_2vdu_vnfd.yaml [new file with mode: 0644]

diff --git a/Dockerfile b/Dockerfile
index c8f439e..e2b9c55 100644
@@ -11,7 +11,7 @@ RUN  apt-get update && \
   DEBIAN_FRONTEND=noninteractive add-apt-repository -y cloud-archive:ocata && \
   DEBIAN_FRONTEND=noninteractive apt-get update && \
   DEBIAN_FRONTEND=noninteractive apt-get -y install python-novaclient python-keystoneclient python-glanceclient python-cinderclient python-neutronclient && \
-  DEBIAN_FRONTEND=noninteractive pip install -U progressbar pyvmomi pyvcloud==18.2.2 && \
+  DEBIAN_FRONTEND=noninteractive pip install -U progressbar pyvmomi pyvcloud==19.1.1 && \
   DEBIAN_FRONTEND=noninteractive apt-get -y install python-argcomplete python-bottle python-cffi python-packaging python-paramiko python-pkgconfig libmysqlclient-dev libssl-dev libffi-dev python-mysqldb && \
   DEBIAN_FRONTEND=noninteractive apt-get -y install python-logutils python-openstackclient python-openstacksdk
 
diff --git a/database_utils/migrate_mano_db.sh b/database_utils/migrate_mano_db.sh
index d7a0755..61e2ac6 100755
@@ -33,8 +33,8 @@ DBPORT="3306"
 DBNAME="mano_db"
 QUIET_MODE=""
 #TODO update it with the last database version
-LAST_DB_VERSION=27
+LAST_DB_VERSION=28
+
 # Detect paths
 MYSQL=$(which mysql)
 AWK=$(which awk)
@@ -195,6 +195,7 @@ fi
 #[ $OPENMANO_VER_NUM -ge 5022 ] && DB_VERSION=25  #0.5.22 =>  25
 #[ $OPENMANO_VER_NUM -ge 5024 ] && DB_VERSION=26  #0.5.24 =>  26
 #[ $OPENMANO_VER_NUM -ge 5025 ] && DB_VERSION=27  #0.5.25 =>  27
+#[ $OPENMANO_VER_NUM -ge 5052 ] && DB_VERSION=28  #0.5.52 =>  28
 #TODO ... put next versions here
 
 function upgrade_to_1(){
@@ -1019,6 +1020,223 @@ function downgrade_from_27(){
     sql "ALTER TABLE nfvo_tenants DROP COLUMN RO_pub_key;"
     sql "DELETE FROM schema_version WHERE version_int='27';"
 }
+function upgrade_to_28(){
+    echo "      [Adding necessary tables for VNFFG]"
+    echo "      Adding sce_vnffgs"
+    sql "CREATE TABLE IF NOT EXISTS sce_vnffgs (
+            uuid VARCHAR(36) NOT NULL,
+            tenant_id VARCHAR(36) NULL DEFAULT NULL,
+            name VARCHAR(255) NOT NULL,
+            description VARCHAR(255) NULL DEFAULT NULL,
+            vendor VARCHAR(255) NULL DEFAULT NULL,
+            scenario_id VARCHAR(36) NOT NULL,
+            created_at DOUBLE NOT NULL,
+            modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_scenarios_sce_vnffg (scenario_id),
+        CONSTRAINT FK_scenarios_vnffg FOREIGN KEY (scenario_id) REFERENCES scenarios (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding sce_rsps"
+    sql "CREATE TABLE IF NOT EXISTS sce_rsps (
+            uuid VARCHAR(36) NOT NULL,
+            tenant_id VARCHAR(36) NULL DEFAULT NULL,
+            name VARCHAR(255) NOT NULL,
+            sce_vnffg_id VARCHAR(36) NOT NULL,
+            created_at DOUBLE NOT NULL,
+            modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_sce_vnffgs_rsp (sce_vnffg_id),
+        CONSTRAINT FK_sce_vnffgs_rsp FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding sce_rsp_hops"
+    sql "CREATE TABLE IF NOT EXISTS sce_rsp_hops (
+            uuid VARCHAR(36) NOT NULL,
+            if_order INT DEFAULT 0 NOT NULL,
+            interface_id VARCHAR(36) NOT NULL,
+            sce_vnf_id VARCHAR(36) NOT NULL,
+            sce_rsp_id VARCHAR(36) NOT NULL,
+            created_at DOUBLE NOT NULL,
+            modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_interfaces_rsp_hop (interface_id),
+        INDEX FK_sce_vnfs_rsp_hop (sce_vnf_id),
+        INDEX FK_sce_rsps_rsp_hop (sce_rsp_id),
+        CONSTRAINT FK_interfaces_rsp_hop FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_sce_vnfs_rsp_hop FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_sce_rsps_rsp_hop FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding sce_classifiers"
+    sql "CREATE TABLE IF NOT EXISTS sce_classifiers (
+            uuid VARCHAR(36) NOT NULL,
+            tenant_id VARCHAR(36) NULL DEFAULT NULL,
+            name VARCHAR(255) NOT NULL,
+            sce_vnffg_id VARCHAR(36) NOT NULL,
+            sce_rsp_id VARCHAR(36) NOT NULL,
+            sce_vnf_id VARCHAR(36) NOT NULL,
+            interface_id VARCHAR(36) NOT NULL,
+            created_at DOUBLE NOT NULL,
+            modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_sce_vnffgs_classifier (sce_vnffg_id),
+        INDEX FK_sce_rsps_classifier (sce_rsp_id),
+        INDEX FK_sce_vnfs_classifier (sce_vnf_id),
+        INDEX FK_interfaces_classifier (interface_id),
+        CONSTRAINT FK_sce_vnffgs_classifier FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_sce_rsps_classifier FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_sce_vnfs_classifier FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+        CONSTRAINT FK_interfaces_classifier FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
+    echo "      Adding sce_classifier_matches"
+    sql "CREATE TABLE IF NOT EXISTS sce_classifier_matches (
+            uuid VARCHAR(36) NOT NULL,
+            ip_proto VARCHAR(2) NOT NULL,
+            source_ip VARCHAR(16) NOT NULL,
+            destination_ip VARCHAR(16) NOT NULL,
+            source_port VARCHAR(5) NOT NULL,
+            destination_port VARCHAR(5) NOT NULL,
+            sce_classifier_id VARCHAR(36) NOT NULL,
+            created_at DOUBLE NOT NULL,
+            modified_at DOUBLE NULL DEFAULT NULL,
+        PRIMARY KEY (uuid),
+        INDEX FK_classifiers_classifier_match (sce_classifier_id),
+        CONSTRAINT FK_sce_classifiers_classifier_match FOREIGN KEY (sce_classifier_id) REFERENCES sce_classifiers (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+        COLLATE='utf8_general_ci'
+        ENGINE=InnoDB;"
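+    # At design time: sce_vnffgs group one or more sce_rsps (rendered service paths), each made of
+    # ordered sce_rsp_hops over existing VNF interfaces, while sce_classifiers plus their
+    # sce_classifier_matches hold the 5-tuple rules that steer traffic into a given RSP.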
+
+    echo "      [Adding necessary tables for VNFFG-SFC instance mapping]"
+    echo "      Adding instance_sfis"
+    sql "CREATE TABLE IF NOT EXISTS instance_sfis (
+          uuid varchar(36) NOT NULL,
+          instance_scenario_id varchar(36) NOT NULL,
+          vim_sfi_id varchar(36) DEFAULT NULL,
+          sce_rsp_hop_id varchar(36) DEFAULT NULL,
+          datacenter_id varchar(36) DEFAULT NULL,
+          datacenter_tenant_id varchar(36) DEFAULT NULL,
+          status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+          error_msg varchar(1024) DEFAULT NULL,
+          vim_info text,
+          created_at double NOT NULL,
+          modified_at double DEFAULT NULL,
+          PRIMARY KEY (uuid),
+      KEY FK_instance_sfis_instance_scenarios (instance_scenario_id),
+      KEY FK_instance_sfis_sce_rsp_hops (sce_rsp_hop_id),
+      KEY FK_instance_sfis_datacenters (datacenter_id),
+      KEY FK_instance_sfis_datacenter_tenants (datacenter_tenant_id),
+      CONSTRAINT FK_instance_sfis_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+      CONSTRAINT FK_instance_sfis_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+      CONSTRAINT FK_instance_sfis_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+      CONSTRAINT FK_instance_sfis_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+      COLLATE='utf8_general_ci'
+      ENGINE=InnoDB;"
+    echo "      Adding instance_sfs"
+    sql "CREATE TABLE IF NOT EXISTS instance_sfs (
+          uuid varchar(36) NOT NULL,
+          instance_scenario_id varchar(36) NOT NULL,
+          vim_sf_id varchar(36) DEFAULT NULL,
+          sce_rsp_hop_id varchar(36) DEFAULT NULL,
+          datacenter_id varchar(36) DEFAULT NULL,
+          datacenter_tenant_id varchar(36) DEFAULT NULL,
+          status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+          error_msg varchar(1024) DEFAULT NULL,
+          vim_info text,
+          created_at double NOT NULL,
+          modified_at double DEFAULT NULL,
+      PRIMARY KEY (uuid),
+      KEY FK_instance_sfs_instance_scenarios (instance_scenario_id),
+      KEY FK_instance_sfs_sce_rsp_hops (sce_rsp_hop_id),
+      KEY FK_instance_sfs_datacenters (datacenter_id),
+      KEY FK_instance_sfs_datacenter_tenants (datacenter_tenant_id),
+      CONSTRAINT FK_instance_sfs_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+      CONSTRAINT FK_instance_sfs_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+      CONSTRAINT FK_instance_sfs_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+      CONSTRAINT FK_instance_sfs_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+      COLLATE='utf8_general_ci'
+      ENGINE=InnoDB;"
+    echo "      Adding instance_classifications"
+    sql "CREATE TABLE IF NOT EXISTS instance_classifications (
+          uuid varchar(36) NOT NULL,
+          instance_scenario_id varchar(36) NOT NULL,
+          vim_classification_id varchar(36) DEFAULT NULL,
+          sce_classifier_match_id varchar(36) DEFAULT NULL,
+          datacenter_id varchar(36) DEFAULT NULL,
+          datacenter_tenant_id varchar(36) DEFAULT NULL,
+          status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+          error_msg varchar(1024) DEFAULT NULL,
+          vim_info text,
+          created_at double NOT NULL,
+          modified_at double DEFAULT NULL,
+      PRIMARY KEY (uuid),
+      KEY FK_instance_classifications_instance_scenarios (instance_scenario_id),
+      KEY FK_instance_classifications_sce_classifier_matches (sce_classifier_match_id),
+      KEY FK_instance_classifications_datacenters (datacenter_id),
+      KEY FK_instance_classifications_datacenter_tenants (datacenter_tenant_id),
+      CONSTRAINT FK_instance_classifications_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+      CONSTRAINT FK_instance_classifications_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+      CONSTRAINT FK_instance_classifications_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+      CONSTRAINT FK_instance_classifications_sce_classifier_matches FOREIGN KEY (sce_classifier_match_id) REFERENCES sce_classifier_matches (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+      COLLATE='utf8_general_ci'
+      ENGINE=InnoDB;"
+    echo "      Adding instance_sfps"
+    sql "CREATE TABLE IF NOT EXISTS instance_sfps (
+          uuid varchar(36) NOT NULL,
+          instance_scenario_id varchar(36) NOT NULL,
+          vim_sfp_id varchar(36) DEFAULT NULL,
+          sce_rsp_id varchar(36) DEFAULT NULL,
+          datacenter_id varchar(36) DEFAULT NULL,
+          datacenter_tenant_id varchar(36) DEFAULT NULL,
+          status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+          error_msg varchar(1024) DEFAULT NULL,
+          vim_info text,
+          created_at double NOT NULL,
+          modified_at double DEFAULT NULL,
+      PRIMARY KEY (uuid),
+      KEY FK_instance_sfps_instance_scenarios (instance_scenario_id),
+      KEY FK_instance_sfps_sce_rsps (sce_rsp_id),
+      KEY FK_instance_sfps_datacenters (datacenter_id),
+      KEY FK_instance_sfps_datacenter_tenants (datacenter_tenant_id),
+      CONSTRAINT FK_instance_sfps_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+      CONSTRAINT FK_instance_sfps_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+      CONSTRAINT FK_instance_sfps_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+      CONSTRAINT FK_instance_sfps_sce_rsps FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+      COLLATE='utf8_general_ci'
+      ENGINE=InnoDB;"
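+    # At instantiation time: instance_sfis, instance_sfs, instance_classifications and instance_sfps
+    # mirror the tables above per deployed scenario; their vim_*_id columns are left NULL here and
+    # filled in by the VIM thread once the corresponding objects exist in the VIM.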
+
+
+    echo "      [Altering vim_actions table]"
+    sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces','instance_sfis','instance_sfs','instance_classifications','instance_sfps') NOT NULL COMMENT 'table where the item is stored'"
+
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+         "VALUES (28, '0.28', '0.5.28', 'Adding VNFFG-related tables', '2017-11-20');"
+}
+function downgrade_from_28(){
+    echo "      [Undo adding the VNFFG tables]"
+    echo "      Dropping instance_sfps"
+    sql "DROP TABLE instance_sfps;"
+    echo "      Dropping sce_classifications"
+    sql "DROP TABLE instance_classifications;"
+    echo "      Dropping instance_sfs"
+    sql "DROP TABLE instance_sfs;"
+    echo "      Dropping instance_sfis"
+    sql "DROP TABLE instance_sfis;"
+    echo "      Dropping sce_classifier_matches"
+    echo "      [Undo adding the VNFFG-SFC instance mapping tables]"
+    sql "DROP TABLE sce_classifier_matches;"
+    echo "      Dropping sce_classifiers"
+    sql "DROP TABLE sce_classifiers;"
+    echo "      Dropping sce_rsp_hops"
+    sql "DROP TABLE sce_rsp_hops;"
+    echo "      Dropping sce_rsps"
+    sql "DROP TABLE sce_rsps;"
+    echo "      Dropping sce_vnffgs"
+    sql "DROP TABLE sce_vnffgs;"
+    echo "      [Altering vim_actions table]"
+    sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored'"
+    sql "DELETE FROM schema_version WHERE version_int='28';"
+}
 function upgrade_to_X(){
     echo "      change 'datacenter_nets'"
     sql "ALTER TABLE datacenter_nets ADD COLUMN vim_tenant_id VARCHAR(36) NOT NULL AFTER datacenter_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id, vim_tenant_id);"
diff --git a/docker/Dockerfile-local b/docker/Dockerfile-local
index e031cf3..f68b379 100644
@@ -18,7 +18,7 @@ RUN apt-get update && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install python-yaml python-netaddr python-boto && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install software-properties-common && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install python-novaclient python-keystoneclient python-glanceclient python-cinderclient python-neutronclient && \
-    DEBIAN_FRONTEND=noninteractive pip install -U progressbar pyvmomi pyvcloud==18.2.2 && \
+    DEBIAN_FRONTEND=noninteractive pip install -U progressbar pyvmomi pyvcloud==19.1.1 && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install python-argcomplete python-bottle python-cffi python-packaging python-paramiko python-pkgconfig libmysqlclient-dev libssl-dev libffi-dev python-mysqldb && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install python-logutils python-openstackclient python-openstacksdk && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install python-bitarray && \
diff --git a/docker/scripts/start.sh b/docker/scripts/start.sh
index 136bf85..1506e3a 100755
@@ -125,4 +125,4 @@ fi
 
 
 echo "4/4 Try to start"
-/usr/bin/openmanod -c /etc/osm/openmanod.cfg --log-file=/var/log/osm/openmano.log
+/usr/bin/openmanod -c /etc/osm/openmanod.cfg --log-file=/var/log/osm/openmano.log --create-tenant=osm
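+# Note: --create-tenant=osm makes openmanod create the "osm" tenant at startup, ignoring the
+# conflict error if it already exists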
diff --git a/openmanod b/openmanod
index 0616389..b45187e 100755
--- a/openmanod
+++ b/openmanod
@@ -48,9 +48,9 @@ import osm_ro
 
 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
 __date__ = "$26-aug-2014 11:09:29$"
-__version__ = "0.5.51-r561"
-version_date = "Jan 2018"
-database_version = 27      # expected database schema version
+__version__ = "0.5.57-r567"
+version_date = "Mar 2018"
+database_version = 28      # expected database schema version
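+# Note: database_version is expected to match LAST_DB_VERSION in database_utils/migrate_mano_db.sh
+# (both are bumped to 28 by this change)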
 
 
 global global_config
@@ -136,6 +136,7 @@ def usage():
     print("      --log-socket-host HOST: send logs to this host")
     print("      --log-socket-port PORT: send logs using this port (default: 9022)")
     print("      --log-file FILE: send logs to this file")
+    print("      --create-tenant NAME: Try to creates this tenant name before starting, ignoring any errors as e.g. conflict")
     return
 
 
@@ -183,8 +184,10 @@ if __name__=="__main__":
     # Read parameters and configuration file
     httpthread = None
     try:
-        #load parameters and configuration
-        opts, args = getopt.getopt(sys.argv[1:], "hvc:V:p:P:", ["config=", "help", "version", "port=", "vnf-repository=", "adminport=", "log-socket-host=", "log-socket-port=", "log-file="])
+        # load parameters and configuration
+        opts, args = getopt.getopt(sys.argv[1:], "hvc:V:p:P:",
+                                   ["config=", "help", "version", "port=", "vnf-repository=", "adminport=",
+                                    "log-socket-host=", "log-socket-port=", "log-file=", "create-tenant="])
         port=None
         port_admin = None
         config_file = 'osm_ro/openmanod.cfg'
@@ -192,6 +195,7 @@ if __name__=="__main__":
         log_file = None
         log_socket_host = None
         log_socket_port = None
+        create_tenant = None
 
         for o, a in opts:
             if o in ("-v", "--version"):
@@ -215,6 +219,8 @@ if __name__=="__main__":
                 log_socket_host = a
             elif o == "--log-file":
                 log_file = a
+            elif o == "--create-tenant":
+                create_tenant = a
             else:
                 assert False, "Unhandled option"
         if log_file:
@@ -320,6 +326,14 @@ if __name__=="__main__":
             exit(-1)
 
         nfvo.global_config=global_config
+        if create_tenant:
+            try:
+                nfvo.new_tenant(mydb, {"name": create_tenant})
+            except Exception as e:
+                if isinstance(e, nfvo.NfvoException) and e.http_code == 409:
+                    pass  # if the tenant already exists (NfvoException with HTTP 409), ignore it
+                else:     # otherwise log an error and continue
+                    logger.error("Cannot create tenant '{}': {}".format(create_tenant, e))
         nfvo.start_service(mydb)
 
         httpthread = httpserver.httpserver(mydb, False, global_config['http_host'], global_config['http_port'])
diff --git a/osm_ro/nfvo.py b/osm_ro/nfvo.py
index 5c0fefe..6bb14f9 100644
@@ -838,6 +838,8 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
         db_interfaces = []
         db_images = []
         db_flavors = []
+        db_ip_profiles_index = 0
+        db_ip_profiles = []
         uuid_list = []
         vnfd_uuid_list = []
         vnfd_catalog_descriptor = vnf_descriptor.get("vnfd:vnfd-catalog")
@@ -869,6 +871,27 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                 if vnfd_descriptor["id"] == str(vnfd["id"]):
                     break
 
+            # table ip_profiles (ip-profiles)
+            ip_profile_name2db_table_index = {}
+            for ip_profile in vnfd.get("ip-profiles").itervalues():
+                db_ip_profile = {
+                    "ip_version": str(ip_profile["ip-profile-params"].get("ip-version", "ipv4")),
+                    "subnet_address": str(ip_profile["ip-profile-params"].get("subnet-address")),
+                    "gateway_address": str(ip_profile["ip-profile-params"].get("gateway-address")),
+                    "dhcp_enabled": str(ip_profile["ip-profile-params"]["dhcp-params"].get("enabled", True)),
+                    "dhcp_start_address": str(ip_profile["ip-profile-params"]["dhcp-params"].get("start-address")),
+                    "dhcp_count": str(ip_profile["ip-profile-params"]["dhcp-params"].get("count")),
+                }
+                dns_list = []
+                for dns in ip_profile["ip-profile-params"]["dns-server"].itervalues():
+                    dns_list.append(str(dns.get("address")))
+                db_ip_profile["dns_address"] = ";".join(dns_list)
+                if ip_profile["ip-profile-params"].get('security-group'):
+                    db_ip_profile["security_group"] = ip_profile["ip-profile-params"]['security-group']
+                ip_profile_name2db_table_index[str(ip_profile["name"])] = db_ip_profiles_index
+                db_ip_profiles_index += 1
+                db_ip_profiles.append(db_ip_profile)
+
             # table nets (internal-vld)
             net_id2uuid = {}  # for mapping interface with network
             for vld in vnfd.get("internal-vld").itervalues():
@@ -883,6 +906,22 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                 }
                 net_id2uuid[vld.get("id")] = net_uuid
                 db_nets.append(db_net)
+                # ip-profile, link db_ip_profile with db_sce_net
+                if vld.get("ip-profile-ref"):
+                    ip_profile_name = vld.get("ip-profile-ref")
+                    if ip_profile_name not in ip_profile_name2db_table_index:
+                        raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{}]':'vld[{}]':'ip-profile-ref':"
+                                            "'{}'. Reference to a non-existing 'ip_profiles'".format(
+                                                str(vnfd["id"]), str(vld["id"]), str(vld["ip-profile-ref"])),
+                                            HTTP_Bad_Request)
+                    db_ip_profiles[ip_profile_name2db_table_index[ip_profile_name]]["net_id"] = net_uuid
+                else:  # check that no ip-address has been defined
+                    for icp in vld.get("internal-connection-point").itervalues():
+                        if icp.get("ip-address"):
+                            raise NfvoException("Error at 'vnfd[{}]':'vld[{}]':'internal-connection-point[{}]': it "
+                                            "contains an ip-address but no ip-profile has been defined at the VLD".format(
+                                                str(vnfd["id"]), str(vld["id"]), str(icp["id"])),
+                                            HTTP_Bad_Request)
 
             # connection points variable declaration
             cp_name2iface_uuid = {}
@@ -893,6 +932,10 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
             vdu_id2uuid = {}
             vdu_id2db_table_index = {}
             for vdu in vnfd.get("vdu").itervalues():
+
+                for vdu_descriptor in vnfd_descriptor["vdu"]:
+                    if vdu_descriptor["id"] == str(vdu["id"]):
+                        break
                 vm_uuid = str(uuid4())
                 uuid_list.append(vm_uuid)
                 vdu_id = get_str(vdu, "id", 255)
@@ -1043,27 +1086,43 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                                                 HTTP_Bad_Request)
                     elif iface.get("internal-connection-point-ref"):
                         try:
+                            for icp_descriptor in vdu_descriptor["internal-connection-point"]:
+                                if icp_descriptor["id"] == str(iface.get("internal-connection-point-ref")):
+                                    break
+                            else:
+                                raise KeyError("does not exist at vdu:internal-connection-point")
+                            icp = None
+                            icp_vld = None
                             for vld in vnfd.get("internal-vld").itervalues():
                                 for cp in vld.get("internal-connection-point").itervalues():
                                     if cp.get("id-ref") == iface.get("internal-connection-point-ref"):
-                                        db_interface["net_id"] = net_id2uuid[vld.get("id")]
-                                        for cp_descriptor in vnfd_descriptor["connection-point"]:
-                                            if cp_descriptor["name"] == db_interface["internal_name"]:
-                                                break
-                                        if str(cp_descriptor.get("port-security-enabled")).lower() == "false":
-                                            db_interface["port_security"] = 0
-                                        elif str(cp_descriptor.get("port-security-enabled")).lower() == "true":
-                                            db_interface["port_security"] = 1
-                                        break
-                        except KeyError:
+                                        if icp:
+                                            raise KeyError("is referenced by more than one 'internal-vld'")
+                                        icp = cp
+                                        icp_vld = vld
+                            if not icp:
+                                raise KeyError("is not referenced by any 'internal-vld'")
+
+                            db_interface["net_id"] = net_id2uuid[icp_vld.get("id")]
+                            if str(icp_descriptor.get("port-security-enabled")).lower() == "false":
+                                db_interface["port_security"] = 0
+                            elif str(icp_descriptor.get("port-security-enabled")).lower() == "true":
+                                db_interface["port_security"] = 1
+                            if icp.get("ip-address"):
+                                if not icp_vld.get("ip-profile-ref"):
+                                    raise NfvoException("Error. Invalid VNF descriptor: an ip-address is set at an "
+                                                        "internal-connection-point but no ip-profile has been defined "
+                                                        "at its internal-vld", HTTP_Bad_Request)
+                                db_interface["ip_address"] = str(icp.get("ip-address"))
+                        except KeyError as e:
                             raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':"
-                                                "'interface[{iface}]':'internal-connection-point-ref':'{cp}' is not"
-                                                " referenced by any internal-vld".format(
+                                                "'interface[{iface}]':'internal-connection-point-ref':'{cp}'"
+                                                " {msg}".format(
                                                     vnf=vnfd_id, vdu=vdu_id, iface=iface["name"],
-                                                    cp=iface.get("internal-connection-point-ref")),
+                                                    cp=iface.get("internal-connection-point-ref"), msg=str(e)),
                                                 HTTP_Bad_Request)
                     if iface.get("position") is not None:
                         db_interface["created_at"] = int(iface.get("position")) - 1000
+                    if iface.get("mac-address"):
+                        db_interface["mac"] = str(iface.get("mac-address"))
                     db_interfaces.append(db_interface)
 
                 # table flavors
@@ -1189,14 +1248,13 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
             if mgmt_access:
                 db_vnf["mgmt_access"] = yaml.safe_dump(mgmt_access, default_flow_style=True, width=256)
 
-
-
             db_vnfs.append(db_vnf)
         db_tables=[
             {"vnfs": db_vnfs},
             {"nets": db_nets},
             {"images": db_images},
             {"flavors": db_flavors},
+            {"ip_profiles": db_ip_profiles},
             {"vms": db_vms},
             {"interfaces": db_interfaces},
         ]
@@ -2107,7 +2165,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
     :param mydb:
     :param tenant_id:
     :param nsd_descriptor:
-    :return: The list of cretated NSD ids
+    :return: The list of created NSD ids
     """
     try:
         mynsd = nsd_catalog.nsd()
@@ -2119,6 +2177,11 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
         db_sce_nets = []
         db_sce_vnfs = []
         db_sce_interfaces = []
+        db_sce_vnffgs = []
+        db_sce_rsps = []
+        db_sce_rsp_hops = []
+        db_sce_classifiers = []
+        db_sce_classifier_matches = []
         db_ip_profiles = []
         db_ip_profiles_index = 0
         uuid_list = []
@@ -2126,7 +2189,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
         for nsd_yang in mynsd.nsd_catalog.nsd.itervalues():
             nsd = nsd_yang.get()
 
-            # table sceanrios
+            # table scenarios
             scenario_uuid = str(uuid4())
             uuid_list.append(scenario_uuid)
             nsd_uuid_list.append(scenario_uuid)
@@ -2250,26 +2313,155 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
                         db_sce_net["type"] = "data"
                     sce_interface_uuid = str(uuid4())
                     uuid_list.append(sce_net_uuid)
+                    iface_ip_address = None
+                    if iface.get("ip-address"):
+                        iface_ip_address = str(iface.get("ip-address"))
                     db_sce_interface = {
                         "uuid": sce_interface_uuid,
                         "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
                         "sce_net_id": sce_net_uuid,
                         "interface_id": interface_uuid,
-                        # "ip_address": #TODO
+                        "ip_address": iface_ip_address,
                     }
                     db_sce_interfaces.append(db_sce_interface)
                 if not db_sce_net["type"]:
                     db_sce_net["type"] = "bridge"
 
+            # table sce_vnffgs (vnffgd)
+            for vnffg in nsd.get("vnffgd").itervalues():
+                sce_vnffg_uuid = str(uuid4())
+                uuid_list.append(sce_vnffg_uuid)
+                db_sce_vnffg = {
+                    "uuid": sce_vnffg_uuid,
+                    "name": get_str(vnffg, "name", 255),
+                    "scenario_id": scenario_uuid,
+                    "vendor": get_str(vnffg, "vendor", 255),
+                    "description": get_str(vld, "description", 255),
+                }
+                db_sce_vnffgs.append(db_sce_vnffg)
+
+                # deal with rsps
+                db_sce_rsps = []
+                for rsp in vnffg.get("rsp").itervalues():
+                    sce_rsp_uuid = str(uuid4())
+                    uuid_list.append(sce_rsp_uuid)
+                    db_sce_rsp = {
+                        "uuid": sce_rsp_uuid,
+                        "name": get_str(rsp, "name", 255),
+                        "sce_vnffg_id": sce_vnffg_uuid,
+                        "id": get_str(rsp, "id", 255), # only useful to link with classifiers; will be removed later in the code
+                    }
+                    db_sce_rsps.append(db_sce_rsp)
+                    db_sce_rsp_hops = []
+                    for iface in rsp.get("vnfd-connection-point-ref").itervalues():
+                        vnf_index = int(iface['member-vnf-index-ref'])
+                        if_order = int(iface['order'])
+                        # check correct parameters
+                        if vnf_index not in vnf_index2vnf_uuid:
+                            raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+                                                "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
+                                                "'nsd':'constituent-vnfd'".format(
+                                                    str(nsd["id"]), str(rsp["id"]), str(iface["member-vnf-index-ref"])),
+                                                HTTP_Bad_Request)
+
+                        existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+                                                        FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+                                                        WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
+                                                               'external_name': get_str(iface, "vnfd-connection-point-ref",
+                                                                                        255)})
+                        if not existing_ifaces:
+                            raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+                                                "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
+                                                "connection-point name at VNFD '{}'".format(
+                                                    str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-connection-point-ref"]),
+                                                    str(iface.get("vnfd-id-ref"))[:255]),
+                                                HTTP_Bad_Request)
+                        interface_uuid = existing_ifaces[0]["uuid"]
+                        sce_rsp_hop_uuid = str(uuid4())
+                        uuid_list.append(sce_rsp_hop_uuid)
+                        db_sce_rsp_hop = {
+                            "uuid": sce_rsp_hop_uuid,
+                            "if_order": if_order,
+                            "interface_id": interface_uuid,
+                            "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
+                            "sce_rsp_id": sce_rsp_uuid,
+                        }
+                        db_sce_rsp_hops.append(db_sce_rsp_hop)
+
+                # deal with classifiers
+                db_sce_classifiers = []
+                for classifier in vnffg.get("classifier").itervalues():
+                    sce_classifier_uuid = str(uuid4())
+                    uuid_list.append(sce_classifier_uuid)
+
+                    # source VNF
+                    vnf_index = int(classifier['member-vnf-index-ref'])
+                    if vnf_index not in vnf_index2vnf_uuid:
+                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'classifier[{}]':'vnfd-connection-point"
+                                            "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
+                                            "'nsd':'constituent-vnfd'".format(
+                                                str(nsd["id"]), str(classifier["id"]), str(classifier["member-vnf-index-ref"])),
+                                            HTTP_Bad_Request)
+                    existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+                                                    FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+                                                    WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
+                                                           'external_name': get_str(classifier, "vnfd-connection-point-ref",
+                                                                                    255)})
+                    if not existing_ifaces:
+                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+                                            "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
+                                            "connection-point name at VNFD '{}'".format(
+                                                str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-connection-point-ref"]),
+                                                str(iface.get("vnfd-id-ref"))[:255]),
+                                            HTTP_Bad_Request)
+                    interface_uuid = existing_ifaces[0]["uuid"]
+
+                    db_sce_classifier = {
+                        "uuid": sce_classifier_uuid,
+                        "name": get_str(classifier, "name", 255),
+                        "sce_vnffg_id": sce_vnffg_uuid,
+                        "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
+                        "interface_id": interface_uuid,
+                    }
+                    rsp_id = get_str(classifier, "rsp-id-ref", 255)
+                    rsp = next((item for item in db_sce_rsps if item["id"] == rsp_id), None)
+                    db_sce_classifier["sce_rsp_id"] = rsp["uuid"]
+                    db_sce_classifiers.append(db_sce_classifier)
+
+                    db_sce_classifier_matches = []
+                    for match in classifier.get("match-attributes").itervalues():
+                        sce_classifier_match_uuid = str(uuid4())
+                        uuid_list.append(sce_classifier_match_uuid)
+                        db_sce_classifier_match = {
+                            "uuid": sce_classifier_match_uuid,
+                            "ip_proto": get_str(match, "ip-proto", 2),
+                            "source_ip": get_str(match, "source-ip-address", 16),
+                            "destination_ip": get_str(match, "destination-ip-address", 16),
+                            "source_port": get_str(match, "source-port", 5),
+                            "destination_port": get_str(match, "destination-port", 5),
+                            "sce_classifier_id": sce_classifier_uuid,
+                        }
+                        db_sce_classifier_matches.append(db_sce_classifier_match)
+                    # TODO: vnf/cp keys
+
+        # remove unneeded id's in sce_rsps
+        for rsp in db_sce_rsps:
+            rsp.pop('id')
+
         db_tables = [
             {"scenarios": db_scenarios},
             {"sce_nets": db_sce_nets},
             {"ip_profiles": db_ip_profiles},
             {"sce_vnfs": db_sce_vnfs},
             {"sce_interfaces": db_sce_interfaces},
+            {"sce_vnffgs": db_sce_vnffgs},
+            {"sce_rsps": db_sce_rsps},
+            {"sce_rsp_hops": db_sce_rsp_hops},
+            {"sce_classifiers": db_sce_classifiers},
+            {"sce_classifier_matches": db_sce_classifier_matches},
         ]
 
-        logger.debug("create_vnf Deployment done vnfDict: %s",
+        logger.debug("new_nsd_v3 done: %s",
                     yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
         mydb.new_rows(db_tables, uuid_list)
         return nsd_uuid_list
@@ -2694,7 +2886,6 @@ def create_instance(mydb, tenant_id, instance_dict):
     myvim_threads_id[default_datacenter_id], _ = get_vim_thread(mydb, tenant_id, default_datacenter_id)
     tenant = mydb.get_rows_by_id('nfvo_tenants', tenant_id)
     # myvim_tenant = myvim['tenant_id']
-
     rollbackList=[]
 
     # print "Checking that the scenario exists and getting the scenario dictionary"
@@ -2708,6 +2899,10 @@ def create_instance(mydb, tenant_id, instance_dict):
     db_instance_vnfs = []
     db_instance_vms = []
     db_instance_interfaces = []
+    db_instance_sfis = []
+    db_instance_sfs = []
+    db_instance_classifications = []
+    db_instance_sfps = []
     db_ip_profiles = []
     db_vim_actions = []
     uuid_list = []
@@ -2795,7 +2990,7 @@ def create_instance(mydb, tenant_id, instance_dict):
         # 0.2 merge instance information into scenario
         # Ideally, the operation should be as simple as: update(scenarioDict,instance_dict)
         # However, this is not possible yet.
-        for net_name, net_instance_desc in instance_dict.get("networks",{}).iteritems():
+        for net_name, net_instance_desc in instance_dict.get("networks", {}).iteritems():
             for scenario_net in scenarioDict['nets']:
                 if net_name == scenario_net["name"]:
                     if 'ip-profile' in net_instance_desc:
@@ -2816,13 +3011,13 @@ def create_instance(mydb, tenant_id, instance_dict):
                             scenario_net['ip_profile'] = ipprofile_db
                         else:
                             update(scenario_net['ip_profile'], ipprofile_db)
-            for interface in net_instance_desc.get('interfaces', () ):
+            for interface in net_instance_desc.get('interfaces', ()):
                 if 'ip_address' in interface:
                     for vnf in scenarioDict['vnfs']:
                         if interface['vnf'] == vnf['name']:
                             for vnf_interface in vnf['interfaces']:
                                 if interface['vnf_interface'] == vnf_interface['external_name']:
-                                    vnf_interface['ip_address']=interface['ip_address']
+                                    vnf_interface['ip_address'] = interface['ip_address']
 
         # logger.debug(">>>>>>>> Merged dictionary")
         # logger.debug("Creating instance scenario-dict MERGED:\n%s",
@@ -3096,50 +3291,53 @@ def create_instance(mydb, tenant_id, instance_dict):
                 db_vm_ifaces = []
                 for iface in vm['interfaces']:
                     netDict = {}
-                    if iface['type']=="data":
+                    if iface['type'] == "data":
                         netDict['type'] = iface['model']
-                    elif "model" in iface and iface["model"]!=None:
-                        netDict['model']=iface['model']
+                    elif "model" in iface and iface["model"] != None:
+                        netDict['model'] = iface['model']
                     # TODO in future, remove this because mac_address will not be set, and the type of PF,VF
                     # is obtained from the interfaces table model
                     # discover type of interface looking at flavor
-                    for numa in flavor_dict.get('extended',{}).get('numas',[]):
-                        for flavor_iface in numa.get('interfaces',[]):
+                    for numa in flavor_dict.get('extended', {}).get('numas', []):
+                        for flavor_iface in numa.get('interfaces', []):
                             if flavor_iface.get('name') == iface['internal_name']:
                                 if flavor_iface['dedicated'] == 'yes':
-                                    netDict['type']="PF"    #passthrough
+                                    netDict['type'] = "PF"    # passthrough
                                 elif flavor_iface['dedicated'] == 'no':
-                                    netDict['type']="VF"    #siov
+                                    netDict['type'] = "VF"    # siov
                                 elif flavor_iface['dedicated'] == 'yes:sriov':
-                                    netDict['type']="VFnotShared"   #sriov but only one sriov on the PF
+                                    netDict['type'] = "VFnotShared"   # sriov but only one sriov on the PF
                                 netDict["mac_address"] = flavor_iface.get("mac_address")
-                                break;
+                                break
                     netDict["use"]=iface['type']
-                    if netDict["use"]=="data" and not netDict.get("type"):
-                        #print "netDict", netDict
-                        #print "iface", iface
-                        e_text = "Cannot determine the interface type PF or VF of VNF '%s' VM '%s' iface '%s'" %(sce_vnf['name'], vm['name'], iface['internal_name'])
-                        if flavor_dict.get('extended')==None:
+                    if netDict["use"] == "data" and not netDict.get("type"):
+                        # print "netDict", netDict
+                        # print "iface", iface
+                        e_text = "Cannot determine the interface type PF or VF of VNF '{}' VM '{}' iface '{}'".fromat(
+                            sce_vnf['name'], vm['name'], iface['internal_name'])
+                        if flavor_dict.get('extended') == None:
                             raise NfvoException(e_text + "After database migration some information is not available. \
                                     Try to delete and create the scenarios and VNFs again", HTTP_Conflict)
                         else:
                             raise NfvoException(e_text, HTTP_Internal_Server_Error)
-                    if netDict["use"]=="mgmt" or netDict["use"]=="bridge":
+                    if netDict["use"] == "mgmt" or netDict["use"] == "bridge":
                         netDict["type"]="virtual"
-                    if "vpci" in iface and iface["vpci"] is not None:
+                    if iface.get("vpci"):
                         netDict['vpci'] = iface['vpci']
-                    if "mac" in iface and iface["mac"] is not None:
+                    if iface.get("mac"):
                         netDict['mac_address'] = iface['mac']
-                    if "port-security" in iface and iface["port-security"] is not None:
+                    if iface.get("ip_address"):
+                        netDict['ip_address'] = iface['ip_address']
+                    if iface.get("port-security") is not None:
                         netDict['port_security'] = iface['port-security']
-                    if "floating-ip" in iface and iface["floating-ip"] is not None:
+                    if iface.get("floating-ip") is not None:
                         netDict['floating_ip'] = iface['floating-ip']
                     netDict['name'] = iface['internal_name']
                     if iface['net_id'] is None:
                         for vnf_iface in sce_vnf["interfaces"]:
                             # print iface
                             # print vnf_iface
-                            if vnf_iface['interface_id']==iface['uuid']:
+                            if vnf_iface['interface_id'] == iface['uuid']:
                                 netDict['net_id'] = "TASK-{}".format(net2task_id['scenario'][ vnf_iface['sce_net_id'] ][datacenter_id])
                                 instance_net_id = sce_net2instance[ vnf_iface['sce_net_id'] ][datacenter_id]
                                 task_depends_on.append(net2task_id['scenario'][ vnf_iface['sce_net_id'] ][datacenter_id])
@@ -3160,6 +3358,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                         # 'vim_interface_id': ,
                         'type': 'external' if iface['external_name'] is not None else 'internal',
                         'ip_address': iface.get('ip_address'),
+                        'mac_address': iface.get('mac'),
                         'floating_ip': int(iface.get('floating-ip', False)),
                         'port_security': int(iface.get('port-security', True))
                     }
@@ -3193,8 +3392,8 @@ def create_instance(mydb, tenant_id, instance_dict):
                     for net in myVMDict['networks']:
                         if "vim_id" in net:
                             for iface in vm['interfaces']:
-                                if net["name"]==iface["internal_name"]:
-                                    iface["vim_id"]=net["vim_id"]
+                                if net["name"] == iface["internal_name"]:
+                                    iface["vim_id"] = net["vim_id"]
                                     break
                     vm_uuid = str(uuid4())
                     uuid_list.append(vm_uuid)
@@ -3244,6 +3443,157 @@ def create_instance(mydb, tenant_id, instance_dict):
                     task_index += 1
                     db_vim_actions.append(db_vim_action)
 
+        task_depends_on = []
+        for vnffg in scenarioDict['vnffgs']:
+            for rsp in vnffg['rsps']:
+                sfs_created = []
+                for cp in rsp['connection_points']:
+                    count = mydb.get_rows(
+                            SELECT=('vms.count'),
+                            FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_rsp_hops as h on interfaces.uuid=h.interface_id",
+                            WHERE={'h.uuid': cp['uuid']})[0]['count']
+                    instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == cp['sce_vnf_id']), None)
+                    instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
+                    dependencies = []
+                    for instance_vm in instance_vms:
+                        action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
+                        if action:
+                            dependencies.append(action['task_index'])
+                        # TODO: throw exception if count != len(instance_vms)
+                        # TODO: and action shouldn't ever be None
+                    sfis_created = []
+                    for i in range(count):
+                        # create sfis
+                        sfi_uuid = str(uuid4())
+                        uuid_list.append(sfi_uuid)
+                        db_sfi = {
+                            "uuid": sfi_uuid,
+                            "instance_scenario_id": instance_uuid,
+                            'sce_rsp_hop_id': cp['uuid'],
+                            'datacenter_id': datacenter_id,
+                            'datacenter_tenant_id': myvim_thread_id,
+                            "vim_sfi_id": None, # vim thread will populate
+                        }
+                        db_instance_sfis.append(db_sfi)
+                        db_vim_action = {
+                            "instance_action_id": instance_action_id,
+                            "task_index": task_index,
+                            "datacenter_vim_id": myvim_thread_id,
+                            "action": "CREATE",
+                            "status": "SCHEDULED",
+                            "item": "instance_sfis",
+                            "item_id": sfi_uuid,
+                            "extra": yaml.safe_dump({"params": "", "depends_on": [dependencies[i]]},
+                                                    default_flow_style=True, width=256)
+                        }
+                        sfis_created.append(task_index)
+                        task_index += 1
+                        db_vim_actions.append(db_vim_action)
+                    # create sfs
+                    sf_uuid = str(uuid4())
+                    uuid_list.append(sf_uuid)
+                    db_sf = {
+                        "uuid": sf_uuid,
+                        "instance_scenario_id": instance_uuid,
+                        'sce_rsp_hop_id': cp['uuid'],
+                        'datacenter_id': datacenter_id,
+                        'datacenter_tenant_id': myvim_thread_id,
+                        "vim_sf_id": None, # vim thread will populate
+                    }
+                    db_instance_sfs.append(db_sf)
+                    db_vim_action = {
+                        "instance_action_id": instance_action_id,
+                        "task_index": task_index,
+                        "datacenter_vim_id": myvim_thread_id,
+                        "action": "CREATE",
+                        "status": "SCHEDULED",
+                        "item": "instance_sfs",
+                        "item_id": sf_uuid,
+                        "extra": yaml.safe_dump({"params": "", "depends_on": sfis_created},
+                                                default_flow_style=True, width=256)
+                    }
+                    sfs_created.append(task_index)
+                    task_index += 1
+                    db_vim_actions.append(db_vim_action)
+                classifier = rsp['classifier']
+
+                # TODO the following ~13 lines can be reused for the sfi case
+                count = mydb.get_rows(
+                        SELECT=('vms.count'),
+                        FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_classifiers as c on interfaces.uuid=c.interface_id",
+                        WHERE={'c.uuid': classifier['uuid']})[0]['count']
+                instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == classifier['sce_vnf_id']), None)
+                instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
+                dependencies = []
+                for instance_vm in instance_vms:
+                    action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
+                    if action:
+                        dependencies.append(action['task_index'])
+                    # TODO: throw exception if count != len(instance_vms)
+                    # TODO: and action shouldn't ever be None
+                classifications_created = []
+                for i in range(count):
+                    for match in classifier['matches']:
+                        # create classifications
+                        classification_uuid = str(uuid4())
+                        uuid_list.append(classification_uuid)
+                        db_classification = {
+                            "uuid": classification_uuid,
+                            "instance_scenario_id": instance_uuid,
+                            'sce_classifier_match_id': match['uuid'],
+                            'datacenter_id': datacenter_id,
+                            'datacenter_tenant_id': myvim_thread_id,
+                            "vim_classification_id": None, # vim thread will populate
+                        }
+                        db_instance_classifications.append(db_classification)
+                        classification_params = {
+                            "ip_proto": match["ip_proto"],
+                            "source_ip": match["source_ip"],
+                            "destination_ip": match["destination_ip"],
+                            "source_port": match["source_port"],
+                            "destination_port": match["destination_port"]
+                        }
+                        db_vim_action = {
+                            "instance_action_id": instance_action_id,
+                            "task_index": task_index,
+                            "datacenter_vim_id": myvim_thread_id,
+                            "action": "CREATE",
+                            "status": "SCHEDULED",
+                            "item": "instance_classifications",
+                            "item_id": classification_uuid,
+                            "extra": yaml.safe_dump({"params": classification_params, "depends_on": [dependencies[i]]},
+                                                    default_flow_style=True, width=256)
+                        }
+                        classifications_created.append(task_index)
+                        task_index += 1
+                        db_vim_actions.append(db_vim_action)
+
+                # create sfps
+                sfp_uuid = str(uuid4())
+                uuid_list.append(sfp_uuid)
+                db_sfp = {
+                    "uuid": sfp_uuid,
+                    "instance_scenario_id": instance_uuid,
+                    'sce_rsp_id': rsp['uuid'],
+                    'datacenter_id': datacenter_id,
+                    'datacenter_tenant_id': myvim_thread_id,
+                    "vim_sfp_id": None, # vim thread will populate
+                }
+                db_instance_sfps.append(db_sfp)
+                db_vim_action = {
+                    "instance_action_id": instance_action_id,
+                    "task_index": task_index,
+                    "datacenter_vim_id": myvim_thread_id,
+                    "action": "CREATE",
+                    "status": "SCHEDULED",
+                    "item": "instance_sfps",
+                    "item_id": sfp_uuid,
+                    "extra": yaml.safe_dump({"params": "", "depends_on": sfs_created + classifications_created},
+                                            default_flow_style=True, width=256)
+                }
+                task_index += 1
+                db_vim_actions.append(db_vim_action)
+
         scenarioDict["datacenter2tenant"] = myvim_threads_id
 
         db_instance_action["number_tasks"] = task_index
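Each VNFFG task scheduled above stores its parameters and dependency list in the vim_actions "extra" column as single-line YAML. A minimal sketch of that serialization, with assumed task indices:

    import yaml

    # e.g. an instance_sfps CREATE task whose SFs were created by tasks 4 and 6
    # and whose classifications by task 7 (indices are illustrative)
    extra = yaml.safe_dump({"params": "", "depends_on": [4, 6, 7]},
                           default_flow_style=True, width=256)
    # extra == "{depends_on: [4, 6, 7], params: ''}\n"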
@@ -3257,6 +3607,10 @@ def create_instance(mydb, tenant_id, instance_dict):
             {"instance_vms": db_instance_vms},
             {"instance_interfaces": db_instance_interfaces},
             {"instance_actions": db_instance_action},
+            {"instance_sfis": db_instance_sfis},
+            {"instance_sfs": db_instance_sfs},
+            {"instance_classifications": db_instance_classifications},
+            {"instance_sfps": db_instance_sfps},
             {"vim_actions": db_vim_actions}
         ]
 
@@ -3288,7 +3642,6 @@ def delete_instance(mydb, tenant_id, instance_id):
     # print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)
     tenant_id = instanceDict["tenant_id"]
     # print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
-
     # 1. Delete from Database
     message = mydb.delete_instance_scenario(instance_id, tenant_id)
 
@@ -3397,6 +3750,156 @@ def delete_instance(mydb, tenant_id, instance_id):
         task_index += 1
         db_vim_actions.append(db_vim_action)
 
+    # 2.3 deleting VNFFGs
+
+    for sfp in instanceDict.get('sfps', ()):
+        vimthread_affected[sfp["datacenter_tenant_id"]] = None
+        datacenter_key = (sfp["datacenter_id"], sfp["datacenter_tenant_id"])
+        if datacenter_key not in myvims:
+            try:
+                _,myvim_thread = get_vim_thread(mydb, tenant_id, sfp["datacenter_id"], sfp["datacenter_tenant_id"])
+            except NfvoException as e:
+                logger.error(str(e))
+                myvim_thread = None
+            myvim_threads[datacenter_key] = myvim_thread
+            vims = get_vim(mydb, tenant_id, datacenter_id=sfp["datacenter_id"],
+                           datacenter_tenant_id=sfp["datacenter_tenant_id"])
+            if len(vims) == 0:
+                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfp["datacenter_id"], sfp["datacenter_tenant_id"]))
+                myvims[datacenter_key] = None
+            else:
+                myvims[datacenter_key] = vims.values()[0]
+        myvim = myvims[datacenter_key]
+        myvim_thread = myvim_threads[datacenter_key]
+
+        if not myvim:
+            error_msg += "\n    vim_sfp_id={} cannot be deleted because datacenter={} not found".format(sfp['vim_sfp_id'], sfp["datacenter_id"])
+            continue
+        extra = {"params": (sfp['vim_sfp_id'])}
+        db_vim_action = {
+            "instance_action_id": instance_action_id,
+            "task_index": task_index,
+            "datacenter_vim_id": sfp["datacenter_tenant_id"],
+            "action": "DELETE",
+            "status": "SCHEDULED",
+            "item": "instance_sfps",
+            "item_id": sfp["uuid"],
+            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+        }
+        task_index += 1
+        db_vim_actions.append(db_vim_action)
+
+    for sf in instanceDict.get('sfs', ()):
+        vimthread_affected[sf["datacenter_tenant_id"]] = None
+        datacenter_key = (sf["datacenter_id"], sf["datacenter_tenant_id"])
+        if datacenter_key not in myvims:
+            try:
+                _,myvim_thread = get_vim_thread(mydb, tenant_id, sf["datacenter_id"], sf["datacenter_tenant_id"])
+            except NfvoException as e:
+                logger.error(str(e))
+                myvim_thread = None
+            myvim_threads[datacenter_key] = myvim_thread
+            vims = get_vim(mydb, tenant_id, datacenter_id=sf["datacenter_id"],
+                           datacenter_tenant_id=sf["datacenter_tenant_id"])
+            if len(vims) == 0:
+                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sf["datacenter_id"], sf["datacenter_tenant_id"]))
+                myvims[datacenter_key] = None
+            else:
+                myvims[datacenter_key] = vims.values()[0]
+        myvim = myvims[datacenter_key]
+        myvim_thread = myvim_threads[datacenter_key]
+
+        if not myvim:
+            error_msg += "\n    vim_sf_id={} cannot be deleted because datacenter={} not found".format(sf['vim_sf_id'], sf["datacenter_id"])
+            continue
+        extra = {"params": (sf['vim_sf_id'])}
+        db_vim_action = {
+            "instance_action_id": instance_action_id,
+            "task_index": task_index,
+            "datacenter_vim_id": sf["datacenter_tenant_id"],
+            "action": "DELETE",
+            "status": "SCHEDULED",
+            "item": "instance_sfs",
+            "item_id": sf["uuid"],
+            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+        }
+        task_index += 1
+        db_vim_actions.append(db_vim_action)
+
+    for sfi in instanceDict.get('sfis', ()):
+        vimthread_affected[sfi["datacenter_tenant_id"]] = None
+        datacenter_key = (sfi["datacenter_id"], sfi["datacenter_tenant_id"])
+        if datacenter_key not in myvims:
+            try:
+                _,myvim_thread = get_vim_thread(mydb, tenant_id, sfi["datacenter_id"], sfi["datacenter_tenant_id"])
+            except NfvoException as e:
+                logger.error(str(e))
+                myvim_thread = None
+            myvim_threads[datacenter_key] = myvim_thread
+            vims = get_vim(mydb, tenant_id, datacenter_id=sfi["datacenter_id"],
+                           datacenter_tenant_id=sfi["datacenter_tenant_id"])
+            if len(vims) == 0:
+                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfi["datacenter_id"], sfi["datacenter_tenant_id"]))
+                myvims[datacenter_key] = None
+            else:
+                myvims[datacenter_key] = vims.values()[0]
+        myvim = myvims[datacenter_key]
+        myvim_thread = myvim_threads[datacenter_key]
+
+        if not myvim:
+            error_msg += "\n    vim_sfi_id={} cannot be deleted because datacenter={} not found".format(sfi['vim_sfi_id'], sfi["datacenter_id"])
+            continue
+        extra = {"params": (sfi['vim_sfi_id'])}
+        db_vim_action = {
+            "instance_action_id": instance_action_id,
+            "task_index": task_index,
+            "datacenter_vim_id": sfi["datacenter_tenant_id"],
+            "action": "DELETE",
+            "status": "SCHEDULED",
+            "item": "instance_sfis",
+            "item_id": sfi["uuid"],
+            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+        }
+        task_index += 1
+        db_vim_actions.append(db_vim_action)
+
+    for classification in instanceDict['classifications']:
+        vimthread_affected[classification["datacenter_tenant_id"]] = None
+        datacenter_key = (classification["datacenter_id"], classification["datacenter_tenant_id"])
+        if datacenter_key not in myvims:
+            try:
+                _,myvim_thread = get_vim_thread(mydb, tenant_id, classification["datacenter_id"], classification["datacenter_tenant_id"])
+            except NfvoException as e:
+                logger.error(str(e))
+                myvim_thread = None
+            myvim_threads[datacenter_key] = myvim_thread
+            vims = get_vim(mydb, tenant_id, datacenter_id=classification["datacenter_id"],
+                           datacenter_tenant_id=classification["datacenter_tenant_id"])
+            if len(vims) == 0:
+                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(classification["datacenter_id"], classification["datacenter_tenant_id"]))
+                myvims[datacenter_key] = None
+            else:
+                myvims[datacenter_key] = vims.values()[0]
+        myvim = myvims[datacenter_key]
+        myvim_thread = myvim_threads[datacenter_key]
+
+        if not myvim:
+            error_msg += "\n    vim_classification_id={} cannot be deleted because datacenter={} not found".format(classification['vim_classification_id'], classification["datacenter_id"])
+            continue
+        extra = {"params": (classification['vim_classification_id'])}
+        db_vim_action = {
+            "instance_action_id": instance_action_id,
+            "task_index": task_index,
+            "datacenter_vim_id": classification["datacenter_tenant_id"],
+            "action": "DELETE",
+            "status": "SCHEDULED",
+            "item": "instance_classifications",
+            "item_id": classification["uuid"],
+            "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+        }
+        task_index += 1
+        db_vim_actions.append(db_vim_action)
+
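The four deletion loops above repeat the same per-item datacenter/VIM-thread lookup. A possible helper capturing that shared logic is sketched below; the name is hypothetical and it assumes the get_vim_thread, get_vim, NfvoException and logger objects already present in nfvo.py:

    def _get_vim_for_item(mydb, tenant_id, item, myvims, myvim_threads):
        # Resolve and cache the VIM connector and vim_thread for one sfp/sf/sfi/classification row
        key = (item["datacenter_id"], item["datacenter_tenant_id"])
        if key not in myvims:
            try:
                _, thread = get_vim_thread(mydb, tenant_id, key[0], key[1])
            except NfvoException as e:
                logger.error(str(e))
                thread = None
            myvim_threads[key] = thread
            vims = get_vim(mydb, tenant_id, datacenter_id=key[0], datacenter_tenant_id=key[1])
            myvims[key] = vims.values()[0] if vims else None
        return myvims[key], myvim_threads[key]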
     db_instance_action["number_tasks"] = task_index
     db_tables = [
         {"instance_actions": db_instance_action},
@@ -3740,7 +4243,7 @@ def new_tenant(mydb, tenant_dict):
         tenant_dict['encrypted_RO_priv_key'] = priv_key
         mydb.new_row("nfvo_tenants", tenant_dict, confidential_data=True)
     except db_base_Exception as e:
-        raise NfvoException("Error creating the new tenant: {} ".format(tenant_dict['name']) + str(e), HTTP_Internal_Server_Error)
+        raise NfvoException("Error creating the new tenant: {} ".format(tenant_dict['name']) + str(e), e.http_code)
     return tenant_uuid
 
 def delete_tenant(mydb, tenant):
@@ -4123,7 +4626,7 @@ def get_sdn_net_id(mydb, tenant_id, datacenter, network_id):
         result =  mydb.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets', WHERE=search_dict)
     except db_base_Exception as e:
         raise NfvoException("db_base_Exception obtaining SDN network to associated to vim network {}".format(
-            network_id) + str(e), HTTP_Internal_Server_Error)
+            network_id) + str(e), e.http_code)
 
     sdn_net_counter = 0
     for net in result:
@@ -4180,7 +4683,7 @@ def vim_net_sdn_attach(mydb, tenant_id, datacenter, network_id, descriptor):
             sdn_network_id, network_id) + str(e), HTTP_Internal_Server_Error)
     except db_base_Exception as e:
         raise NfvoException("db_base_Exception attaching SDN network to vim network {}".format(
-            network_id) + str(e), HTTP_Internal_Server_Error)
+            network_id) + str(e), e.http_code)
 
     return 'Port uuid: '+ result
 
@@ -4310,7 +4813,7 @@ def vim_action_delete(mydb, tenant_id, datacenter, item, name):
                     mydb.delete_row(FROM='instance_nets', WHERE={'instance_scenario_id': None, 'sdn_net_id': sdn_network_id, 'vim_net_id': item_id})
                 except db_base_Exception as e:
                     raise NfvoException("Error deleting correspondence for VIM/SDN dataplane networks{}: ".format(correspondence) +
-                                        str(e), HTTP_Internal_Server_Error)
+                                        str(e), e.http_code)
 
                 #Delete the SDN network
                 try:
@@ -4380,7 +4883,7 @@ def vim_action_create(mydb, tenant_id, datacenter, item, descriptor):
                     mydb.new_row('instance_nets', correspondence, add_uuid=True)
                 except db_base_Exception as e:
                     raise NfvoException("Error saving correspondence for VIM/SDN dataplane networks{}: {}".format(
-                        correspondence, e), HTTP_Internal_Server_Error)
+                        correspondence, e), e.http_code)
         elif item=="tenants":
             tenant = descriptor["tenant"]
             content = myvim.new_tenant(tenant["name"], tenant.get("description"))
index 8b72e14..055699e 100644 (file)
@@ -37,7 +37,9 @@ import time
 tables_with_createdat_field=["datacenters","instance_nets","instance_scenarios","instance_vms","instance_vnfs",
                            "interfaces","nets","nfvo_tenants","scenarios","sce_interfaces","sce_nets",
                            "sce_vnfs","tenants_datacenters","datacenter_tenants","vms","vnfs", "datacenter_nets",
-                           "instance_actions", "vim_actions"]
+                           "instance_actions", "vim_actions", "sce_vnffgs", "sce_rsps", "sce_rsp_hops",
+                           "sce_classifiers", "sce_classifier_matches", "instance_sfis", "instance_sfs",
+                           "instance_classifications", "instance_sfps"]
 
 
 class nfvo_db(db_base.db_base):
@@ -596,14 +598,14 @@ class nfvo_db(db_base.db_base):
                             vnf['mgmt_access'] = yaml.load(mgmt_access_dict[0]['mgmt_access'])
                         else:
                             vnf['mgmt_access'] = None
-                        #sce_interfaces
+                        # sce_interfaces
                         cmd = "SELECT scei.uuid,scei.sce_net_id,scei.interface_id,i.external_name,scei.ip_address"\
                               " FROM sce_interfaces as scei join interfaces as i on scei.interface_id=i.uuid"\
                               " WHERE scei.sce_vnf_id='{}' ORDER BY scei.created_at".format(vnf['uuid'])
                         self.logger.debug(cmd)
                         self.cur.execute(cmd)
                         vnf['interfaces'] = self.cur.fetchall()
-                        #vms
+                        # vms
                         cmd = "SELECT vms.uuid as uuid, flavor_id, image_id, vms.name as name," \
                               " vms.description as description, vms.boot_data as boot_data, count," \
                               " vms.availability_zone as availability_zone" \
@@ -641,9 +643,14 @@ class nfvo_db(db_base.db_base):
                             self.logger.debug(cmd)
                             self.cur.execute(cmd)
                             vm['interfaces'] = self.cur.fetchall()
-                            for index in range(0,len(vm['interfaces'])):
-                                vm['interfaces'][index]['port-security'] = vm['interfaces'][index].pop("port_security")
-                                vm['interfaces'][index]['floating-ip'] = vm['interfaces'][index].pop("floating_ip")
+                            for iface in vm['interfaces']:
+                                iface['port-security'] = iface.pop("port_security")
+                                iface['floating-ip'] = iface.pop("floating_ip")
+                                for sce_interface in vnf["interfaces"]:
+                                    if sce_interface["interface_id"] == iface["uuid"]:
+                                        if sce_interface["ip_address"]:
+                                            iface["ip_address"] = sce_interface["ip_address"]
+                                        break
                         #nets    every net of a vms
                         cmd = "SELECT uuid,name,type,description FROM nets WHERE vnf_id='{}'".format(vnf['vnf_id'])  
                         self.logger.debug(cmd)
@@ -695,6 +702,36 @@ class nfvo_db(db_base.db_base):
                     
                     db_base._convert_datetime2str(scenario_dict)
                     db_base._convert_str2boolean(scenario_dict, ('public','shared','external','port-security','floating-ip') )
+
+                    #forwarding graphs
+                    cmd = "SELECT uuid,name,description,vendor FROM sce_vnffgs WHERE scenario_id='{}' "\
+                          "ORDER BY created_at".format(scenario_dict['uuid'])
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    scenario_dict['vnffgs'] = self.cur.fetchall()
+                    for vnffg in scenario_dict['vnffgs']:
+                        cmd = "SELECT uuid,name FROM sce_rsps WHERE sce_vnffg_id='{}' "\
+                              "ORDER BY created_at".format(vnffg['uuid'])
+                        self.logger.debug(cmd)
+                        self.cur.execute(cmd)
+                        vnffg['rsps'] = self.cur.fetchall()
+                        for rsp in vnffg['rsps']:
+                            cmd = "SELECT uuid,if_order,interface_id,sce_vnf_id FROM sce_rsp_hops WHERE sce_rsp_id='{}' "\
+                                  "ORDER BY created_at".format(rsp['uuid'])
+                            self.logger.debug(cmd)
+                            self.cur.execute(cmd)
+                            rsp['connection_points'] = self.cur.fetchall()
+                            cmd = "SELECT uuid,name,sce_vnf_id,interface_id FROM sce_classifiers WHERE sce_vnffg_id='{}' "\
+                                  "AND sce_rsp_id='{}' ORDER BY created_at".format(vnffg['uuid'], rsp['uuid'])
+                            self.logger.debug(cmd)
+                            self.cur.execute(cmd)
+                            rsp['classifier'] = self.cur.fetchone()
+                            cmd = "SELECT uuid,ip_proto,source_ip,destination_ip,source_port,destination_port FROM sce_classifier_matches "\
+                                  "WHERE sce_classifier_id='{}' ORDER BY created_at".format(rsp['classifier']['uuid'])
+                            self.logger.debug(cmd)
+                            self.cur.execute(cmd)
+                            rsp['classifier']['matches'] = self.cur.fetchall()
+
                     return scenario_dict
             except (mdb.Error, AttributeError) as e:
                 self._format_error(e, tries)
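For reference, the forwarding-graph queries added above leave scenario_dict with roughly the following shape (uuids and match values are made up):

    scenario_dict["vnffgs"] = [{
        "uuid": "<vnffg uuid>", "name": "vnffg1", "description": None, "vendor": None,
        "rsps": [{
            "uuid": "<rsp uuid>", "name": "rsp1",
            "connection_points": [
                {"uuid": "<hop uuid>", "if_order": 0,
                 "interface_id": "<interface uuid>", "sce_vnf_id": "<sce_vnf uuid>"},
            ],
            "classifier": {
                "uuid": "<classifier uuid>", "name": "class1",
                "sce_vnf_id": "<sce_vnf uuid>", "interface_id": "<interface uuid>",
                "matches": [{"uuid": "<match uuid>", "ip_proto": 6,
                             "source_ip": "10.0.0.10", "destination_ip": "10.0.1.10",
                             "source_port": 0, "destination_port": 80}],
            },
        }],
    }]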
@@ -983,7 +1020,47 @@ class nfvo_db(db_base.db_base):
                     self.logger.debug(cmd)
                     self.cur.execute(cmd)
                     instance_dict['nets'] = self.cur.fetchall()
-                    
+
+                    #instance_sfps
+                    cmd = "SELECT uuid,vim_sfp_id,sce_rsp_id,datacenter_id,"\
+                          "datacenter_tenant_id,status,error_msg,vim_info"\
+                            " FROM instance_sfps" \
+                            " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    instance_dict['sfps'] = self.cur.fetchall()
+
+                    # for sfp in instance_dict['sfps']:
+                    #instance_sfs
+                    cmd = "SELECT uuid,vim_sf_id,sce_rsp_hop_id,datacenter_id,"\
+                          "datacenter_tenant_id,status,error_msg,vim_info"\
+                            " FROM instance_sfs" \
+                            " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sfp_id
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    instance_dict['sfs'] = self.cur.fetchall()
+
+                    #for sf in instance_dict['sfs']:
+                    #instance_sfis
+                    cmd = "SELECT uuid,vim_sfi_id,sce_rsp_hop_id,datacenter_id,"\
+                          "datacenter_tenant_id,status,error_msg,vim_info"\
+                            " FROM instance_sfis" \
+                            " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sf_id
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    instance_dict['sfis'] = self.cur.fetchall()
+                    # for sfi in instance_dict['sfis']:
+
+                    #instance_classifications
+                    cmd = "SELECT uuid,vim_classification_id,sce_classifier_match_id,datacenter_id,"\
+                          "datacenter_tenant_id,status,error_msg,vim_info"\
+                            " FROM instance_classifications" \
+                            " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
+                    self.logger.debug(cmd)
+                    self.cur.execute(cmd)
+                    instance_dict['classifications'] = self.cur.fetchall()
+                    # for classification in instance_dict['classifications']:
+
                     db_base._convert_datetime2str(instance_dict)
                     db_base._convert_str2boolean(instance_dict, ('public','shared','created') )
                     return instance_dict
index 9e15ac5..34d3ba4 100644 (file)
@@ -101,9 +101,9 @@ config_schema = {
         "http_console_host": nameshort_schema,
         "http_console_ports": {
             "type": "array", 
-            "items": {"OneOf" : [
+            "items": {"OneOf": [
                 port_schema, 
-                {"type":"object", "properties":{"from": port_schema, "to": port_schema}, "required": ["from","to"]} 
+                {"type": "object", "properties": {"from": port_schema, "to": port_schema}, "required": ["from", "to"]}
             ]}
         },
         "log_level": log_level_schema,
@@ -300,22 +300,22 @@ datacenter_associate_schema={
 }
 
 dhcp_schema = {
-    "title":"DHCP schema",
+    "title": "DHCP schema",
     "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
+    "type": "object",
     "properties":{
         "enabled": {"type": "boolean"},
-        "start-address": ip_schema,
-        "count": integer1_schema
+        "start-address": {"OneOf": [{"type": "null"}, ip_schema]},
+        "count": integer0_schema
     },
     "required": ["enabled", "start-address", "count"],
 }
 
 ip_profile_schema = {
-    "title":"IP profile schema",
+    "title": "IP profile schema",
     "$schema": "http://json-schema.org/draft-04/schema#",
-    "type":"object",
-    "properties":{
+    "type": "object",
+    "properties": {
         "ip-version": {"type": "string", "enum": ["IPv4","IPv6"]},
         "subnet-address": ip_prefix_schema,
         "gateway-address": ip_schema,
index ce0fc7f..cbcd31f 100644 (file)
@@ -484,11 +484,11 @@ class vim_thread(threading.Thread):
                         break
                     elif task_dependency["status"] == "FAILED":
                         raise VimThreadException(
-                            "Cannot {} {}, (task {}.{}) because depends on failed {} {}, (task{}.{})".format(
+                            "Cannot {} {}, (task {}.{}) because depends on failed {}.{}, (task{}.{}): {}".format(
                                 task["action"], task["item"],
                                 task["instance_action_id"], task["task_index"],
                                 task_dependency["instance_action_id"], task_dependency["task_index"],
-                                task_dependency["action"], task_dependency["item"]))
+                                task_dependency["action"], task_dependency["item"], task_dependency.get("error_msg")))
                 if dependency_not_completed:
                     # Move this task to the end.
                     task["extra"]["tries"] = task["extra"].get("tries", 0) + 1
@@ -530,6 +530,38 @@ class vim_thread(threading.Thread):
                         result, database_update = self.get_net(task)
                     else:
                         raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+                elif task["item"] == 'instance_sfis':
+                    if task["action"] == "CREATE":
+                        result, database_update = self.new_sfi(task)
+                        nb_created += 1
+                    elif task["action"] == "DELETE":
+                        result, database_update = self.del_sfi(task)
+                    else:
+                        raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+                elif task["item"] == 'instance_sfs':
+                    if task["action"] == "CREATE":
+                        result, database_update = self.new_sf(task)
+                        nb_created += 1
+                    elif task["action"] == "DELETE":
+                        result, database_update = self.del_sf(task)
+                    else:
+                        raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+                elif task["item"] == 'instance_classifications':
+                    if task["action"] == "CREATE":
+                        result, database_update = self.new_classification(task)
+                        nb_created += 1
+                    elif task["action"] == "DELETE":
+                        result, database_update = self.del_classification(task)
+                    else:
+                        raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+                elif task["item"] == 'instance_sfps':
+                    if task["action"] == "CREATE":
+                        result, database_update = self.new_sfp(task)
+                        nb_created += 1
+                    elif task["action"] == "DELETE":
+                        result, database_update = self.del_sfp(task)
+                    else:
+                        raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
                 else:
                     raise vimconn.vimconnException(self.name + "unknown task item {}".format(task["item"]))
                     # TODO
@@ -543,11 +575,14 @@ class vim_thread(threading.Thread):
                 elif task["item"] == 'instance_nets':
                     database_update["vim_net_id"] = None
 
+            no_refresh_tasks = ['instance_sfis', 'instance_sfs',
+                                'instance_classifications', 'instance_sfps']
             if task["action"] == "DELETE":
                 action_key = task["item"] + task["item_id"]
                 del self.grouped_tasks[action_key]
             elif task["action"] in ("CREATE", "FIND") and task["status"] in ("DONE", "BUILD"):
-                self._insert_refresh(task)
+                if task["item"] not in no_refresh_tasks:
+                    self._insert_refresh(task)
 
             task_id = task["instance_action_id"] + "." + str(task["task_index"])
             self.logger.debug("task={} item={} action={} result={}:'{}' params={}".format(
@@ -941,3 +976,234 @@ class vim_thread(threading.Thread):
                 return True, None
         task["status"] = "FAILED"
         return False, None
+
+    ## Service Function Instances
+
+    def new_sfi(self, task):
+        vim_sfi_id = None
+        try:
+            params = task["params"]
+            task_id = task["instance_action_id"] + "." + str(task["task_index"])
+            depends = task.get("depends")
+            error_text = ""
+            interfaces = task.get("depends").values()[0].get("extra").get("params")[5]
+            # At the moment, every port associated with the VM will be used both as ingress and egress ports.
+            # Bear in mind that different VIM connectors might support SFI differently. In the case of OpenStack, only the
+            # first ingress and first egress ports will be used to create the SFI (Port Pair).
+            port_id_list = [interfaces[0].get("vim_id")]
+            name = "sfi-%s" % task["item_id"][:8]
+            # By default no form of IETF SFC Encapsulation will be used
+            vim_sfi_id = self.vim.new_sfi(name, port_id_list, port_id_list, sfc_encap=False)
+
+            task["extra"]["created"] = True
+            task["error_msg"] = None
+            task["status"] = "DONE"
+            task["vim_id"] = vim_sfi_id
+            instance_element_update = {"status": "ACTIVE", "vim_sfi_id": vim_sfi_id, "error_msg": None}
+            return True, instance_element_update
+
+        except (vimconn.vimconnException, VimThreadException) as e:
+            self.logger.error("Error creating Service Function Instance, task=%s: %s", task_id, str(e))
+            error_text = self._format_vim_error_msg(str(e))
+            task["error_msg"] = error_text
+            task["status"] = "FAILED"
+            task["vim_id"] = None
+            instance_element_update = {"status": "VIM_ERROR", "vim_sfi_id": None, "error_msg": error_text}
+            return False, instance_element_update
+
+    def del_sfi(self, task):
+        sfi_vim_id = task["vim_id"]
+        try:
+            self.vim.delete_sfi(sfi_vim_id)
+            task["status"] = "DONE"
+            task["error_msg"] = None
+            return True, None
+
+        except vimconn.vimconnException as e:
+            task["error_msg"] = self._format_vim_error_msg(str(e))
+            if isinstance(e, vimconn.vimconnNotFoundException):
+                # If not found mark as Done and fill error_msg
+                task["status"] = "DONE"
+                return True, None
+            task["status"] = "FAILED"
+            return False, None
+
+    def new_sf(self, task):
+        vim_sf_id = None
+        try:
+            params = task["params"]
+            task_id = task["instance_action_id"] + "." + str(task["task_index"])
+            depends = task.get("depends")
+            error_text = ""
+            #sfis = task.get("depends").values()[0].get("extra").get("params")[5]
+            sfis = task.get("depends").values()
+            sfi_id_list = []
+            for sfi in sfis:
+                sfi_id_list.append(sfi.get("vim_id"))
+            name = "sf-%s" % task["item_id"][:8]
+            # By default no form of IETF SFC Encapsulation will be used
+            vim_sf_id = self.vim.new_sf(name, sfi_id_list, sfc_encap=False)
+
+            task["extra"]["created"] = True
+            task["error_msg"] = None
+            task["status"] = "DONE"
+            task["vim_id"] = vim_sf_id
+            instance_element_update = {"status": "ACTIVE", "vim_sf_id": vim_sf_id, "error_msg": None}
+            return True, instance_element_update
+
+        except (vimconn.vimconnException, VimThreadException) as e:
+            self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
+            error_text = self._format_vim_error_msg(str(e))
+            task["error_msg"] = error_text
+            task["status"] = "FAILED"
+            task["vim_id"] = None
+            instance_element_update = {"status": "VIM_ERROR", "vim_sf_id": None, "error_msg": error_text}
+            return False, instance_element_update
+
+    def del_sf(self, task):
+        sf_vim_id = task["vim_id"]
+        try:
+            self.vim.delete_sf(sf_vim_id)
+            task["status"] = "DONE"
+            task["error_msg"] = None
+            return True, None
+
+        except vimconn.vimconnException as e:
+            task["error_msg"] = self._format_vim_error_msg(str(e))
+            if isinstance(e, vimconn.vimconnNotFoundException):
+                # If not found mark as Done and fill error_msg
+                task["status"] = "DONE"
+                return True, None
+            task["status"] = "FAILED"
+            return False, None
+
+    def new_classification(self, task):
+        vim_classification_id = None
+        try:
+            params = task["params"]
+            task_id = task["instance_action_id"] + "." + str(task["task_index"])
+            depends = task.get("depends")
+            error_text = ""
+            interfaces = task.get("depends").values()[0].get("extra").get("params")[5]
+            # Bear in mind that different VIM connectors might support Classifications differently.
+            # In the case of OpenStack, only the first VNF attached to the classifier will be used
+            # to create the Classification(s) (the "logical source port" of the "Flow Classifier").
+            # Since the VNFFG classifier match lacks the ethertype, classification defaults to
+            # using the IPv4 flow classifier.
+            name = "c-%s" % task["item_id"][:8]
+            # if no CIDR is given for the IP addresses, add /32:
+            ip_proto = int(params.get("ip_proto"))
+            source_ip = params.get("source_ip")
+            destination_ip = params.get("destination_ip")
+            if ip_proto == 1:
+                ip_proto = 'icmp'
+            elif ip_proto == 6:
+                ip_proto = 'tcp'
+            elif ip_proto == 17:
+                ip_proto = 'udp'
+            if '/' not in source_ip:
+                source_ip += '/32'
+            if '/' not in destination_ip:
+                destination_ip += '/32'
+            definition = {
+                    "logical_source_port": interfaces[0].get("vim_id"),
+                    "protocol": ip_proto,
+                    "source_ip_prefix": source_ip,
+                    "destination_ip_prefix": destination_ip,
+                    "source_port_range_min": params.get("source_port"),
+                    "source_port_range_max": params.get("source_port"),
+                    "destination_port_range_min": params.get("destination_port"),
+                    "destination_port_range_max": params.get("destination_port"),
+            }
+
+            vim_classification_id = self.vim.new_classification(
+                name, 'legacy_flow_classifier', definition)
+
+            task["extra"]["created"] = True
+            task["error_msg"] = None
+            task["status"] = "DONE"
+            task["vim_id"] = vim_classification_id
+            instance_element_update = {"status": "ACTIVE", "vim_classification_id": vim_classification_id, "error_msg": None}
+            return True, instance_element_update
+
+        except (vimconn.vimconnException, VimThreadException) as e:
+            self.logger.error("Error creating Classification, task=%s: %s", task_id, str(e))
+            error_text = self._format_vim_error_msg(str(e))
+            task["error_msg"] = error_text
+            task["status"] = "FAILED"
+            task["vim_id"] = None
+            instance_element_update = {"status": "VIM_ERROR", "vim_classification_id": None, "error_msg": error_text}
+            return False, instance_element_update
+
+    def del_classification(self, task):
+        classification_vim_id = task["vim_id"]
+        try:
+            self.vim.delete_classification(classification_vim_id)
+            task["status"] = "DONE"
+            task["error_msg"] = None
+            return True, None
+
+        except vimconn.vimconnException as e:
+            task["error_msg"] = self._format_vim_error_msg(str(e))
+            if isinstance(e, vimconn.vimconnNotFoundException):
+                # If not found mark as Done and fill error_msg
+                task["status"] = "DONE"
+                return True, None
+            task["status"] = "FAILED"
+            return False, None
+
+    def new_sfp(self, task):
+        vim_sfp_id = None
+        try:
+            params = task["params"]
+            task_id = task["instance_action_id"] + "." + str(task["task_index"])
+            depends = task.get("depends")
+            error_text = ""
+            deps = task.get("depends").values()
+            sf_id_list = []
+            classification_id_list = []
+            for dep in deps:
+                vim_id = dep.get("vim_id")
+                resource = dep.get("item")
+                if resource == "instance_sfs":
+                    sf_id_list.append(vim_id)
+                elif resource == "instance_classifications":
+                    classification_id_list.append(vim_id)
+
+            name = "sfp-%s" % task["item_id"][:8]
+            # By default no form of IETF SFC Encapsulation will be used
+            vim_sfp_id = self.vim.new_sfp(name, classification_id_list, sf_id_list, sfc_encap=False)
+
+            task["extra"]["created"] = True
+            task["error_msg"] = None
+            task["status"] = "DONE"
+            task["vim_id"] = vim_sfp_id
+            instance_element_update = {"status": "ACTIVE", "vim_sfp_id": vim_sfp_id, "error_msg": None}
+            return True, instance_element_update
+
+        except (vimconn.vimconnException, VimThreadException) as e:
+            self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
+            error_text = self._format_vim_error_msg(str(e))
+            task["error_msg"] = error_text
+            task["status"] = "FAILED"
+            task["vim_id"] = None
+            instance_element_update = {"status": "VIM_ERROR", "vim_sfp_id": None, "error_msg": error_text}
+            return False, instance_element_update
+
+    def del_sfp(self, task):
+        sfp_vim_id = task["vim_id"]
+        try:
+            self.vim.delete_sfp(sfp_vim_id)
+            task["status"] = "DONE"
+            task["error_msg"] = None
+            return True, None
+
+        except vimconn.vimconnException as e:
+            task["error_msg"] = self._format_vim_error_msg(str(e))
+            if isinstance(e, vimconn.vimconnNotFoundException):
+                # If not found mark as Done and fill error_msg
+                task["status"] = "DONE"
+                return True, None
+            task["status"] = "FAILED"
+            return False, None
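As an illustration of new_classification() above: for a match with ip_proto 6, source 10.0.0.10, destination 10.0.1.0/24 and port 80, the definition handed to the VIM connector would look roughly as follows (the logical source port id is a placeholder taken from the depending VM task):

    definition = {
        "logical_source_port": "<vim id of the first VM interface>",
        "protocol": "tcp",                    # ip_proto 6 mapped to its name
        "source_ip_prefix": "10.0.0.10/32",   # /32 appended when no CIDR is given
        "destination_ip_prefix": "10.0.1.0/24",
        "source_port_range_min": 80,
        "source_port_range_max": 80,
        "destination_port_range_min": 80,
        "destination_port_range_max": 80,
    }
    # vim_classification_id = self.vim.new_classification("c-xxxxxxxx",
    #                                                     'legacy_flow_classifier', definition)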
index bdfcb15..fd8f9be 100644 (file)
@@ -321,15 +321,14 @@ class vimconnector():
                 'bridge': overlay isolated network
                 'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
                 'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
-            'ip_profile': is a dict containing the IP parameters of the network (Currently only IPv4 is implemented)
-                'ip-version': can be one of ["IPv4","IPv6"]
-                'subnet-address': ip_prefix_schema, that is X.X.X.X/Y
-                'gateway-address': (Optional) ip_schema, that is X.X.X.X
-                'dns-address': (Optional) ip_schema,
-                'dhcp': (Optional) dict containing
-                    'enabled': {"type": "boolean"},
-                    'start-address': ip_schema, first IP to grant
-                    'count': number of IPs to grant.
+            'ip_profile': is a dict containing the IP parameters of the network
+                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X.X.X.X,...]
+                'dhcp_enabled': True or False
+                'dhcp_start_address': ip_schema, first IP to grant
+                'dhcp_count': number of IPs to grant.
             'shared': if this network can be seen/use by other tenants/organization
             'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
         Returns the network identifier on success or raises and exception on failure
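A minimal ip_profile following the updated description above (values are illustrative):

    ip_profile = {
        "ip_version": "IPv4",
        "subnet_address": "10.10.10.0/24",
        "gateway_address": "10.10.10.1",
        "dns_address": "8.8.8.8",
        "dhcp_enabled": True,
        "dhcp_start_address": "10.10.10.100",
        "dhcp_count": 50,
    }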
@@ -483,6 +482,7 @@ class vimconnector():
                 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
                 'model': (optional and only have sense for type==virtual) interface model: virtio, e2000, ...
                 'mac_address': (optional) mac address to assign to this interface
+                'ip_address': (optional) IP address to assign to this interface
                 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
                     the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
                 'type': (mandatory) can be one of:
index 85b8dc8..55f910b 100644 (file)
@@ -36,7 +36,7 @@ __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor
 __date__  = "$22-sep-2017 23:59:59$"
 
 import vimconn
-import json
+import json
 import logging
 import netaddr
 import time
@@ -353,7 +353,7 @@ class vimconnector(vimconn.vimconnector):
         elif isinstance(exception, nvExceptions.Conflict):
             raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + str(exception))
         elif isinstance(exception, vimconn.vimconnException):
-            raise
+            raise exception
         else:  # ()
             self.logger.error("General Exception " + str(exception), exc_info=True)
             raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
@@ -443,7 +443,7 @@ class vimconnector(vimconn.vimconnector):
             #create subnetwork, even if there is no profile
             if not ip_profile:
                 ip_profile = {}
-            if 'subnet_address' not in ip_profile:
+            if not ip_profile.get('subnet_address'):
                 #Fake subnet is required
                 subnet_rand = random.randint(0, 255)
                 ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
@@ -455,16 +455,18 @@ class vimconnector(vimconn.vimconnector):
                     "cidr": ip_profile['subnet_address']
                     }
             # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
-            subnet['gateway_ip'] = ip_profile.get('gateway_address')
+            if ip_profile.get('gateway_address'):
+                subnet['gateway_ip'] = ip_profile.get('gateway_address')
             if ip_profile.get('dns_address'):
                 subnet['dns_nameservers'] = ip_profile['dns_address'].split(";")
             if 'dhcp_enabled' in ip_profile:
-                subnet['enable_dhcp'] = False if ip_profile['dhcp_enabled']=="false" else True
-            if 'dhcp_start_address' in ip_profile:
+                subnet['enable_dhcp'] = ip_profile['dhcp_enabled'] not in ("false", False)
+            if ip_profile.get('dhcp_start_address'):
                 subnet['allocation_pools'] = []
                 subnet['allocation_pools'].append(dict())
                 subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address']
-            if 'dhcp_count' in ip_profile:
+            if ip_profile.get('dhcp_count'):
                 #parts = ip_profile['dhcp_start_address'].split('.')
                 #ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                 ip_int = int(netaddr.IPAddress(ip_profile['dhcp_start_address']))
@@ -474,7 +476,7 @@ class vimconnector(vimconn.vimconnector):
             #self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
             self.neutron.create_subnet({"subnet": subnet} )
             return new_net["network"]["id"]
-        except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
+        except Exception as e:
             if new_net:
                 self.neutron.delete_network(new_net['network']['id'])
             self._format_exception(e)
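With an ip_profile like the example given for vimconn.py above, the code in this hunk builds approximately the following subnet request for Neutron; name and network_id are assumed, and the DHCP pool end is start + count - 1:

    subnet = {
        "name": "mgmt-subnet",            # assumed
        "network_id": "<new_net id>",     # assumed
        "ip_version": 4,
        "cidr": "10.10.10.0/24",
        "gateway_ip": "10.10.10.1",       # only set when a gateway is given
        "enable_dhcp": True,
        "allocation_pools": [{"start": "10.10.10.100", "end": "10.10.10.149"}],
    }
    # self.neutron.create_subnet({"subnet": subnet})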
@@ -493,9 +495,10 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
         try:
             self._reload_connection()
-            if self.api_version3 and "tenant_id" in filter_dict:
-                filter_dict['project_id'] = filter_dict.pop('tenant_id') #TODO check
-            net_dict = self.neutron.list_networks(**filter_dict)
+            filter_dict_os = filter_dict.copy()
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')  # TODO check
+            net_dict = self.neutron.list_networks(**filter_dict_os)
             net_list = net_dict["networks"]
             self.__net_os2mano(net_list)
             return net_list
@@ -840,9 +843,9 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
         try:
             self._reload_connection()
-            filter_dict_os=filter_dict.copy()
+            filter_dict_os = filter_dict.copy()
             #First we filter by the available filter fields: name, id. The others are removed.
-            filter_dict_os.pop('checksum',None)
+            filter_dict_os.pop('checksum', None)
             image_list = self.nova.images.findall(**filter_dict_os)
             if len(image_list) == 0:
                 return []
@@ -851,7 +854,7 @@ class vimconnector(vimconn.vimconnector):
             for image in image_list:
                 try:
                     image_class = self.glance.images.get(image.id)
-                    if 'checksum' not in filter_dict or image_class['checksum']==filter_dict.get('checksum'):
+                    if 'checksum' not in filter_dict or image_class['checksum'] == filter_dict.get('checksum'):
                         filtered_list.append(image_class.copy())
                 except gl1Exceptions.HTTPNotFound:
                     pass
@@ -952,20 +955,20 @@ class vimconnector(vimconn.vimconnector):
                 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                 vim_id: filled/added by this function
                 floating_ip: True/False (or it can be None)
-                'cloud_config': (optional) dictionary with:
-                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
-                'users': (optional) list of users to be inserted, each item is a dict with:
-                    'name': (mandatory) user name,
-                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
-                'user-data': (optional) string is a text script to be passed directly to cloud-init
-                'config-files': (optional). List of files to be transferred. Each item is a dict with:
-                    'dest': (mandatory) string with the destination absolute path
-                    'encoding': (optional, by default text). Can be one of:
-                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
-                    'content' (mandatory): string with the content of the file
-                    'permissions': (optional) string with file permissions, typically octal notation '0644'
-                    'owner': (optional) file owner, string with the format 'owner:group'
-                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+            'cloud_config': (optional) dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) string with a text script to be passed directly to cloud-init
+                'config-files': (optional) list of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content': (mandatory) string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    'owner': (optional) file owner, string with the format 'owner:group'
+                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
             'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
                 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                 'size': (mandatory) string with the size of the disk in GB
@@ -1032,6 +1035,9 @@ class vimconnector(vimconn.vimconnector):
                     port_dict["name"]=name
                 if net.get("mac_address"):
                     port_dict["mac_address"]=net["mac_address"]
+                if net.get("ip_address"):
+                    port_dict["fixed_ips"] = [{'ip_address': net["ip_address"]}]
+                    # TODO add 'subnet_id': <subnet_id>
                 new_port = self.neutron.create_port({"port": port_dict })
                 created_items["port:" + str(new_port["port"]["id"])] = True
                 net["mac_adress"] = new_port["port"]["mac_address"]
@@ -1054,6 +1060,7 @@ class vimconnector(vimconn.vimconnector):
                 elif net['use'] == 'mgmt' and self.config.get('use_floating_ip'):
                     net['exit_on_floating_ip_error'] = False
                     external_network.append(net)
+                    net['floating_ip'] = self.config.get('use_floating_ip')
 
                 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic is dropped.
                 # As a workaround we wait until the VM is active and then disable the port-security
@@ -1125,6 +1132,7 @@ class vimconnector(vimconn.vimconnector):
                                               block_device_mapping=block_device_mapping
                                               )  # , description=description)
 
+            vm_start_time = time.time()
             # Previously mentioned workaround to wait until the VM is active and then disable the port-security
             if no_secured_ports:
                 self.__wait_for_vm(server.id, 'ACTIVE')
@@ -1141,49 +1149,61 @@ class vimconnector(vimconn.vimconnector):
             pool_id = None
             if external_network:
                 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
-                self.__wait_for_vm(server.id, 'ACTIVE')
-
             for floating_network in external_network:
                 try:
                     assigned = False
                     while not assigned:
                         if floating_ips:
                             ip = floating_ips.pop(0)
-                            if not ip.get("port_id", False) and ip.get('tenant_id') == server.tenant_id:
-                                free_floating_ip = ip.get("floating_ip_address")
-                                try:
-                                    fix_ip = floating_network.get('ip')
-                                    server.add_floating_ip(free_floating_ip, fix_ip)
-                                    assigned = True
-                                except Exception as e:
-                                    raise vimconn.vimconnException(type(e).__name__ + ": Cannot create floating_ip "+  str(e), http_code=vimconn.HTTP_Conflict)
+                            if ip.get("port_id", False) or ip.get('tenant_id') != server.tenant_id:
+                                continue
+                            if isinstance(floating_network['floating_ip'], str):
+                                if ip.get("floating_network_id") != floating_network['floating_ip']:
+                                    continue
+                            free_floating_ip = ip.get("floating_ip_address")
                         else:
-                            #Find the external network
-                            external_nets = list()
-                            for net in self.neutron.list_networks()['networks']:
-                                if net['router:external']:
-                                        external_nets.append(net)
-
-                            if len(external_nets) == 0:
-                                raise vimconn.vimconnException("Cannot create floating_ip automatically since no external "
-                                                               "network is present",
-                                                                http_code=vimconn.HTTP_Conflict)
-                            if len(external_nets) > 1:
-                                raise vimconn.vimconnException("Cannot create floating_ip automatically since multiple "
-                                                               "external networks are present",
-                                                               http_code=vimconn.HTTP_Conflict)
-
-                            pool_id = external_nets[0].get('id')
+                            if isinstance(floating_network['floating_ip'], str):
+                                pool_id = floating_network['floating_ip']
+                            else:
+                                #Find the external network
+                                external_nets = list()
+                                for net in self.neutron.list_networks()['networks']:
+                                    if net['router:external']:
+                                            external_nets.append(net)
+
+                                if len(external_nets) == 0:
+                                    raise vimconn.vimconnException("Cannot create floating_ip automatically since no external "
+                                                                   "network is present",
+                                                                    http_code=vimconn.HTTP_Conflict)
+                                if len(external_nets) > 1:
+                                    raise vimconn.vimconnException("Cannot create floating_ip automatically since multiple "
+                                                                   "external networks are present",
+                                                                   http_code=vimconn.HTTP_Conflict)
+
+                                pool_id = external_nets[0].get('id')
                             param = {'floatingip': {'floating_network_id': pool_id, 'tenant_id': server.tenant_id}}
                             try:
                                 #self.logger.debug("Creating floating IP")
                                 new_floating_ip = self.neutron.create_floatingip(param)
                                 free_floating_ip = new_floating_ip['floatingip']['floating_ip_address']
-                                fix_ip = floating_network.get('ip')
+                            except Exception as e:
+                                raise vimconn.vimconnException(type(e).__name__ + ": Cannot create new floating_ip " +
+                                                               str(e), http_code=vimconn.HTTP_Conflict)
+
+                        fix_ip = floating_network.get('ip')
+                        while not assigned:
+                            try:
                                 server.add_floating_ip(free_floating_ip, fix_ip)
-                                assigned=True
+                                assigned = True
                             except Exception as e:
-                                raise vimconn.vimconnException(type(e).__name__ + ": Cannot assign floating_ip "+  str(e), http_code=vimconn.HTTP_Conflict)
+                                vm_status = self.nova.servers.get(server.id).status
+                                if vm_status != 'ACTIVE' and vm_status != 'ERROR':
+                                    if time.time() - vm_start_time < server_timeout:
+                                        time.sleep(5)
+                                        continue
+                                raise vimconn.vimconnException(type(e).__name__ + ": Cannot create floating_ip "+  str(e),
+                                                               http_code=vimconn.HTTP_Conflict)
+
                 except Exception as e:
                     if not floating_network['exit_on_floating_ip_error']:
                         self.logger.warn("Cannot create floating_ip. %s", str(e))
@@ -1592,8 +1612,7 @@ class vimconnector(vimconn.vimconnector):
             error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
         #TODO insert exception vimconn.HTTP_Unauthorized
         #if reaching here is because an exception
-        if self.debug:
-            self.logger.debug("new_user " + error_text)
+        self.logger.debug("new_user " + error_text)
         return error_value, error_text
 
     def delete_user(self, user_id):
@@ -1616,8 +1635,7 @@ class vimconnector(vimconn.vimconnector):
             error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
         #TODO insert exception vimconn.HTTP_Unauthorized
         #if reaching here is because an exception
-        if self.debug:
-            print("delete_tenant " + error_text)
+        self.logger.debug("delete_tenant " + error_text)
         return error_value, error_text
 
     def get_hosts_info(self):
@@ -1640,8 +1658,7 @@ class vimconnector(vimconn.vimconnector):
             error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
         #TODO insert exception vimconn.HTTP_Unauthorized
         #if reaching here is because an exception
-        if self.debug:
-            print("get_hosts_info " + error_text)
+        self.logger.debug("get_hosts_info " + error_text)
         return error_value, error_text
 
     def get_hosts(self, vim_tenant):
@@ -1669,8 +1686,7 @@ class vimconnector(vimconn.vimconnector):
             error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
         #TODO insert exception vimconn.HTTP_Unauthorized
         #if reaching here is because an exception
-        if self.debug:
-            print("get_hosts " + error_text)
+        self.logger.debug("get_hosts " + error_text)
         return error_value, error_text
 
     def new_classification(self, name, ctype, definition):
@@ -1691,7 +1707,7 @@ class vimconnector(vimconn.vimconnector):
             classification_dict = definition
             classification_dict['name'] = name
 
-            new_class = self.neutron.create_flow_classifier(
+            new_class = self.neutron.create_sfc_flow_classifier(
                 {'flow_classifier': classification_dict})
             return new_class['flow_classifier']['id']
         except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
@@ -1717,11 +1733,12 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Getting Classifications from VIM filter: '%s'",
                           str(filter_dict))
         try:
+            filter_dict_os = filter_dict.copy()
             self._reload_connection()
-            if self.api_version3 and "tenant_id" in filter_dict:
-                filter_dict['project_id'] = filter_dict.pop('tenant_id')
-            classification_dict = self.neutron.list_flow_classifier(
-                **filter_dict)
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+            classification_dict = self.neutron.list_sfc_flow_classifiers(
+                **filter_dict_os)
             classification_list = classification_dict["flow_classifiers"]
             self.__classification_os2mano(classification_list)
             return classification_list
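
Copying the filter before renaming the key keeps the caller's dictionary untouched when the connector translates OSM's tenant_id into the project_id field expected by Keystone v3; a minimal sketch of that translation as a standalone function (the function name is illustrative):

    def translate_filter(filter_dict, api_version3):
        """Return a copy of filter_dict with tenant_id renamed for Keystone v3."""
        filter_dict_os = dict(filter_dict or {})
        if api_version3 and 'tenant_id' in filter_dict_os:
            filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
        return filter_dict_os
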
@@ -1733,7 +1750,7 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Deleting Classification '%s' from VIM", class_id)
         try:
             self._reload_connection()
-            self.neutron.delete_flow_classifier(class_id)
+            self.neutron.delete_sfc_flow_classifier(class_id)
             return class_id
         except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
                 ksExceptions.ClientException, neExceptions.NeutronException,
@@ -1748,9 +1765,7 @@ class vimconnector(vimconn.vimconnector):
             self._reload_connection()
             correlation = None
             if sfc_encap:
-                # TODO(igordc): must be changed to NSH in Queens
-                # (MPLS is a workaround)
-                correlation = 'mpls'
+                correlation = 'nsh'
             if len(ingress_ports) != 1:
                 raise vimconn.vimconnNotSupportedException(
                     "OpenStack VIM connector can only have "
@@ -1764,13 +1779,13 @@ class vimconnector(vimconn.vimconnector):
                         'egress': egress_ports[0],
                         'service_function_parameters': {
                             'correlation': correlation}}
-            new_sfi = self.neutron.create_port_pair({'port_pair': sfi_dict})
+            new_sfi = self.neutron.create_sfc_port_pair({'port_pair': sfi_dict})
             return new_sfi['port_pair']['id']
         except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
                 neExceptions.NeutronException, ConnectionError) as e:
             if new_sfi:
                 try:
-                    self.neutron.delete_port_pair_group(
+                    self.neutron.delete_sfc_port_pair(
                         new_sfi['port_pair']['id'])
                 except Exception:
                     self.logger.error(
@@ -1798,9 +1813,10 @@ class vimconnector(vimconn.vimconnector):
                           "VIM filter: '%s'", str(filter_dict))
         try:
             self._reload_connection()
-            if self.api_version3 and "tenant_id" in filter_dict:
-                filter_dict['project_id'] = filter_dict.pop('tenant_id')
-            sfi_dict = self.neutron.list_port_pair(**filter_dict)
+            filter_dict_os = filter_dict.copy()
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+            sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
             sfi_list = sfi_dict["port_pairs"]
             self.__sfi_os2mano(sfi_list)
             return sfi_list
@@ -1813,7 +1829,7 @@ class vimconnector(vimconn.vimconnector):
                           "from VIM", sfi_id)
         try:
             self._reload_connection()
-            self.neutron.delete_port_pair(sfi_id)
+            self.neutron.delete_sfc_port_pair(sfi_id)
             return sfi_id
         except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
                 ksExceptions.ClientException, neExceptions.NeutronException,
@@ -1826,27 +1842,25 @@ class vimconnector(vimconn.vimconnector):
         try:
             new_sf = None
             self._reload_connection()
-            correlation = None
-            if sfc_encap:
-                # TODO(igordc): must be changed to NSH in Queens
-                # (MPLS is a workaround)
-                correlation = 'mpls'
+            # correlation = None
+            # if sfc_encap:
+            #     correlation = 'nsh'
             for instance in sfis:
                 sfi = self.get_sfi(instance)
-                if sfi.get('sfc_encap') != correlation:
+                if sfi.get('sfc_encap') != sfc_encap:
                     raise vimconn.vimconnNotSupportedException(
                         "OpenStack VIM connector requires all SFIs of the "
                         "same SF to share the same SFC Encapsulation")
             sf_dict = {'name': name,
                        'port_pairs': sfis}
-            new_sf = self.neutron.create_port_pair_group({
+            new_sf = self.neutron.create_sfc_port_pair_group({
                 'port_pair_group': sf_dict})
             return new_sf['port_pair_group']['id']
         except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
                 neExceptions.NeutronException, ConnectionError) as e:
             if new_sf:
                 try:
-                    self.neutron.delete_port_pair_group(
+                    self.neutron.delete_sfc_port_pair_group(
                         new_sf['port_pair_group']['id'])
                 except Exception:
                     self.logger.error(
@@ -1872,9 +1886,10 @@ class vimconnector(vimconn.vimconnector):
                           str(filter_dict))
         try:
             self._reload_connection()
-            if self.api_version3 and "tenant_id" in filter_dict:
-                filter_dict['project_id'] = filter_dict.pop('tenant_id')
-            sf_dict = self.neutron.list_port_pair_group(**filter_dict)
+            filter_dict_os = filter_dict.copy()
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+            sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
             sf_list = sf_dict["port_pair_groups"]
             self.__sf_os2mano(sf_list)
             return sf_list
@@ -1886,7 +1901,7 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
         try:
             self._reload_connection()
-            self.neutron.delete_port_pair_group(sf_id)
+            self.neutron.delete_sfc_port_pair_group(sf_id)
             return sf_id
         except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
                 ksExceptions.ClientException, neExceptions.NeutronException,
@@ -1899,26 +1914,24 @@ class vimconnector(vimconn.vimconnector):
         try:
             new_sfp = None
             self._reload_connection()
-            if not sfc_encap:
-                raise vimconn.vimconnNotSupportedException(
-                    "OpenStack VIM connector only supports "
-                    "SFC-Encapsulated chains")
-            # TODO(igordc): must be changed to NSH in Queens
-            # (MPLS is a workaround)
-            correlation = 'mpls'
+            # In networking-sfc, the MPLS correlation is the legacy encapsulation,
+            # used when no full SFC Encapsulation (NSH) is requested
+            correlation = 'mpls'
+            if sfc_encap:
+                correlation = 'nsh'
             sfp_dict = {'name': name,
                         'flow_classifiers': classifications,
                         'port_pair_groups': sfs,
                         'chain_parameters': {'correlation': correlation}}
             if spi:
                 sfp_dict['chain_id'] = spi
-            new_sfp = self.neutron.create_port_chain({'port_chain': sfp_dict})
+            new_sfp = self.neutron.create_sfc_port_chain({'port_chain': sfp_dict})
             return new_sfp["port_chain"]["id"]
         except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
                 neExceptions.NeutronException, ConnectionError) as e:
             if new_sfp:
                 try:
-                    self.neutron.delete_port_chain(new_sfp['port_chain']['id'])
+                    self.neutron.delete_sfc_port_chain(new_sfp['port_chain']['id'])
                 except Exception:
                     self.logger.error(
                         'Creation of Service Function Path failed, with '
@@ -1943,9 +1956,10 @@ class vimconnector(vimconn.vimconnector):
                           "'%s'", str(filter_dict))
         try:
             self._reload_connection()
-            if self.api_version3 and "tenant_id" in filter_dict:
-                filter_dict['project_id'] = filter_dict.pop('tenant_id')
-            sfp_dict = self.neutron.list_port_chain(**filter_dict)
+            filter_dict_os = filter_dict.copy()
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+            sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
             sfp_list = sfp_dict["port_chains"]
             self.__sfp_os2mano(sfp_list)
             return sfp_list
@@ -1958,7 +1972,7 @@ class vimconnector(vimconn.vimconnector):
             "Deleting Service Function Path '%s' from VIM", sfp_id)
         try:
             self._reload_connection()
-            self.neutron.delete_port_chain(sfp_id)
+            self.neutron.delete_sfc_port_chain(sfp_id)
             return sfp_id
         except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
                 ksExceptions.ClientException, neExceptions.NeutronException,
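
All of the SFC calls above move from the old flow_classifier/port_pair/port_chain helpers to the sfc_-prefixed methods that python-neutronclient exposes for the networking-sfc extension. A minimal end-to-end sketch of building a chain with those calls; the Keystone session and the ingress/egress port ids are assumed to exist and are illustrative:

    from neutronclient.v2_0 import client as neutron_client

    neutron = neutron_client.Client(session=keystone_session)  # assumed session

    fc = neutron.create_sfc_flow_classifier(
        {'flow_classifier': {'name': 'http-traffic', 'protocol': 'tcp',
                             'destination_port_range_min': 80,
                             'destination_port_range_max': 80}})
    pp = neutron.create_sfc_port_pair(
        {'port_pair': {'name': 'fw-pp', 'ingress': ingress_port_id,
                       'egress': egress_port_id,
                       'service_function_parameters': {'correlation': 'nsh'}}})
    ppg = neutron.create_sfc_port_pair_group(
        {'port_pair_group': {'name': 'fw-ppg',
                             'port_pairs': [pp['port_pair']['id']]}})
    chain = neutron.create_sfc_port_chain(
        {'port_chain': {'name': 'fw-chain',
                        'port_pair_groups': [ppg['port_pair_group']['id']],
                        'flow_classifiers': [fc['flow_classifier']['id']],
                        'chain_parameters': {'correlation': 'nsh'}}})
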
index b34fdf0..417092f 100644 (file)
@@ -850,6 +850,8 @@ class vimconnector(vimconn.vimconnector):
                         net_dict["model"] = net["model"]
                 if net.get("mac_address"):
                     net_dict["mac_address"] = net["mac_address"]
+                if net.get("ip_address"):
+                    net_dict["ip_address"] = net["ip_address"]
                 virtio_net_list.append(net_dict)
             payload_dict={  "name":        name[:64],
                             "description": description,
@@ -952,7 +954,7 @@ class vimconnector(vimconn.vimconnector):
             vm={}
             #print "VIMConnector refresh_tenant_vms and nets: Getting tenant VM instance information from VIM"
             try:
-                url = self.url+'/'+self.tenant+'/servers/'+ vm_id
+                url = self.url + '/' + self.tenant + '/servers/' + vm_id
                 self.logger.info("Getting vm GET %s", url)
                 vim_response = requests.get(url, headers = self.headers_req)
                 self._check_http_request_response(vim_response)
@@ -969,7 +971,7 @@ class vimconnector(vimconn.vimconnector):
                 #get interfaces info
                 try:
                     management_ip = False
-                    url2 = self.url+'/ports?device_id='+ quote(vm_id)
+                    url2 = self.url + '/ports?device_id=' + quote(vm_id)
                     self.logger.info("Getting PORTS GET %s", url2)
                     vim_response2 = requests.get(url2, headers = self.headers_req)
                     self._check_http_request_response(vim_response2)
@@ -978,7 +980,7 @@ class vimconnector(vimconn.vimconnector):
                         vm["interfaces"]=[]
                     for port in client_data.get("ports"):
                         interface={}
-                        interface['vim_info']  = yaml.safe_dump(port)
+                        interface['vim_info'] = yaml.safe_dump(port)
                         interface["mac_address"] = port.get("mac_address")
                         interface["vim_net_id"] = port.get("network_id")
                         interface["vim_interface_id"] = port["id"]
index 9c26574..4c81b9b 100644 (file)
@@ -42,23 +42,19 @@ from xml.etree import ElementTree as XmlElementTree
 from lxml import etree as lxmlElementTree
 
 import yaml
-from pyvcloud import Http
-from pyvcloud.vcloudair import VCA
-from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
-    vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
-    networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
+from pyvcloud.vcd.client import BasicLoginCredentials, Client, VcdTaskException
+from pyvcloud.vcd.vdc import VDC
+from pyvcloud.vcd.org import Org
+from pyvcloud.vcd.vapp import VApp
+import re
 from xml.sax.saxutils import escape
-
-from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
-from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
-from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
-from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
-
 import logging
 import json
 import time
 import uuid
 import httplib
+#For python3
+#import http.client
 import hashlib
 import socket
 import struct
@@ -80,11 +76,11 @@ DEFAULT_IP_PROFILE = {'dhcp_count':50,
 INTERVAL_TIME = 5
 MAX_WAIT_TIME = 1800
 
-VCAVERSION = '5.9'
+API_VERSION = '5.9'
 
-__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
-__date__ = "$12-Jan-2017 11:09:29$"
-__version__ = '0.1'
+__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
+__date__ = "$09-Mar-2018 11:09:29$"
+__version__ = '0.2'
 
 #     -1: "Could not be created",
 #     0: "Unresolved",
@@ -179,6 +175,9 @@ class vimconnector(vimconn.vimconnector):
         self.nsx_user = None
         self.nsx_password = None
 
+        # Disable warnings from self-signed certificates.
+        requests.packages.urllib3.disable_warnings()
+
         if tenant_name is not None:
             orgnameandtenant = tenant_name.split(":")
             if len(orgnameandtenant) == 2:
@@ -219,7 +218,7 @@ class vimconnector(vimconn.vimconnector):
 #         self.vlanID_range = config.get("vlanID_range", None)
 
         self.org_uuid = None
-        self.vca = None
+        self.client = None
 
         if not url:
             raise vimconn.vimconnException('url param can not be NoneType')
@@ -292,59 +291,41 @@ class vimconnector(vimconn.vimconnector):
             Organization creation / provider network creation etc.
 
             Returns:
-                The return vca object that letter can be used to connect to vcloud direct as admin for provider vdc
+                The returned client object, which can later be used to connect to vCloud Director as admin for the provider VDC
         """
 
-        self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))
+        self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
 
-        vca_admin = VCA(host=self.url,
-                        username=self.admin_user,
-                        service_type=STANDALONE,
-                        version=VCAVERSION,
-                        verify=False,
-                        log=False)
-        result = vca_admin.login(password=self.admin_password, org='System')
-        if not result:
-            raise vimconn.vimconnConnectionException(
-                "Can't connect to a vCloud director as: {}".format(self.admin_user))
-        result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
-        if result is True:
-            self.logger.info(
-                "Successfully logged to a vcloud direct org: {} as user: {}".format('System', self.admin_user))
+        try:
+            host = self.url
+            org = 'System'
+            client_as_admin = Client(host, verify_ssl_certs=False)
+            client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
+        except Exception as e:
+            raise vimconn.vimconnException(
+                  "Can't connect to vCloud Director as {}: {}".format(self.admin_user, e))
 
-        return vca_admin
+        return client_as_admin
 
     def connect(self):
         """ Method connect as normal user to vCloud director.
 
             Returns:
-                The return vca object that letter can be used to connect to vCloud director as admin for VDC
+                The returned client object, which can later be used to connect to vCloud Director as admin for the VDC
         """
 
         try:
-            self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
+            self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
                                                                                       self.user,
                                                                                       self.org_name))
-            vca = VCA(host=self.url,
-                      username=self.user,
-                      service_type=STANDALONE,
-                      version=VCAVERSION,
-                      verify=False,
-                      log=False)
-
-            result = vca.login(password=self.passwd, org=self.org_name)
-            if not result:
-                raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
-            result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
-            if result is True:
-                self.logger.info(
-                    "Successfully logged to a vcloud direct org: {} as user: {}".format(self.org_name, self.user))
-
+            host = self.url
+            client = Client(host, verify_ssl_certs=False)
+            client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
         except:
             raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
                                                      "{} as user: {}".format(self.org_name, self.user))
 
-        return vca
+        return client
 
     def init_organization(self):
         """ Method initialize organization UUID and VDC parameters.
@@ -357,18 +338,18 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                 The return vca object that letter can be used to connect to vcloud direct as admin
         """
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
+        client = self.connect()
+        if not client:
+            raise vimconn.vimconnConnectionException("Failed to connect to vCD.")
 
-        self.vca = vca
+        self.client = client
         try:
             if self.org_uuid is None:
-                org_dict = self.get_org_list()
-                for org in org_dict:
+                org_list = client.get_org_list()
+                for org in org_list.Org:
                     # we set org UUID at the init phase but we can do it only when we have valid credential.
-                    if org_dict[org] == self.org_name:
-                        self.org_uuid = org
+                    if org.get('name') == self.org_name:
+                        self.org_uuid = org.get('href').split('/')[-1]
                         self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
                         break
                 else:
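
With the new pyvcloud API the connector logs in through a Client object and BasicLoginCredentials instead of the removed VCA class, and resolves the organization UUID from get_org_list() as shown above. A minimal sketch of that login sequence; the host, organization and credentials are placeholders:

    from pyvcloud.vcd.client import BasicLoginCredentials, Client
    from pyvcloud.vcd.org import Org

    client = Client('vcd.example.com', verify_ssl_certs=False)
    client.set_credentials(BasicLoginCredentials('admin_user', 'MyOrg', 'password'))

    # find the org href the same way init_organization() does
    org_uuid = None
    for org in client.get_org_list().Org:
        if org.get('name') == 'MyOrg':
            org_uuid = org.get('href').split('/')[-1]
            break

    org = Org(client, resource=client.get_org())  # wrapper for further org operations
    client.logout()
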
@@ -421,7 +402,7 @@ class vimconnector(vimconn.vimconnector):
         vdc_task = self.create_vdc(vdc_name=tenant_name)
         if vdc_task is not None:
             vdc_uuid, value = vdc_task.popitem()
-            self.logger.info("Crated new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
+            self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
             return vdc_uuid
         else:
             raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
@@ -437,17 +418,19 @@ class vimconnector(vimconn.vimconnector):
         """
         vca = self.connect_as_admin()
         if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
+            raise vimconn.vimconnConnectionException("Failed to connect to vCD")
 
         if tenant_id is not None:
-            if vca.vcloud_session and vca.vcloud_session.organization:
+            if vca._session:
                 #Get OrgVDC
-                url_list = [self.vca.host, '/api/vdc/', tenant_id]
+                url_list = [self.url, '/api/vdc/', tenant_id]
                 orgvdc_herf = ''.join(url_list)
-                response = Http.get(url=orgvdc_herf,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                                url=orgvdc_herf,
+                                                headers=headers)
 
                 if response.status_code != requests.codes.ok:
                     self.logger.debug("delete_tenant():GET REST API call {} failed. "\
@@ -457,22 +440,19 @@ class vimconnector(vimconn.vimconnector):
 
                 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
                 namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+                #For python3
+                #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
                 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
                 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
                 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
 
-                #Delete OrgVDC
-                response = Http.delete(url=vdc_remove_href,
-                                    headers=vca.vcloud_session.get_vcloud_headers(),
-                                    verify=vca.verify,
-                                    logger=vca.logger)
+                response = self.perform_request(req_type='DELETE',
+                                                url=vdc_remove_href,
+                                                headers=headers)
 
                 if response.status_code == 202:
-                        delete_vdc_task = taskType.parseString(response.content, True)
-                        if type(delete_vdc_task) is GenericTask:
-                            self.vca.block_until_completed(delete_vdc_task)
-                            self.logger.info("Deleted tenant with ID {}".format(tenant_id))
-                            return tenant_id
+                    time.sleep(5)
+                    return tenant_id
                 else:
                     self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
                                       "Return status code {}".format(vdc_remove_href,
@@ -556,35 +536,66 @@ class vimconnector(vimconn.vimconnector):
         if not self.tenant_name:
             raise vimconn.vimconnConnectionException("Tenant name is empty.")
 
-        vdc = self.get_vdc_details()
+        org, vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
 
-        vdc_uuid = vdc.get_id().split(":")[3]
-        networks = self.vca.get_networks(vdc.get_name())
+        vdc_uuid = vdc.get('id').split(":")[3]
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=vdc.get('href'),
+                                            headers=headers)
+        if response.status_code != 200:
+            self.logger.error("Failed to get vdc content")
+            raise vimconn.vimconnNotFoundException("Failed to get vdc content")
+        else:
+            content = XmlElementTree.fromstring(response.content)
         network_list = []
         try:
-            for network in networks:
-                filter_dict = {}
-                netid = network.get_id().split(":")
-                if len(netid) != 4:
-                    continue
+            for item in content:
+                if item.tag.split('}')[-1] == 'AvailableNetworks':
+                    for net in item:
+                        response = self.perform_request(req_type='GET',
+                                                   url=net.get('href'),
+                                                       headers=headers)
+
+                        if response.status_code != 200:
+                            self.logger.error("Failed to get network content")
+                            raise vimconn.vimconnNotFoundException("Failed to get network content")
+                        else:
+                            net_details = XmlElementTree.fromstring(response.content)
 
-                filter_dict["name"] = network.get_name()
-                filter_dict["id"] = netid[3]
-                filter_dict["shared"] = network.get_IsShared()
-                filter_dict["tenant_id"] = vdc_uuid
-                if network.get_status() == 1:
-                    filter_dict["admin_state_up"] = True
-                else:
-                    filter_dict["admin_state_up"] = False
-                filter_dict["status"] = "ACTIVE"
-                filter_dict["type"] = "bridge"
-                network_list.append(filter_dict)
-                self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
+                            filter_dict = {}
+                            net_uuid = net_details.get('id').split(":")
+                            if len(net_uuid) != 4:
+                                continue
+                            else:
+                                net_uuid = net_uuid[3]
+                                # create dict entry
+                                self.logger.debug("Adding {} to a list vcd id {} network {}".format(
+                                    net_uuid, vdc_uuid,
+                                    net_details.get('name')))
+                                filter_dict["name"] = net_details.get('name')
+                                filter_dict["id"] = net_uuid
+                                if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+                                    shared = True
+                                else:
+                                    shared = False
+                                filter_dict["shared"] = shared
+                                filter_dict["tenant_id"] = vdc_uuid
+                                if net_details.get('status') == 1:
+                                    filter_dict["admin_state_up"] = True
+                                else:
+                                    filter_dict["admin_state_up"] = False
+                                filter_dict["status"] = "ACTIVE"
+                                filter_dict["type"] = "bridge"
+                                network_list.append(filter_dict)
+                                self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
         except:
-            self.logger.debug("Error in get_vcd_network_list")
-            self.logger.debug(traceback.format_exc())
+            self.logger.debug("Error in get_vcd_network_list", exc_info=True)
             pass
 
         self.logger.debug("get_vcd_network_list returning {}".format(network_list))
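
The pyvcloud Http helpers are gone; every vCD REST call now carries an Accept header pinned to the API version and the session's x-vcloud-authorization token, routed through the connector's perform_request() helper. A minimal sketch of the raw request such a helper is expected to issue (the helper below is illustrative, not the connector's actual implementation):

    import requests

    API_VERSION = '5.9'

    def vcd_request(client, method, url, data=None):
        """Issue a vCloud Director REST call reusing the pyvcloud session token."""
        headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
                   'x-vcloud-authorization':
                       client._session.headers['x-vcloud-authorization']}
        return requests.request(method, url, headers=headers, data=data, verify=False)
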
@@ -612,50 +623,80 @@ class vimconnector(vimconn.vimconnector):
         if not self.tenant_name:
             raise vimconn.vimconnConnectionException("Tenant name is empty.")
 
-        vdc = self.get_vdc_details()
+        org, vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
 
         try:
-            vdcid = vdc.get_id().split(":")[3]
-            networks = self.vca.get_networks(vdc.get_name())
-            network_list = []
+            vdcid = vdc.get('id').split(":")[3]
+
+            if self.client._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                           url=vdc.get('href'),
+                                               headers=headers)
+            if response.status_code != 200:
+                self.logger.error("Failed to get vdc content")
+                raise vimconn.vimconnNotFoundException("Failed to get vdc content")
+            else:
+                content = XmlElementTree.fromstring(response.content)
 
-            for network in networks:
-                filter_entry = {}
-                net_uuid = network.get_id().split(":")
-                if len(net_uuid) != 4:
-                    continue
-                else:
-                    net_uuid = net_uuid[3]
-                # create dict entry
-                self.logger.debug("Adding  {} to a list vcd id {} network {}".format(net_uuid,
-                                                                                     vdcid,
-                                                                                     network.get_name()))
-                filter_entry["name"] = network.get_name()
-                filter_entry["id"] = net_uuid
-                filter_entry["shared"] = network.get_IsShared()
-                filter_entry["tenant_id"] = vdcid
-                if network.get_status() == 1:
-                    filter_entry["admin_state_up"] = True
-                else:
-                    filter_entry["admin_state_up"] = False
-                filter_entry["status"] = "ACTIVE"
-                filter_entry["type"] = "bridge"
-                filtered_entry = filter_entry.copy()
+            network_list = []
+            for item in content:
+                if item.tag.split('}')[-1] == 'AvailableNetworks':
+                    for net in item:
+                        response = self.perform_request(req_type='GET',
+                                                   url=net.get('href'),
+                                                       headers=headers)
+
+                        if response.status_code != 200:
+                            self.logger.error("Failed to get network content")
+                            raise vimconn.vimconnNotFoundException("Failed to get network content")
+                        else:
+                            net_details = XmlElementTree.fromstring(response.content)
 
-                if filter_dict is not None and filter_dict:
-                    # we remove all the key : value we don't care and match only
-                    # respected field
-                    filtered_dict = set(filter_entry.keys()) - set(filter_dict)
-                    for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
-                    if filter_dict == filter_entry:
-                        network_list.append(filtered_entry)
-                else:
-                    network_list.append(filtered_entry)
-        except:
-            self.logger.debug("Error in get_vcd_network_list")
-            self.logger.debug(traceback.format_exc())
+                            filter_entry = {}
+                            net_uuid = net_details.get('id').split(":")
+                            if len(net_uuid) != 4:
+                                continue
+                            else:
+                                net_uuid = net_uuid[3] 
+                                # create dict entry
+                                self.logger.debug("Adding {} to a list vcd id {} network {}".format(
+                                    net_uuid, vdcid,
+                                    net_details.get('name')))
+                                filter_entry["name"] = net_details.get('name')
+                                filter_entry["id"] = net_uuid
+                                if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+                                    shared = True
+                                else:
+                                    shared = False
+                                filter_entry["shared"] = shared
+                                filter_entry["tenant_id"] = vdcid
+                                if net_details.get('status') == 1:
+                                    filter_entry["admin_state_up"] = True
+                                else:
+                                    filter_entry["admin_state_up"] = False
+                                filter_entry["status"] = "ACTIVE"
+                                filter_entry["type"] = "bridge"
+                                filtered_entry = filter_entry.copy()
+
+                                if filter_dict is not None and filter_dict:
+                                    # we remove all the key : value we don't care and match only
+                                    # respected field
+                                    filtered_dict = set(filter_entry.keys()) - set(filter_dict)
+                                    for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
+                                    if filter_dict == filter_entry:
+                                        network_list.append(filtered_entry)
+                                else:
+                                    network_list.append(filtered_entry)
+        except Exception as e:
+            self.logger.debug("Error in get_network_list", exc_info=True)
+            if isinstance(e, vimconn.vimconnException):
+                raise
+            else:
+                raise vimconn.vimconnNotFoundException("Failed to get network list: {}".format(e))
 
         self.logger.debug("Returning {}".format(network_list))
         return network_list
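
Network discovery now walks the VDC XML returned by the REST API instead of calling vca.get_networks(); a minimal sketch of pulling the AvailableNetworks hrefs out of such a document with ElementTree (the function name is illustrative):

    from xml.etree import ElementTree as XmlElementTree

    def available_network_hrefs(vdc_xml):
        """Return the href of every network advertised in a VDC document."""
        root = XmlElementTree.fromstring(vdc_xml)
        hrefs = []
        for item in root:
            # tags are namespace-qualified, e.g. '{http://www.vmware.com/vcloud/v1.5}AvailableNetworks'
            if item.tag.split('}')[-1] == 'AvailableNetworks':
                hrefs.extend(net.get('href') for net in item)
        return hrefs
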
@@ -665,33 +706,55 @@ class vimconnector(vimconn.vimconnector):
            Return a dict with  the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
 
         try:
-            vdc = self.get_vdc_details()
-            vdc_id = vdc.get_id().split(":")[3]
+            org, vdc = self.get_vdc_details()
+            vdc_id = vdc.get('id').split(":")[3]
+            if self.client._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                           url=vdc.get('href'),
+                                               headers=headers)
+            if response.status_code != 200:
+                self.logger.error("Failed to get vdc content")
+                raise vimconn.vimconnNotFoundException("Failed to get vdc content")
+            else:
+                content = XmlElementTree.fromstring(response.content)
 
-            networks = self.vca.get_networks(vdc.get_name())
             filter_dict = {}
 
-            if not networks:
-                vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
-
-            for network in networks:
-                vdc_network_id = network.get_id().split(":")
-                if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
-                    filter_dict["name"] = network.get_name()
-                    filter_dict["id"] = vdc_network_id[3]
-                    filter_dict["shared"] = network.get_IsShared()
-                    filter_dict["tenant_id"] = vdc_id
-                    if network.get_status() == 1:
-                        filter_dict["admin_state_up"] = True
-                    else:
-                        filter_dict["admin_state_up"] = False
-                    filter_dict["status"] = "ACTIVE"
-                    filter_dict["type"] = "bridge"
-                    self.logger.debug("Returning {}".format(filter_dict))
-                    return filter_dict
-            else:
-                raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
+            for item in content:
+                if item.tag.split('}')[-1] == 'AvailableNetworks':
+                    for net in item:
+                        response = self.perform_request(req_type='GET',
+                                                   url=net.get('href'),
+                                                       headers=headers)
 
+                        if response.status_code != 200:
+                            self.logger.error("Failed to get network content")
+                            raise vimconn.vimconnNotFoundException("Failed to get network content")
+                        else:
+                            net_details = XmlElementTree.fromstring(response.content)
+
+                            vdc_network_id = net_details.get('id').split(":")
+                            if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
+                                filter_dict["name"] = net_details.get('name')
+                                filter_dict["id"] = vdc_network_id[3]
+                                if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+                                    shared = True
+                                else:
+                                    shared = False
+                                filter_dict["shared"] = shared
+                                filter_dict["tenant_id"] = vdc_id
+                                if net_details.get('status') == 1:
+                                    filter_dict["admin_state_up"] = True
+                                else:
+                                    filter_dict["admin_state_up"] = False
+                                filter_dict["status"] = "ACTIVE"
+                                filter_dict["type"] = "bridge"
+                                self.logger.debug("Returning {}".format(filter_dict))
+                                return filter_dict
+                    else:
+                        raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
         except Exception as e:
             self.logger.debug("Error in get_network")
             self.logger.debug(traceback.format_exc())
@@ -863,16 +926,19 @@ class vimconnector(vimconn.vimconnector):
             Return:
                 returns the image identifier in UUID format or raises an exception on error
         """
-        vca = self.connect_as_admin()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
+        conn = self.connect_as_admin()
+        if not conn:
+            raise vimconn.vimconnConnectionException("Failed to connect to vCD")
         # Get Catalog details
-        url_list = [self.vca.host, '/api/catalog/', image_id]
+        url_list = [self.url, '/api/catalog/', image_id]
         catalog_herf = ''.join(url_list)
-        response = Http.get(url=catalog_herf,
-                            headers=vca.vcloud_session.get_vcloud_headers(),
-                            verify=vca.verify,
-                            logger=vca.logger)
+
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                  'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
+
+        response = self.perform_request(req_type='GET',
+                                        url=catalog_herf,
+                                        headers=headers) 
 
         if response.status_code != requests.codes.ok:
             self.logger.debug("delete_image():GET REST API call {} failed. "\
@@ -882,6 +948,8 @@ class vimconnector(vimconn.vimconnector):
 
         lxmlroot_respond = lxmlElementTree.fromstring(response.content)
         namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+        #For python3
+        #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
         namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
 
         catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
@@ -889,11 +957,9 @@ class vimconnector(vimconn.vimconnector):
         for catalogItem in catalogItems:
             catalogItem_href = catalogItem.attrib['href']
 
-            #GET details of catalogItem
-            response = Http.get(url=catalogItem_href,
-                            headers=vca.vcloud_session.get_vcloud_headers(),
-                            verify=vca.verify,
-                            logger=vca.logger)
+            response = self.perform_request(req_type='GET',
+                                        url=catalogItem_href,
+                                        headers=headers)
 
             if response.status_code != requests.codes.ok:
                 self.logger.debug("delete_image():GET REST API call {} failed. "\
@@ -905,26 +971,26 @@ class vimconnector(vimconn.vimconnector):
 
             lxmlroot_respond = lxmlElementTree.fromstring(response.content)
             namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+            #For python3
+            #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
             namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
             catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
 
             #Remove catalogItem
-            response = Http.delete(url= catalogitem_remove_href,
-                                    headers=vca.vcloud_session.get_vcloud_headers(),
-                                    verify=vca.verify,
-                                    logger=vca.logger)
+            response = self.perform_request(req_type='DELETE',
+                                        url=catalogitem_remove_href,
+                                        headers=headers) 
             if response.status_code == requests.codes.no_content:
                 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
             else:
                 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
 
         #Remove catalog
-        url_list = [self.vca.host, '/api/admin/catalog/', image_id]
+        url_list = [self.url, '/api/admin/catalog/', image_id]
         catalog_remove_herf = ''.join(url_list)
-        response = Http.delete(url= catalog_remove_herf,
-                                    headers=vca.vcloud_session.get_vcloud_headers(),
-                                    verify=vca.verify,
-                                    logger=vca.logger)
+        response = self.perform_request(req_type='DELETE',
+                                        url=catalog_remove_herf,
+                                        headers=headers)
 
         if response.status_code == requests.codes.no_content:
             self.logger.debug("Deleted Catalog {}".format(image_id))
@@ -941,7 +1007,7 @@ class vimconnector(vimconn.vimconnector):
         :return:
         """
         for catalog in catalogs:
-            if catalog.name == catalog_name:
+            if catalog['name'] == catalog_name:
                 return True
         return False
 
@@ -957,11 +1023,10 @@ class vimconnector(vimconn.vimconnector):
 
         """
         try:
-            task = vca.create_catalog(catalog_name, catalog_name)
-            result = vca.block_until_completed(task)
-            if not result:
-                return False
-            catalogs = vca.get_catalogs()
+            result = vca.create_catalog(catalog_name, catalog_name)
+            if result is not None:
+                return True 
+            catalogs = vca.list_catalogs()
         except:
             return False
         return self.catalog_exists(catalog_name, catalogs)
@@ -989,37 +1054,45 @@ class vimconnector(vimconn.vimconnector):
         #  status change.
         #  if VCD can parse OVF we upload VMDK file
         try:
-            for catalog in vca.get_catalogs():
-                if catalog_name != catalog.name:
+            for catalog in vca.list_catalogs():
+                if catalog_name != catalog['name']:
                     continue
-                link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
-                                           link.get_rel() == 'add', catalog.get_Link())
-                assert len(link) == 1
+                catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
                 data = """
-                <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
-                """ % (escape(catalog_name), escape(description))
-                headers = vca.vcloud_session.get_vcloud_headers()
-                headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
-                response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
+                <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
+                """.format(escape(catalog_name), escape(description))
+
+                if self.client:
+                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
+
+                response = self.perform_request(req_type='POST',
+                                                url=catalog_href,
+                                                headers=headers,
+                                                data=data)
+
                 if response.status_code == requests.codes.created:
                     catalogItem = XmlElementTree.fromstring(response.content)
                     entity = [child for child in catalogItem if
                               child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
                     href = entity.get('href')
                     template = href
-                    response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
-                                        verify=vca.verify, logger=self.logger)
+
+                    response = self.perform_request(req_type='GET',
+                                                    url=href,
+                                                    headers=headers)
 
                     if response.status_code == requests.codes.ok:
-                        media = mediaType.parseString(response.content, True)
-                        link = filter(lambda link: link.get_rel() == 'upload:default',
-                                      media.get_Files().get_File()[0].get_Link())[0]
-                        headers = vca.vcloud_session.get_vcloud_headers()
                         headers['Content-Type'] = 'Content-Type text/xml'
-                        response = Http.put(link.get_href(),
-                                            data=open(media_file_name, 'rb'),
-                                            headers=headers,
-                                            verify=vca.verify, logger=self.logger)
+                        result = re.search(r'rel="upload:default"\shref="(.*?/descriptor\.ovf)"', response.content)
+                        if not result:
+                            return False
+                        transfer_href = result.group(1)
+
+                        response = self.perform_request(req_type='PUT',
+                                                    url=transfer_href,
+                                                    headers=headers,
+                                                    data=open(media_file_name, 'rb'))
                         if response.status_code != requests.codes.ok:
                             self.logger.debug(
                                 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
@@ -1033,73 +1106,64 @@ class vimconnector(vimconn.vimconnector):
 
                     # uploading VMDK file
                     # check status of OVF upload and upload remaining files.
-                    response = Http.get(template,
-                                        headers=vca.vcloud_session.get_vcloud_headers(),
-                                        verify=vca.verify,
-                                        logger=self.logger)
+                    response = self.perform_request(req_type='GET',
+                                                    url=template,
+                                                    headers=headers)
 
                     if response.status_code == requests.codes.ok:
-                        media = mediaType.parseString(response.content, True)
-                        number_of_files = len(media.get_Files().get_File())
-                        for index in xrange(0, number_of_files):
-                            links_list = filter(lambda link: link.get_rel() == 'upload:default',
-                                                media.get_Files().get_File()[index].get_Link())
-                            for link in links_list:
-                                # we skip ovf since it already uploaded.
-                                if 'ovf' in link.get_href():
-                                    continue
-                                # The OVF file and VMDK must be in a same directory
-                                head, tail = os.path.split(media_file_name)
-                                file_vmdk = head + '/' + link.get_href().split("/")[-1]
-                                if not os.path.isfile(file_vmdk):
-                                    return False
-                                statinfo = os.stat(file_vmdk)
-                                if statinfo.st_size == 0:
+                        result = re.search(r'rel="upload:default"\s*href="(.*?vmdk)"', response.content)
+                        if not result:
+                            return False
+                        link_href = result.group(1)
+                        # we skip ovf since it already uploaded.
+                        if 'ovf' in link_href:
+                            continue
+                        # The OVF file and VMDK must be in a same directory
+                        head, tail = os.path.split(media_file_name)
+                        file_vmdk = head + '/' + link_href.split("/")[-1]
+                        if not os.path.isfile(file_vmdk):
+                            return False
+                        statinfo = os.stat(file_vmdk)
+                        if statinfo.st_size == 0:
+                            return False
+                        hrefvmdk = link_href
+
+                        if progress:
+                            widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
+                                           FileTransferSpeed()]
+                            progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
+
+                        bytes_transferred = 0
+                        f = open(file_vmdk, 'rb')
+                        while bytes_transferred < statinfo.st_size:
+                            my_bytes = f.read(chunk_bytes)
+                            if len(my_bytes) <= chunk_bytes:
+                                headers['Content-Range'] = 'bytes %s-%s/%s' % (
+                                    bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
+                                headers['Content-Length'] = str(len(my_bytes))
+                                response = requests.put(url=hrefvmdk,
+                                                         headers=headers,
+                                                         data=my_bytes,
+                                                         verify=False)
+                                if response.status_code == requests.codes.ok:
+                                    bytes_transferred += len(my_bytes)
+                                    if progress:
+                                        progress_bar.update(bytes_transferred)
+                                else:
+                                    self.logger.debug(
+                                        'file upload failed with error: [%s] %s' % (response.status_code,
+                                                                                        response.content))
+
+                                    f.close()
                                     return False
-                                hrefvmdk = link.get_href()
-
-                                if progress:
-                                    print("Uploading file: {}".format(file_vmdk))
-                                if progress:
-                                    widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
-                                               FileTransferSpeed()]
-                                    progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
-
-                                bytes_transferred = 0
-                                f = open(file_vmdk, 'rb')
-                                while bytes_transferred < statinfo.st_size:
-                                    my_bytes = f.read(chunk_bytes)
-                                    if len(my_bytes) <= chunk_bytes:
-                                        headers = vca.vcloud_session.get_vcloud_headers()
-                                        headers['Content-Range'] = 'bytes %s-%s/%s' % (
-                                            bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
-                                        headers['Content-Length'] = str(len(my_bytes))
-                                        response = Http.put(hrefvmdk,
-                                                            headers=headers,
-                                                            data=my_bytes,
-                                                            verify=vca.verify,
-                                                            logger=None)
-
-                                        if response.status_code == requests.codes.ok:
-                                            bytes_transferred += len(my_bytes)
-                                            if progress:
-                                                progress_bar.update(bytes_transferred)
-                                        else:
-                                            self.logger.debug(
-                                                'file upload failed with error: [%s] %s' % (response.status_code,
-                                                                                            response.content))
-
-                                            f.close()
-                                            return False
-                                f.close()
-                                if progress:
-                                    progress_bar.finish()
-                                time.sleep(10)
-                        return True
-                    else:
-                        self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
-                                          format(catalog_name, media_file_name))
-                        return False
+                        f.close()
+                        if progress:
+                            progress_bar.finish()
+                            time.sleep(10)
+                    return True
+                else:
+                    self.logger.debug("Failed to retrieve vApp template for catalog name {} for OVF {}".
+                                      format(catalog_name, media_file_name))
+                    return False
         except Exception as exp:
             self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
                 .format(catalog_name,media_file_name, exp))
@@ -1139,9 +1203,9 @@ class vimconnector(vimconn.vimconnector):
         """
 
         for catalog in catalogs:
-            if catalog.name == catalog_name:
-                catalog_id = catalog.get_id().split(":")
-                return catalog_id[3]
+            if catalog['name'] == catalog_name:
+                catalog_id = catalog['id']
+                return catalog_id
         return None
 
     def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
@@ -1158,9 +1222,9 @@ class vimconnector(vimconn.vimconnector):
             return None
 
         for catalog in catalogs:
-            catalog_id = catalog.get_id().split(":")[3]
+            catalog_id = catalog.get('id')
             if catalog_id == catalog_uuid:
-                return catalog.name
+                return catalog.get('name')
         return None
 
     def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
@@ -1177,7 +1241,7 @@ class vimconnector(vimconn.vimconnector):
             return None
 
         for catalog in catalogs:
-            catalog_id = catalog.get_id().split(":")[3]
+            catalog_id = catalog.get('id')
             if catalog_id == catalog_uuid:
                 return catalog
         return None
@@ -1226,43 +1290,45 @@ class vimconnector(vimconn.vimconnector):
                           "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
 
         try:
-            catalogs = self.vca.get_catalogs()
+            org,vdc = self.get_vdc_details()
+            catalogs = org.list_catalogs()
         except Exception as exp:
             self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
             raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
 
         if len(catalogs) == 0:
             self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
-            result = self.create_vimcatalog(self.vca, catalog_md5_name)
+            result = self.create_vimcatalog(org, catalog_md5_name)
             if not result:
                 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
-            result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
+
+            result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
                                           media_name=filename, medial_file_name=path, progress=progress)
             if not result:
                 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
-            return self.get_catalogid(catalog_name, self.vca.get_catalogs())
+            return self.get_catalogid(catalog_name, catalogs)
         else:
             for catalog in catalogs:
                 # search for existing catalog if we find same name we return ID
                 # TODO optimize this
-                if catalog.name == catalog_md5_name:
+                if catalog['name'] == catalog_md5_name:
                     self.logger.debug("Found existing catalog entry for {} "
                                       "catalog id {}".format(catalog_name,
                                                              self.get_catalogid(catalog_md5_name, catalogs)))
-                    return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
+                    return self.get_catalogid(catalog_md5_name, catalogs)
 
         # if we didn't find existing catalog we create a new one and upload image.
         self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
-        result = self.create_vimcatalog(self.vca, catalog_md5_name)
+        result = self.create_vimcatalog(org, catalog_md5_name)
         if not result:
             raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
 
-        result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
+        result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
                                       media_name=filename, medial_file_name=path, progress=progress)
         if not result:
             raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
 
-        return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
+        return self.get_catalogid(catalog_md5_name, org.list_catalogs())
 
     def get_image_list(self, filter_dict={}):
         '''Obtain tenant images from VIM
@@ -1277,14 +1343,15 @@ class vimconnector(vimconn.vimconnector):
         '''
 
         try:
+            org, vdc = self.get_vdc_details()
             image_list = []
-            catalogs = self.vca.get_catalogs()
+            catalogs = org.list_catalogs()
             if len(catalogs) == 0:
                 return image_list
             else:
                 for catalog in catalogs:
-                    catalog_uuid = catalog.get_id().split(":")[3]
-                    name = catalog.name
+                    catalog_uuid = catalog.get('id')
+                    name = catalog.get('name')
                     filtered_dict = {}
                     if filter_dict.get("name") and filter_dict["name"] != name:
                         continue
@@ -1315,6 +1382,9 @@ class vimconnector(vimconn.vimconnector):
         try:
             refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
                           vdc.ResourceEntities.ResourceEntity)
+            #For python3
+            #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
+            #         if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
             if len(refs) == 1:
                 return refs[0].href.split("vapp")[1][1:]
         except Exception as e:
@@ -1339,6 +1409,9 @@ class vimconnector(vimconn.vimconnector):
             refs = filter(lambda ref:
                           ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
                           vdc.ResourceEntities.ResourceEntity)
+            #For python3
+            #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
+            #         if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
             for ref in refs:
                 vappid = ref.href.split("vapp")[1][1:]
                 # find vapp with respected vapp uuid
@@ -1349,34 +1422,30 @@ class vimconnector(vimconn.vimconnector):
             return False
         return False
 
-    def get_namebyvappid(self, vdc=None, vapp_uuid=None):
+    def get_namebyvappid(self, vapp_uuid=None):
         """Method returns vApp name from vCD and lookup done by vapp_id.
 
         Args:
-            vca: Connector to VCA
-            vdc: The VDC object.
             vapp_uuid: vappid is application identifier
 
         Returns:
             The return vApp name otherwise None
         """
-
         try:
-            refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
-                          vdc.ResourceEntities.ResourceEntity)
-            for ref in refs:
-                # we care only about UUID the rest doesn't matter
-                vappid = ref.href.split("vapp")[1][1:]
-                if vappid == vapp_uuid:
-                    response = Http.get(ref.href, headers=self.vca.vcloud_session.get_vcloud_headers(), verify=self.vca.verify,
-                                        logger=self.logger)
-
-                    #Retry login if session expired & retry sending request
-                    if response.status_code == 403:
-                        response = self.retry_rest('GET', ref.href)
-
-                    tree = XmlElementTree.fromstring(response.content)
-                    return tree.attrib['name']
+            if self.client and vapp_uuid:
+                vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
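+                # Fetch the vApp entity directly from the vCD REST API and read its
+                # name attribute from the root element of the response XML.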
+                response = self.perform_request(req_type='GET',
+                                                url=vapp_call,
+                                                headers=headers) 
+                #Retry login if session expired & retry sending request
+                if response.status_code == 403:
+                    response = self.retry_rest('GET', vapp_call)
+
+                tree = XmlElementTree.fromstring(response.content)
+                return tree.attrib['name']
         except Exception as e:
             self.logger.exception(e)
             return None
@@ -1444,21 +1513,27 @@ class vimconnector(vimconn.vimconnector):
         new_vm_name = [name, '-', str(uuid.uuid4())]
         vmname_andid = ''.join(new_vm_name)
 
-        # if vm already deployed we return existing uuid
-        # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
-        # if vapp_uuid is not None:
-        #     return vapp_uuid
+        for net in net_list:
+            if net['type'] == "SR-IOV" or net['type'] == "PCI-PASSTHROUGH":
+                raise vimconn.vimconnNotSupportedException(
+                      "Current vCD version does not support type : {}".format(net['type']))
+
+        if len(net_list) > 10:
+            raise vimconn.vimconnNotSupportedException(
+                      "The VM hardware versions 7 and above support up to 10 NICs only")
 
+        # if vm already deployed we return existing uuid
         # we check for presence of VDC, Catalog entry and Flavor.
-        vdc = self.get_vdc_details()
+        org, vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnNotFoundException(
                 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
-        catalogs = self.vca.get_catalogs()
+        catalogs = org.list_catalogs()
         if catalogs is None:
             #Retry once, if failed by refreshing token
             self.get_token()
-            catalogs = self.vca.get_catalogs()
+            org = Org(self.client, resource=self.client.get_org())
+            catalogs = org.list_catalogs()
         if catalogs is None:
             raise vimconn.vimconnNotFoundException(
                 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
@@ -1528,16 +1603,145 @@ class vimconnector(vimconn.vimconnector):
         # use: 'data', 'bridge', 'mgmt'
         # create vApp.  Set vcpu and ram based on flavor id.
         try:
+            vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
+            if not vdc_obj:
+                raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object") 
+
             for retry in (1,2):
-                vapptask = self.vca.create_vapp(self.tenant_name, vmname_andid, templateName,
-                                           self.get_catalogbyid(image_id, catalogs),
-                                           network_name=None,  # None while creating vapp
-                                           network_mode=network_mode,
-                                           vm_name=vmname_andid,
-                                           vm_cpus=vm_cpus,  # can be None if flavor is None
-                                           vm_memory=vm_memory)  # can be None if flavor is None
-
-                if not vapptask and retry==1:
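+                # Resolve the catalog item to its underlying vApp template href, then
+                # fetch the template XML so the source VM and its defaults can be read.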
+                items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
+                catalog_items = [items.attrib]
+
+                if len(catalog_items) == 1:
+                    if self.client:
+                        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+                    response = self.perform_request(req_type='GET',
+                                                url=catalog_items[0].get('href'),
+                                                headers=headers)
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    vapp_template_href = entity.get("href")
+        
+                response = self.perform_request(req_type='GET',
+                                                    url=vapp_template_href,
+                                                    headers=headers)    
+                if response.status_code != requests.codes.ok:
+                    self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_template_href,
+                                                                                           response.status_code))
+                else:
+                    result = (response.content).replace("\n"," ")
+
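+                # Parse the template XML with regexes to extract the source VM reference
+                # (name, id, href) and its default CPU, memory and cores-per-socket values.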
+                src = re.search('<Vm goldMaster="false"\sstatus="\d+"\sname="(.*?)"\s'
+                                               'id="(\w+:\w+:vm:.*?)"\shref="(.*?)"\s'
+                              'type="application/vnd\.vmware\.vcloud\.vm\+xml',result)
+                if src:
+                    vm_name = src.group(1)
+                    vm_id = src.group(2)
+                    vm_href = src.group(3)
+
+                cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
+                memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
+                cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>',result).group(1)
+
+                headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml' 
+                vdc_id = vdc.get('id').split(':')[-1]
+                instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
+                                                                                                vdc_id) 
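+                # Build the InstantiateVAppTemplateParams payload by hand and POST it to
+                # the vDC's instantiateVAppTemplate action endpoint below.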
+                data = """<?xml version="1.0" encoding="UTF-8"?>
+                <InstantiateVAppTemplateParams
+                xmlns="http://www.vmware.com/vcloud/v1.5"
+                name="{}"
+                deploy="false"
+                powerOn="false"
+                xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+                xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
+                <Description>Vapp instantiation</Description>
+                <InstantiationParams>
+                     <NetworkConfigSection>
+                         <ovf:Info>Configuration parameters for logical networks</ovf:Info>
+                         <NetworkConfig networkName="None">
+                             <Configuration>
+                                 <ParentNetwork href=""/>
+                                 <FenceMode>bridged</FenceMode>
+                             </Configuration>
+                         </NetworkConfig>
+                     </NetworkConfigSection>
+                <LeaseSettingsSection
+                type="application/vnd.vmware.vcloud.leaseSettingsSection+xml">
+                <ovf:Info>Lease Settings</ovf:Info>
+                <StorageLeaseInSeconds>172800</StorageLeaseInSeconds>
+                <StorageLeaseExpiration>2014-04-25T08:08:16.438-07:00</StorageLeaseExpiration>
+                </LeaseSettingsSection>
+                </InstantiationParams>
+                <Source href="{}"/> 
+                <SourcedItem>
+                <Source href="{}" id="{}" name="{}"
+                type="application/vnd.vmware.vcloud.vm+xml"/>
+                <VmGeneralParams>
+                    <NeedsCustomization>false</NeedsCustomization>
+                </VmGeneralParams>
+                <InstantiationParams>
+                      <NetworkConnectionSection>
+                      <ovf:Info>Specifies the available VM network connections</ovf:Info>
+                      <NetworkConnection network="{}">
+                      <NetworkConnectionIndex>0</NetworkConnectionIndex>
+                      <IsConnected>true</IsConnected>
+                      <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>     
+                      </NetworkConnection> 
+                      </NetworkConnectionSection><ovf:VirtualHardwareSection>
+                      <ovf:Info>Virtual hardware requirements</ovf:Info>
+                      <ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
+                      xmlns:vmw="http://www.vmware.com/schema/ovf">
+                      <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
+                      <rasd:Description>Number of Virtual CPUs</rasd:Description>
+                      <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{cpu} virtual CPU(s)</rasd:ElementName>
+                      <rasd:InstanceID>4</rasd:InstanceID>      
+                      <rasd:Reservation>0</rasd:Reservation>
+                      <rasd:ResourceType>3</rasd:ResourceType>
+                      <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{cpu}</rasd:VirtualQuantity>
+                      <rasd:Weight>0</rasd:Weight>
+                      <vmw:CoresPerSocket ovf:required="false">{core}</vmw:CoresPerSocket>
+                      </ovf:Item><ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
+                      <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
+                      <rasd:Description>Memory Size</rasd:Description>
+                      <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{memory} MB of memory</rasd:ElementName>
+                      <rasd:InstanceID>5</rasd:InstanceID>
+                      <rasd:Reservation>0</rasd:Reservation>
+                      <rasd:ResourceType>4</rasd:ResourceType>
+                      <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{memory}</rasd:VirtualQuantity>
+                      <rasd:Weight>0</rasd:Weight>
+                      </ovf:Item>
+                </ovf:VirtualHardwareSection> 
+                </InstantiationParams>
+                </SourcedItem>
+                <AllEULAsAccepted>false</AllEULAsAccepted>
+                </InstantiateVAppTemplateParams>""".format(vmname_andid,
+                                                     vapp_template_href,
+                                                                vm_href,
+                                                                  vm_id,
+                                                                vm_name,
+                                                        primary_netname,
+                                                               cpu=cpus,
+                                                             core=cores,
+                                                       memory=memory_mb)
+
+                response = self.perform_request(req_type='POST',
+                                                url=instantiate_vapp_href,
+                                                headers=headers,
+                                                data=data)
+
+                if response.status_code != 201:
+                    self.logger.error("REST call {} failed reason : {} "\
+                         "status code : {}".format(instantiate_vapp_href,
+                                                        response.content,
+                                                   response.status_code))
+                    raise vimconn.vimconnException("new_vminstance(): Failed to create "\
+                                                        "vApp {}".format(vmname_andid))
+                else:
+                    vapptask = self.get_task_from_response(response.content)
+
+                if vapptask is None and retry==1:
                     self.get_token() # Retry getting token
                     continue
                 else:
@@ -1546,8 +1750,15 @@ class vimconnector(vimconn.vimconnector):
             if vapptask is None or vapptask is False:
                 raise vimconn.vimconnUnexpectedResponse(
                     "new_vminstance(): failed to create vApp {}".format(vmname_andid))
-            if type(vapptask) is VappTask:
-                self.vca.block_until_completed(vapptask)
+
+            # wait for task to complete   
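+            # (the pyvcloud task monitor replaces the old self.vca.block_until_completed())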
+            result = self.client.get_task_monitor().wait_for_success(task=vapptask)
+
+            if result.get('status') == 'success':
+                self.logger.debug("new_vminstance(): Successfully created vApp {}".format(vmname_andid))
+            else:
+                raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): failed to create vApp {}".format(vmname_andid))
 
         except Exception as exp:
             raise vimconn.vimconnUnexpectedResponse(
@@ -1555,7 +1766,10 @@ class vimconnector(vimconn.vimconnector):
 
         # we should have now vapp in undeployed state.
         try:
-            vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
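+            # Look up the freshly created vApp in the VDC; the bare UUID is the last
+            # segment of the vCD id (e.g. urn:vcloud:vapp:<uuid>).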
+            vdc_obj = VDC(self.client, href=vdc.get('href'))
+            vapp_resource = vdc_obj.get_vapp(vmname_andid)
+            vapp_uuid = vapp_resource.get('id').split(':')[-1]
+            vapp = VApp(self.client, resource=vapp_resource)
 
         except Exception as exp:
             raise vimconn.vimconnUnexpectedResponse(
@@ -1598,7 +1812,6 @@ class vimconnector(vimconn.vimconnector):
                                                             vmname_andid)
                                  )
 
-        vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
         # Modify vm disk
         if vm_disk:
             #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
@@ -1670,52 +1883,49 @@ class vimconnector(vimconn.vimconnector):
                                   - NONE (No IP addressing mode specified.)"""
 
                 if primary_netname is not None:
-                    nets = filter(lambda n: n.name == interface_net_name, self.vca.get_networks(self.tenant_name))
+                    nets = filter(lambda n: n.get('name') == interface_net_name, self.get_network_list())
+                    #For python3
+                    #nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
                     if len(nets) == 1:
-                        self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
+                        self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
 
-                        vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
-                        task = vapp.connect_to_network(nets[0].name, nets[0].href)
-                        if type(task) is GenericTask:
-                            self.vca.block_until_completed(task)
+                        vdc_obj = VDC(self.client, href=vdc.get('href'))
+                        vapp_resource = vdc_obj.get_vapp(vmname_andid)
+                        vapp = VApp(self.client, resource=vapp_resource)
                         # connect network to VM - with all DHCP by default
+                        task = vapp.connect_org_vdc_network(nets[0].get('name'))
+
+                        self.client.get_task_monitor().wait_for_success(task=task)
 
                         type_list = ('PF', 'PCI-PASSTHROUGH', 'VF', 'SR-IOV', 'VFnotShared')
                         if 'type' in net and net['type'] not in type_list:
                             # fetching nic type from vnf
                             if 'model' in net:
-                                if net['model'].lower() == 'virtio':
+                                if net['model'] is not None and net['model'].lower() == 'virtio':
                                     nic_type = 'VMXNET3'
                                 else:
                                     nic_type = net['model']
 
                                 self.logger.info("new_vminstance(): adding network adapter "\
-                                                          "to a network {}".format(nets[0].name))
-                                self.add_network_adapter_to_vms(vapp, nets[0].name,
+                                                          "to a network {}".format(nets[0].get('name')))
+                                self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
                                                                 primary_nic_index,
                                                                 nicIndex,
                                                                 net,
                                                                 nic_type=nic_type)
                             else:
                                 self.logger.info("new_vminstance(): adding network adapter "\
-                                                         "to a network {}".format(nets[0].name))
-                                self.add_network_adapter_to_vms(vapp, nets[0].name,
+                                                         "to a network {}".format(nets[0].get('name')))
+                                self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
                                                                 primary_nic_index,
                                                                 nicIndex,
                                                                 net)
                 nicIndex += 1
 
-            vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
             # cloud-init for ssh-key injection
             if cloud_config:
                 self.cloud_init(vapp,cloud_config)
 
-            # deploy and power on vm
-            self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
-            deploytask = vapp.deploy(powerOn=False)
-            if type(deploytask) is GenericTask:
-                self.vca.block_until_completed(deploytask)
-
         # ############# Stub code for SRIOV #################
         #Add SRIOV
 #         if len(sriov_net_info) > 0:
@@ -1750,16 +1960,21 @@ class vimconnector(vimconn.vimconnector):
                     self.logger.info("Fail to reserved memory {} to VM {}".format(
                                                                 str(memReserve), str(vm_obj)))
 
-            self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
+            self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
 
-            vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
-            poweron_task = vapp.poweron()
-            if type(poweron_task) is GenericTask:
-                self.vca.block_until_completed(poweron_task)
+            vapp_id = vapp_resource.get('id').split(':')[-1]
+            poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
+            result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+            if result.get('status') == 'success':
+                self.logger.info("new_vminstance(): Successfully powered on "\
+                                             "vApp {}".format(vmname_andid))
+            else:
+                self.logger.error("new_vminstance(): failed to power on vApp "\
+                                                     "{}".format(vmname_andid))
 
         except Exception as exp :
             # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
-            self.logger.debug("new_vminstance(): Failed create new vm instance {} with exception {}"
+            self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
                               .format(name, exp))
             raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
                                            .format(name, exp))
@@ -1769,14 +1984,16 @@ class vimconnector(vimconn.vimconnector):
         vapp_uuid = None
         while wait_time <= MAX_WAIT_TIME:
             try:
-                vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
+                vapp_resource = vdc_obj.get_vapp(vmname_andid)
+                vapp = VApp(self.client, resource=vapp_resource) 
             except Exception as exp:
                 raise vimconn.vimconnUnexpectedResponse(
                         "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
                         .format(vmname_andid, exp))
 
-            if vapp and vapp.me.deployed:
-                vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
+            #if vapp and vapp.me.deployed:
+            if vapp and vapp_resource.get('deployed') == 'true':
+                vapp_uuid = vapp_resource.get('id').split(':')[-1]
                 break
             else:
                 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
@@ -1809,7 +2026,7 @@ class vimconnector(vimconn.vimconnector):
 
         self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
 
-        vdc = self.get_vdc_details()
+        org, vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnConnectionException(
                 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
@@ -1855,15 +2072,18 @@ class vimconnector(vimconn.vimconnector):
 
         self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
 
-        vdc = self.get_vdc_details()
-        if vdc is None:
+        org, vdc = self.get_vdc_details()
+        vdc_obj = VDC(self.client, href=vdc.get('href')) 
+        if vdc_obj is None:
             self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
                 self.tenant_name))
             raise vimconn.vimconnException(
                 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
 
         try:
-            vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
+            vapp_name = self.get_namebyvappid(vm__vim_uuid)
+            vapp_resource = vdc_obj.get_vapp(vapp_name)
+            vapp = VApp(self.client, resource=vapp_resource)
             if vapp_name is None:
                 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
                 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
@@ -1871,26 +2091,20 @@ class vimconnector(vimconn.vimconnector):
                 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
 
             # Delete vApp and wait for status change if task executed and vApp is None.
-            vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
 
             if vapp:
-                if vapp.me.deployed:
+                if vapp_resource.get('deployed') == 'true':
                     self.logger.info("Powering off vApp {}".format(vapp_name))
                     #Power off vApp
                     powered_off = False
                     wait_time = 0
                     while wait_time <= MAX_WAIT_TIME:
-                        vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
-                        if not vapp:
-                            self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
-                            return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+                        power_off_task = vapp.power_off()
+                        result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
 
-                        power_off_task = vapp.poweroff()
-                        if type(power_off_task) is GenericTask:
-                            result = self.vca.block_until_completed(power_off_task)
-                            if result:
-                                powered_off = True
-                                break
+                        if result.get('status') == 'success':
+                            powered_off = True
+                            break
                         else:
                             self.logger.info("Wait for vApp {} to power off".format(vapp_name))
                             time.sleep(INTERVAL_TIME)
@@ -1906,17 +2120,16 @@ class vimconnector(vimconn.vimconnector):
                     wait_time = 0
                     undeployed = False
                     while wait_time <= MAX_WAIT_TIME:
-                        vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
+                        vapp = VApp(self.client, resource=vapp_resource) 
                         if not vapp:
                             self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
                             return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
-                        undeploy_task = vapp.undeploy(action='powerOff')
+                        undeploy_task = vapp.undeploy()
 
-                        if type(undeploy_task) is GenericTask:
-                            result = self.vca.block_until_completed(undeploy_task)
-                            if result:
-                                undeployed = True
-                                break
+                        result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
+                        if result.get('status') == 'success':
+                            undeployed = True
+                            break
                         else:
                             self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
                             time.sleep(INTERVAL_TIME)
@@ -1928,43 +2141,37 @@ class vimconnector(vimconn.vimconnector):
 
                 # delete vapp
                 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
-                vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
 
                 if vapp is not None:
                     wait_time = 0
                     result = False
 
                     while wait_time <= MAX_WAIT_TIME:
-                        vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
+                        vapp = VApp(self.client, resource=vapp_resource)
                         if not vapp:
                             self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
                             return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
 
-                        delete_task = vapp.delete()
+                        delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
 
-                        if type(delete_task) is GenericTask:
-                            self.vca.block_until_completed(delete_task)
-                            result = self.vca.block_until_completed(delete_task)
-                            if result:
-                                break
+                        result = self.client.get_task_monitor().wait_for_success(task=delete_task)
+                        if result.get('status') == 'success':     
+                            break
                         else:
                             self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
                             time.sleep(INTERVAL_TIME)
 
                         wait_time +=INTERVAL_TIME
 
-                    if not result:
+                    if result is None:
                         self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
-
+                    else:
+                        self.logger.info("Deleted vm instance {} successfully".format(vm__vim_uuid))
+                        return vm__vim_uuid
         except:
             self.logger.debug(traceback.format_exc())
             raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
 
-        if self.vca.get_vapp(self.get_vdc_details(), vapp_name) is None:
-            self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
-            return vm__vim_uuid
-        else:
-            raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
 
     def refresh_vms_status(self, vm_list):
         """Get the status of the virtual machines and their interfaces/ports
@@ -1992,51 +2199,83 @@ class vimconnector(vimconn.vimconnector):
 
         self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
 
-        vdc = self.get_vdc_details()
+        org,vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
 
         vms_dict = {}
         nsx_edge_list = []
         for vmuuid in vm_list:
-            vmname = self.get_namebyvappid(self.get_vdc_details(), vmuuid)
-            if vmname is not None:
+            vapp_name = self.get_namebyvappid(vmuuid)
+            if vapp_name is not None:
 
                 try:
                     vm_pci_details = self.get_vm_pci_details(vmuuid)
-                    the_vapp = self.vca.get_vapp(self.get_vdc_details(), vmname)
-                    vm_info = the_vapp.get_vms_details()
-                    vm_status = vm_info[0]['status']
-                    vm_info[0].update(vm_pci_details)
-
-                    vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
-                               'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
-                               'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
-
-                    # get networks
-                    vm_app_networks = the_vapp.get_vms_network_info()
-                    for vapp_network in vm_app_networks:
-                        for vm_network in vapp_network:
-                            if vm_network['name'] == vmname:
-                                #Assign IP Address based on MAC Address in NSX DHCP lease info
-                                if vm_network['ip'] is None:
-                                    if not nsx_edge_list:
-                                        nsx_edge_list = self.get_edge_details()
-                                        if nsx_edge_list is None:
-                                            raise vimconn.vimconnException("refresh_vms_status:"\
-                                                                           "Failed to get edge details from NSX Manager")
-                                    if vm_network['mac'] is not None:
-                                        vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])
-
-                                vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
-                                interface = {"mac_address": vm_network['mac'],
-                                             "vim_net_id": vm_net_id,
-                                             "vim_interface_id": vm_net_id,
-                                             'ip_address': vm_network['ip']}
-                                # interface['vim_info'] = yaml.safe_dump(vm_network)
-                                vm_dict["interfaces"].append(interface)
+                    vdc_obj = VDC(self.client, href=vdc.get('href'))
+                    vapp_resource = vdc_obj.get_vapp(vapp_name)
+                    the_vapp = VApp(self.client, resource=vapp_resource)
+
+                    vm_details = {}
+                    for vm in the_vapp.get_all_vms():
+                        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}    
+                        response = self.perform_request(req_type='GET',
+                                                        url=vm.get('href'),
+                                                        headers=headers)
+
+                        if response.status_code != 200:
+                            self.logger.error("refresh_vms_status : REST call {} failed reason : {} "\
+                                                            "status code : {}".format(vm.get('href'),
+                                                                                    response.content,
+                                                                               response.status_code))
+                            raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
+                                                                         "VM details")
+                        xmlroot = XmlElementTree.fromstring(response.content)
+
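+                        # Scrape disk capacity, vCPU count and memory size from the raw VM
+                        # XML with regexes, and map the numeric vCD status code to the MANO
+                        # status string.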
+                        result = response.content.replace("\n"," ")
+                        hdd_mb = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result).group(1)
+                        vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
+                        cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
+                        vm_details['cpus'] = int(cpus) if cpus else None
+                        memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
+                        vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
+                        vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
+                        vm_details['id'] = xmlroot.get('id')
+                        vm_details['name'] = xmlroot.get('name')
+                        vm_info = [vm_details]
+                        if vm_pci_details:
+                            vm_info[0].update(vm_pci_details)  
+
+                        vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
+                                   'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
+                                   'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
+
+                        # get networks
+                        vm_ip = None
+                        vm_mac = None   
+                        if vm.NetworkConnectionSection.NetworkConnection:
+                            vm_mac = vm.NetworkConnectionSection.NetworkConnection.MACAddress
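+                        # The IP is not taken from the vApp XML; it is resolved from the
+                        # NSX edge DHCP lease table by matching the NIC's MAC address.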
+                        if vm_ip is None:
+                            if not nsx_edge_list:
+                                nsx_edge_list = self.get_edge_details()
+                                if nsx_edge_list is None:
+                                    raise vimconn.vimconnException("refresh_vms_status:"\
+                                                                      "Failed to get edge details from NSX Manager")
+                            if vm_mac is not None:
+                                vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
+
+                        network_name = vm.NetworkConnectionSection.NetworkConnection.get('network')    
+                        vm_net_id = self.get_network_id_by_name(network_name)
+                        interface = {"mac_address": vm_mac,
+                                     "vim_net_id": vm_net_id,
+                                     "vim_interface_id": vm_net_id,
+                                     'ip_address': vm_ip}
+
+                        vm_dict["interfaces"].append(interface)
+
                     # add a vm to vm dict
                     vms_dict.setdefault(vmuuid, vm_dict)
+                    self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
                 except Exception as exp:
                     self.logger.debug("Error in response {}".format(exp))
                     self.logger.debug(traceback.format_exc())
@@ -2155,11 +2394,11 @@ class vimconnector(vimconn.vimconnector):
         if vm__vim_uuid is None or action_dict is None:
             raise vimconn.vimconnException("Invalid request. VM id or action is None.")
 
-        vdc = self.get_vdc_details()
+        org, vdc = self.get_vdc_details()
         if vdc is None:
             raise  vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
 
-        vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
+        vapp_name = self.get_namebyvappid(vm__vim_uuid)
         if vapp_name is None:
             self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
             raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
@@ -2167,55 +2406,56 @@ class vimconnector(vimconn.vimconnector):
             self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
 
         try:
-            the_vapp = self.vca.get_vapp(vdc, vapp_name)
-            # TODO fix all status
+            vdc_obj = VDC(self.client, href=vdc.get('href'))
+            vapp_resource = vdc_obj.get_vapp(vapp_name)
+            vapp = VApp(self.client, resource=vapp_resource)  
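+            # Most actions issue the corresponding pyvcloud VApp operation and wait for
+            # the returned task via the client task monitor before reporting the result.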
             if "start" in action_dict:
-                vm_info = the_vapp.get_vms_details()
-                vm_status = vm_info[0]['status']
                 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
-                if vm_status == "Suspended" or vm_status == "Powered off":
-                    power_on_task = the_vapp.poweron()
-                    result = self.vca.block_until_completed(power_on_task)
-                    self.instance_actions_result("start", result, vapp_name)
+                poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)   
+                result = self.client.get_task_monitor().wait_for_success(task=poweron_task) 
+                self.instance_actions_result("start", result, vapp_name)
             elif "rebuild" in action_dict:
                 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
-                rebuild_task = the_vapp.deploy(powerOn=True)
-                result = self.vca.block_until_completed(rebuild_task)
+                rebuild_task = vapp.deploy(power_on=True)
+                result = self.client.get_task_monitor().wait_for_success(task=rebuild_task) 
                 self.instance_actions_result("rebuild", result, vapp_name)
             elif "pause" in action_dict:
                 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
-                pause_task = the_vapp.undeploy(action='suspend')
-                result = self.vca.block_until_completed(pause_task)
+                pause_task = vapp.undeploy(action='suspend')
+                result = self.client.get_task_monitor().wait_for_success(task=pause_task) 
                 self.instance_actions_result("pause", result, vapp_name)
             elif "resume" in action_dict:
                 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
-                power_task = the_vapp.poweron()
-                result = self.vca.block_until_completed(power_task)
+                poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
+                result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
                 self.instance_actions_result("resume", result, vapp_name)
             elif "shutoff" in action_dict or "shutdown" in action_dict:
                 action_name , value = action_dict.items()[0]
+                #For python3
+                #action_name , value = list(action_dict.items())[0]
                 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
-                power_off_task = the_vapp.undeploy(action='powerOff')
-                result = self.vca.block_until_completed(power_off_task)
+                shutdown_task = vapp.shutdown()
+                result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
                 if action_name == "shutdown":
                     self.instance_actions_result("shutdown", result, vapp_name)
                 else:
                     self.instance_actions_result("shutoff", result, vapp_name)
             elif "forceOff" in action_dict:
-                result = the_vapp.undeploy(action='force')
+                result = vapp.undeploy(action='powerOff')
                 self.instance_actions_result("forceOff", result, vapp_name)
             elif "reboot" in action_dict:
                 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
-                reboot_task = the_vapp.reboot()
+                reboot_task = vapp.reboot()
+                self.client.get_task_monitor().wait_for_success(task=reboot_task)  
             else:
                 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
-            return None
+            return vm__vim_uuid
         except Exception as exp :
             self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
             raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
 
     def instance_actions_result(self, action, result, vapp_name):
-        if result:
+        if result.get('status') == 'success':
             self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
         else:
             self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
@@ -2328,6 +2568,8 @@ class vimconnector(vimconn.vimconnector):
             if org_dict and 'networks' in org_dict:
                 org_network_dict = org_dict['networks']
                 for net_uuid,net_name in org_network_dict.iteritems():
+                #For python3
+                #for net_uuid,net_name in org_network_dict.items():
                     if net_name == network_name:
                         return net_uuid
 
@@ -2347,15 +2589,16 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                 The return XML respond
         """
-
-        url_list = [self.vca.host, '/api/org']
+        url_list = [self.url, '/api/org']
         vm_list_rest_call = ''.join(url_list)
 
-        if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
-            response = Http.get(url=vm_list_rest_call,
-                                headers=self.vca.vcloud_session.get_vcloud_headers(),
-                                verify=self.vca.verify,
-                                logger=self.vca.logger)
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+            response = self.perform_request(req_type='GET',
+                                     url=vm_list_rest_call,
+                                           headers=headers)
 
             if response.status_code == 403:
                 response = self.retry_rest('GET', vm_list_rest_call)
@@ -2367,11 +2610,11 @@ class vimconnector(vimconn.vimconnector):
 
     def get_org_action(self, org_uuid=None):
         """
-        Method leverages vCloud director and retrieve available object fdr organization.
+        Method leverages vCloud director and retrieves the available object for an organization.
 
         Args:
-            vca - is active VCA connection.
-            vdc_name - is a vdc name that will be used to query vms action
+            org_uuid - vCD organization uuid
+            self.client - is an active vCD client connection.
 
             Returns:
                 The return XML respond
@@ -2380,22 +2623,22 @@ class vimconnector(vimconn.vimconnector):
         if org_uuid is None:
             return None
 
-        url_list = [self.vca.host, '/api/org/', org_uuid]
+        url_list = [self.url, '/api/org/', org_uuid]
         vm_list_rest_call = ''.join(url_list)
 
-        if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
-            response = Http.get(url=vm_list_rest_call,
-                                headers=self.vca.vcloud_session.get_vcloud_headers(),
-                                verify=self.vca.verify,
-                                logger=self.vca.logger)
+        if self.client._session: 
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']} 
 
-            #Retry login if session expired & retry sending request
+            #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
             if response.status_code == 403:
                 response = self.retry_rest('GET', vm_list_rest_call)
 
             if response.status_code == requests.codes.ok:
-                return response.content
-
+                return response.content 
         return None
 
     def get_org(self, org_uuid=None):
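The hunks above show the pattern this patch applies to nearly every REST call in the connector: the old Http.get() with vca.vcloud_session.get_vcloud_headers() is replaced by headers rebuilt from the pyvcloud Client session token and sent through self.perform_request(). A minimal standalone sketch of that pattern, intended as an illustration rather than the connector's own code; the API_VERSION value and the logged-in client argument are assumptions standing in for the module's constant and self.client:

    import requests

    API_VERSION = '27.0'  # assumed placeholder; the connector defines its own API_VERSION constant

    def get_org_xml(client, base_url, org_uuid):
        """Fetch an org document the way the reworked connector does: reuse the
        x-vcloud-authorization token held by the pyvcloud Client session."""
        headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
                   'x-vcloud-authorization':
                       client._session.headers['x-vcloud-authorization']}
        response = requests.get('{}/api/org/{}'.format(base_url, org_uuid),
                                headers=headers, verify=False)
        if response.status_code == requests.codes.ok:
            return response.content
        return None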
@@ -2483,6 +2726,9 @@ class vimconnector(vimconn.vimconnector):
         if not (not vca.vcloud_session or not vca.vcloud_session.organization):
             refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
                           vca.vcloud_session.organization.Link)
+            #For python3
+            #refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and\
+            #        ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
             if len(refs) == 1:
                 response = Http.get(url=vm_list_rest_call,
                                     headers=vca.vcloud_session.get_vcloud_headers(),
@@ -2624,15 +2870,16 @@ class vimconnector(vimconn.vimconnector):
         if network_uuid is None:
             return None
 
-        url_list = [self.vca.host, '/api/network/', network_uuid]
+        url_list = [self.url, '/api/network/', network_uuid]
         vm_list_rest_call = ''.join(url_list)
 
-        if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
-            response = Http.get(url=vm_list_rest_call,
-                                headers=self.vca.vcloud_session.get_vcloud_headers(),
-                                verify=self.vca.verify,
-                                logger=self.vca.logger)
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
 
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
             #Retry login if session expired & retry sending request
             if response.status_code == 403:
                 response = self.retry_rest('GET', vm_list_rest_call)
@@ -2713,22 +2960,21 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                 The return None or XML respond or false
         """
-
-        vca = self.connect_as_admin()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
+        client = self.connect_as_admin()
+        if not client:
+            raise vimconn.vimconnConnectionException("Failed to connect to vCD as admin")
         if network_uuid is None:
             return False
 
-        url_list = [vca.host, '/api/admin/network/', network_uuid]
+        url_list = [self.url, '/api/admin/network/', network_uuid]
         vm_list_rest_call = ''.join(url_list)
 
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
-            response = Http.delete(url=vm_list_rest_call,
-                                   headers=vca.vcloud_session.get_vcloud_headers(),
-                                   verify=vca.verify,
-                                   logger=vca.logger)
-
+        if client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']} 
+            response = self.perform_request(req_type='DELETE',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
             if response.status_code == 202:
                 return True
 
@@ -2787,20 +3033,22 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                 The return network uuid or return None
         """
-
-        vca = self.connect_as_admin()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
+        client_as_admin = self.connect_as_admin()
+        if not client_as_admin:
+            raise vimconn.vimconnConnectionException("Failed to connect to vCD.")
         if network_name is None:
             return None
 
-        url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
+        url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
         vm_list_rest_call = ''.join(url_list)
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
-            response = Http.get(url=vm_list_rest_call,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+
+        if client_as_admin._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
+
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
 
             provider_network = None
             available_networks = None
@@ -2827,17 +3075,14 @@ class vimconnector(vimconn.vimconnector):
                     return None
 
             # find  pvdc provided available network
-            response = Http.get(url=provider_network,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+            response = self.perform_request(req_type='GET',
+                                            url=provider_network,
+                                            headers=headers)
             if response.status_code != requests.codes.ok:
                 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                           response.status_code))
                 return None
 
-            # available_networks.split("/")[-1]
-
             if parent_network_uuid is None:
                 try:
                     vm_list_xmlroot = XmlElementTree.fromstring(response.content)
@@ -2899,7 +3144,7 @@ class vimconnector(vimconn.vimconnector):
             # either use client provided UUID or search for a first available
             #  if both are not defined we return none
             if parent_network_uuid is not None:
-                url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
+                url_list = [self.url, '/api/admin/network/', parent_network_uuid]
                 add_vdc_rest_url = ''.join(url_list)
 
             #Creating all networks as Direct Org VDC type networks.
@@ -2938,32 +3183,26 @@ class vimconnector(vimconn.vimconnector):
                                                     dhcp_start_address, dhcp_end_address, available_networks,
                                                     fence_mode, isshared)
 
-            headers = vca.vcloud_session.get_vcloud_headers()
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
             try:
-                response = Http.post(url=add_vdc_rest_url,
-                                     headers=headers,
-                                     data=data,
-                                     verify=vca.verify,
-                                     logger=vca.logger)
+                response = self.perform_request(req_type='POST',
+                                           url=add_vdc_rest_url,
+                                           headers=headers,
+                                           data=data)
 
                 if response.status_code != 201:
                     self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
                                       .format(response.status_code,response.content))
                 else:
-                    network = networkType.parseString(response.content, True)
-                    create_nw_task = network.get_Tasks().get_Task()[0]
-
-                    # if we all ok we respond with content after network creation completes
-                    # otherwise by default return None
-                    if create_nw_task is not None:
-                        self.logger.debug("Create Network REST : Waiting for Network creation complete")
-                        status = vca.block_until_completed(create_nw_task)
-                        if status:
-                            return response.content
-                        else:
-                            self.logger.debug("create_network_rest task failed. Network Create response : {}"
-                                              .format(response.content))
+                    network_task = self.get_task_from_response(response.content)
+                    self.logger.debug("Create Network REST : Waiting for Network creation complete")
+                    time.sleep(5)
+                    result = self.client.get_task_monitor().wait_for_success(task=network_task)
+                    if result.get('status') == 'success':   
+                        return response.content
+                    else:
+                        self.logger.debug("create_network_rest task failed. Network Create response : {}"
+                                          .format(response.content))
             except Exception as exp:
                 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
 
@@ -2999,11 +3238,13 @@ class vimconnector(vimconn.vimconnector):
                 The return xml content of respond or None
         """
 
-        url_list = [vca.host, '/api/admin']
-        response = Http.get(url=''.join(url_list),
-                            headers=vca.vcloud_session.get_vcloud_headers(),
-                            verify=vca.verify,
-                            logger=vca.logger)
+        url_list = [self.url, '/api/admin']
+        if vca:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=''.join(url_list),
+                                            headers=headers)
 
         if response.status_code == requests.codes.ok:
             return response.content
@@ -3030,7 +3271,7 @@ class vimconnector(vimconn.vimconnector):
     def create_vdc_from_tmpl_rest(self, vdc_name=None):
         """
         Method create vdc in vCloud director based on VDC template.
-        it uses pre-defined template that must be named openmano
+        It uses a pre-defined VDC template.
 
         Args:
             vdc_name -  name of a new vdc.
@@ -3038,20 +3279,22 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                 The return xml content of respond or None
         """
-
+        # pre-requisite: at least one VDC template should be available in vCD
         self.logger.info("Creating new vdc {}".format(vdc_name))
-        vca = self.connect()
+        vca = self.connect_as_admin()
         if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
+            raise vimconn.vimconnConnectionException("Failed to connect to vCD")
         if vdc_name is None:
             return None
 
-        url_list = [vca.host, '/api/vdcTemplates']
+        url_list = [self.url, '/api/vdcTemplates']
         vm_list_rest_call = ''.join(url_list)
-        response = Http.get(url=vm_list_rest_call,
-                            headers=vca.vcloud_session.get_vcloud_headers(),
-                            verify=vca.verify,
-                            logger=vca.logger)
+
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                    'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+        response = self.perform_request(req_type='GET',
+                                        url=vm_list_rest_call,
+                                        headers=headers)
 
         # container url to a template
         vdc_template_ref = None
@@ -3074,20 +3317,22 @@ class vimconnector(vimconn.vimconnector):
 
         try:
             # instantiate vdc
-            url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
+            url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
             vm_list_rest_call = ''.join(url_list)
             data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
                                         <Source href="{1:s}"></Source>
                                         <Description>opnemano</Description>
                                         </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
-            headers = vca.vcloud_session.get_vcloud_headers()
+
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
-            response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
-                                 logger=vca.logger)
 
-            vdc_task = taskType.parseString(response.content, True)
-            if type(vdc_task) is GenericTask:
-                self.vca.block_until_completed(vdc_task)
+            response = self.perform_request(req_type='POST',
+                                            url=vm_list_rest_call,
+                                            headers=headers,
+                                            data=data)
+
+            vdc_task = self.get_task_from_response(response.content)
+            self.client.get_task_monitor().wait_for_success(task=vdc_task)
 
             # if we all ok we respond with content otherwise by default None
             if response.status_code >= 200 and response.status_code < 300:
@@ -3104,29 +3349,28 @@ class vimconnector(vimconn.vimconnector):
         Method create network in vCloud director
 
         Args:
-            network_name - is network name to be created.
-            parent_network_uuid - is parent provider vdc network that will be used for mapping.
-            It optional attribute. by default if no parent network indicate the first available will be used.
-
+            vdc_name - vdc name to be created
             Returns:
-                The return network uuid or return None
+                The REST response content or None
         """
 
         self.logger.info("Creating new vdc {}".format(vdc_name))
 
         vca = self.connect_as_admin()
         if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
+            raise vimconn.vimconnConnectionException("Failed to connect to vCD")
         if vdc_name is None:
             return None
 
-        url_list = [vca.host, '/api/admin/org/', self.org_uuid]
+        url_list = [self.url, '/api/admin/org/', self.org_uuid]
         vm_list_rest_call = ''.join(url_list)
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
-            response = Http.get(url=vm_list_rest_call,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+
+        if vca._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                      'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
 
             provider_vdc_ref = None
             add_vdc_rest_url = None
@@ -3176,10 +3420,12 @@ class vimconnector(vimconn.vimconnector):
                                                                                                   escape(vdc_name),
                                                                                                   provider_vdc_ref)
 
-                    headers = vca.vcloud_session.get_vcloud_headers()
                     headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
-                    response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
-                                         logger=vca.logger)
+
+                    response = self.perform_request(req_type='POST',
+                                                    url=add_vdc_rest_url,
+                                                    headers=headers,
+                                                    data=data)
 
                     # if we all ok we respond with content otherwise by default None
                     if response.status_code == 201:
@@ -3203,21 +3449,22 @@ class vimconnector(vimconn.vimconnector):
         if need_admin_access:
             vca = self.connect_as_admin()
         else:
-            vca = self.vca
+            vca = self.client 
 
         if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
+            raise vimconn.vimconnConnectionException("Failed to connect to vCD")
         if vapp_uuid is None:
             return None
 
-        url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
+        url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
         get_vapp_restcall = ''.join(url_list)
 
-        if vca.vcloud_session and vca.vcloud_session.organization:
-            response = Http.get(url=get_vapp_restcall,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+        if vca._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}  
+            response = self.perform_request(req_type='GET',
+                                            url=get_vapp_restcall,
+                                            headers=headers)
 
             if response.status_code == 403:
                 if need_admin_access == False:
@@ -3327,20 +3574,21 @@ class vimconnector(vimconn.vimconnector):
                 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
         return parsed_respond
 
-    def acuire_console(self, vm_uuid=None):
+    def acquire_console(self, vm_uuid=None):
 
         if vm_uuid is None:
             return None
-
-        if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
-            vm_dict = self.get_vapp_details_rest(self, vapp_uuid=vm_uuid)
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
             console_dict = vm_dict['acquireTicket']
             console_rest_call = console_dict['href']
 
-            response = Http.post(url=console_rest_call,
-                                 headers=self.vca.vcloud_session.get_vcloud_headers(),
-                                 verify=self.vca.verify,
-                                 logger=self.vca.logger)
+            response = self.perform_request(req_type='POST',
+                                            url=console_rest_call,
+                                            headers=headers)
+
             if response.status_code == 403:
                 response = self.retry_rest('POST', console_rest_call)
 
@@ -3402,11 +3650,12 @@ class vimconnector(vimconn.vimconnector):
         if disk_href is None or disk_size is None:
             return None
 
-        if self.vca.vcloud_session and self.vca.vcloud_session.organization:
-            response = Http.get(url=disk_href,
-                                headers=self.vca.vcloud_session.get_vcloud_headers(),
-                                verify=self.vca.verify,
-                                logger=self.vca.logger)
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=disk_href,
+                                            headers=headers)
 
         if response.status_code == 403:
             response = self.retry_rest('GET', disk_href)
@@ -3418,6 +3667,8 @@ class vimconnector(vimconn.vimconnector):
         try:
             lxmlroot_respond = lxmlElementTree.fromstring(response.content)
             namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+            #For python3
+            #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
             namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
 
             for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
@@ -3431,14 +3682,12 @@ class vimconnector(vimconn.vimconnector):
                                              xml_declaration=True)
 
             #Send PUT request to modify disk size
-            headers = self.vca.vcloud_session.get_vcloud_headers()
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
 
-            response = Http.put(url=disk_href,
-                                data=data,
-                                headers=headers,
-                                verify=self.vca.verify, logger=self.logger)
-
+            response = self.perform_request(req_type='PUT',
+                                                url=disk_href,
+                                                headers=headers,
+                                                data=data)
             if response.status_code == 403:
                 add_headers = {'Content-Type': headers['Content-Type']}
                 response = self.retry_rest('PUT', disk_href, add_headers, data)
@@ -3447,11 +3696,12 @@ class vimconnector(vimconn.vimconnector):
                 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
                                                                             response.status_code))
             else:
-                modify_disk_task = taskType.parseString(response.content, True)
-                if type(modify_disk_task) is GenericTask:
-                    status = self.vca.block_until_completed(modify_disk_task)
-                    return status
-
+                modify_disk_task = self.get_task_from_response(response.content)
+                result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
+                if result.get('status') == 'success':
+                    return True
+                else:
+                    return False   
             return None
 
         except Exception as exp :
@@ -3838,11 +4088,14 @@ class vimconnector(vimconn.vimconnector):
         try:
             ip_address = None
             floating_ip = False
+            mac_address = None
             if 'floating_ip' in net: floating_ip = net['floating_ip']
 
             # Stub for ip_address feature
             if 'ip_address' in net: ip_address = net['ip_address']
 
+            if 'mac_address' in net: mac_address = net['mac_address']
+
             if floating_ip:
                 allocation_mode = "POOL"
             elif ip_address:
@@ -3851,15 +4104,16 @@ class vimconnector(vimconn.vimconnector):
                 allocation_mode = "DHCP"
 
             if not nic_type:
-                for vms in vapp._get_vms():
-                    vm_id = (vms.id).split(':')[-1]
+                for vms in vapp.get_all_vms():
+                    vm_id = vms.get('id').split(':')[-1]
 
-                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
 
-                    response = Http.get(url=url_rest_call,
-                                        headers=self.vca.vcloud_session.get_vcloud_headers(),
-                                        verify=self.vca.verify,
-                                        logger=self.vca.logger)
+                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}  
+                    response = self.perform_request(req_type='GET',
+                                                    url=url_rest_call,
+                                                    headers=headers)
 
                     if response.status_code == 403:
                         response = self.retry_rest('GET', url_rest_call)
@@ -3873,6 +4127,7 @@ class vimconnector(vimconn.vimconnector):
                                                                          "network connection section")
 
                     data = response.content
+                    data = data.split('<Link rel="edit"')[0]
                     if '<PrimaryNetworkConnectionIndex>' not in data:
                         item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                 <NetworkConnection network="{}">
@@ -3886,7 +4141,11 @@ class vimconnector(vimconn.vimconnector):
                             ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                             item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
 
-                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
+                        if mac_address:
+                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                            item =  item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
+
+                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
                     else:
                         new_item = """<NetworkConnection network="{}">
                                     <NetworkConnectionIndex>{}</NetworkConnectionIndex>
@@ -3899,13 +4158,18 @@ class vimconnector(vimconn.vimconnector):
                             ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                             new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
 
-                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
+                        if mac_address:
+                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                            new_item =  new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
+
+                        data = data + new_item + '</NetworkConnectionSection>'
 
-                    headers = self.vca.vcloud_session.get_vcloud_headers()
                     headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
-                    response = Http.put(url=url_rest_call, headers=headers, data=data,
-                                                                   verify=self.vca.verify,
-                                                                   logger=self.vca.logger)
+
+                    response = self.perform_request(req_type='PUT',
+                                                    url=url_rest_call,
+                                                    headers=headers,
+                                                    data=data)
 
                     if response.status_code == 403:
                         add_headers = {'Content-Type': headers['Content-Type']}
@@ -3919,24 +4183,26 @@ class vimconnector(vimconn.vimconnector):
                         raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                                             "network connection section")
                     else:
-                        nic_task = taskType.parseString(response.content, True)
-                        if isinstance(nic_task, GenericTask):
-                            self.vca.block_until_completed(nic_task)
+                        nic_task = self.get_task_from_response(response.content)
+                        result = self.client.get_task_monitor().wait_for_success(task=nic_task)  
+                        if result.get('status') == 'success':
                             self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
                                                                "default NIC type".format(vm_id))
                         else:
                             self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
                                                               "connect NIC type".format(vm_id))
             else:
-                for vms in vapp._get_vms():
-                    vm_id = (vms.id).split(':')[-1]
+                for vms in vapp.get_all_vms():
+                    vm_id = vms.get('id').split(':')[-1]
 
-                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
 
-                    response = Http.get(url=url_rest_call,
-                                        headers=self.vca.vcloud_session.get_vcloud_headers(),
-                                        verify=self.vca.verify,
-                                        logger=self.vca.logger)
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+
+                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                    response = self.perform_request(req_type='GET',
+                                                    url=url_rest_call,
+                                                    headers=headers)
 
                     if response.status_code == 403:
                         response = self.retry_rest('GET', url_rest_call)
@@ -3949,6 +4215,7 @@ class vimconnector(vimconn.vimconnector):
                         raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
                                                                         "network connection section")
                     data = response.content
+                    data = data.split('<Link rel="edit"')[0]
                     if '<PrimaryNetworkConnectionIndex>' not in data:
                         item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
                                 <NetworkConnection network="{}">
@@ -3963,7 +4230,11 @@ class vimconnector(vimconn.vimconnector):
                             ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                             item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
 
-                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
+                        if mac_address:
+                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                            item =  item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag)) 
+
+                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
                     else:
                         new_item = """<NetworkConnection network="{}">
                                     <NetworkConnectionIndex>{}</NetworkConnectionIndex>
@@ -3977,13 +4248,18 @@ class vimconnector(vimconn.vimconnector):
                             ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
                             new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
 
-                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
+                        if mac_address:
+                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                            new_item =  new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
+
+                        data = data + new_item + '</NetworkConnectionSection>'
 
-                    headers = self.vca.vcloud_session.get_vcloud_headers()
                     headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
-                    response = Http.put(url=url_rest_call, headers=headers, data=data,
-                                                                   verify=self.vca.verify,
-                                                                   logger=self.vca.logger)
+
+                    response = self.perform_request(req_type='PUT',
+                                                    url=url_rest_call,
+                                                    headers=headers,
+                                                    data=data)
 
                     if response.status_code == 403:
                         add_headers = {'Content-Type': headers['Content-Type']}
@@ -3997,9 +4273,9 @@ class vimconnector(vimconn.vimconnector):
                         raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
                                                                            "network connection section")
                     else:
-                        nic_task = taskType.parseString(response.content, True)
-                        if isinstance(nic_task, GenericTask):
-                            self.vca.block_until_completed(nic_task)
+                        nic_task = self.get_task_from_response(response.content)
+                        result = self.client.get_task_monitor().wait_for_success(task=nic_task)
+                        if result.get('status') == 'success':
                             self.logger.info("add_network_adapter_to_vms(): VM {} "\
                                                "conneced to NIC type {}".format(vm_id, nic_type))
                         else:
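The add_network_adapter_to_vms() hunks above swap the old targeted data.replace() for a blunter but workable approach: cut the returned NetworkConnectionSection at the first <Link rel="edit" element, append the newly built <NetworkConnection> item (now optionally carrying <IpAddress> and <MACAddress> tags), and close the section again so the PUT body stays well-formed. A hedged sketch of that string surgery, with a hypothetical helper name:

    def patch_connection_section(section_xml, new_item):
        """Mirror of the approach used above: drop everything from the first
        edit Link onward, append the new NetworkConnection item, then re-close
        the section element."""
        trimmed = section_xml.split('<Link rel="edit"')[0]
        return trimmed + new_item + '</NetworkConnectionSection>'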
@@ -4088,6 +4364,7 @@ class vimconnector(vimconn.vimconnector):
 
                 self.logger.debug("cloud_init : Guest os customization started..")
                 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
+                customize_script = customize_script.replace("&","&amp;")
                 self.guest_customization(vapp, customize_script)
 
         except Exception as exp:
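The added '&' to '&amp;' replacement is needed because the customization script is later embedded in a <CustomizationScript> XML element. A small hedged sketch showing that the standard library's escape() produces the same result for this case and would also cover '<' and '>' if the generated script ever contained them:

    from xml.sax.saxutils import escape

    script = 'echo started && touch /root/done'
    # What the patch does:
    assert script.replace("&", "&amp;") == 'echo started &amp;&amp; touch /root/done'
    # escape() matches here and additionally handles '<' and '>':
    assert escape(script) == script.replace("&", "&amp;")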
@@ -4097,8 +4374,7 @@ class vimconnector(vimconn.vimconnector):
                                                                "ssh-key".format(exp))
 
     def format_script(self, key_pairs=[], users_list=[]):
-        bash_script = """
-        #!/bin/bash
+        bash_script = """#!/bin/sh
         echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
         if [ "$1" = "precustomization" ];then
             echo performing precustomization tasks   on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
@@ -4162,11 +4438,49 @@ class vimconnector(vimconn.vimconnector):
         vapp - Vapp object
         customize_script - Customize script to be run at first boot of VM.
         """
-        for vm in vapp._get_vms():
-            vm_name = vm.name
-            task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
-            if isinstance(task, GenericTask):
-                self.vca.block_until_completed(task)
+        for vm in vapp.get_all_vms():
+            vm_id = vm.get('id').split(':')[-1]
+            vm_name = vm.get('name') 
+            vm_name = vm_name.replace('_','-')    
+             
+            vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+            headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
+
+            data = """<GuestCustomizationSection
+                           xmlns="http://www.vmware.com/vcloud/v1.5"
+                           xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
+                           ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
+                           <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
+                           <Enabled>true</Enabled>
+                           <ChangeSid>false</ChangeSid>
+                           <VirtualMachineId>{}</VirtualMachineId>
+                           <JoinDomainEnabled>false</JoinDomainEnabled>
+                           <UseOrgSettings>false</UseOrgSettings>
+                           <AdminPasswordEnabled>false</AdminPasswordEnabled>
+                           <AdminPasswordAuto>true</AdminPasswordAuto>
+                           <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
+                           <AdminAutoLogonCount>0</AdminAutoLogonCount>
+                           <ResetPasswordRequired>false</ResetPasswordRequired>
+                           <CustomizationScript>{}</CustomizationScript>
+                           <ComputerName>{}</ComputerName>
+                           <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
+                       </GuestCustomizationSection> 
+                   """.format(vm_customization_url,
+                                             vm_id,
+                                  customize_script,
+                                           vm_name,
+                              vm_customization_url)  
+
+            response = self.perform_request(req_type='PUT',
+                                             url=vm_customization_url,
+                                             headers=headers,
+                                             data=data)
+            if response.status_code == 202:
+                guest_task = self.get_task_from_response(response.content)
+                self.client.get_task_monitor().wait_for_success(task=guest_task)
                 self.logger.info("guest_customization : customized guest os task "\
                                              "completed for VM {}".format(vm_name))
             else:
@@ -4222,11 +4536,12 @@ class vimconnector(vimconn.vimconnector):
             Returns: Status of add new disk task
         """
         status = False
-        if self.vca.vcloud_session and self.vca.vcloud_session.organization:
-            response = Http.get(url=disk_href,
-                                headers=self.vca.vcloud_session.get_vcloud_headers(),
-                                verify=self.vca.verify,
-                                logger=self.vca.logger)
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}  
+            response = self.perform_request(req_type='GET',
+                                            url=disk_href,
+                                            headers=headers)
 
         if response.status_code == 403:
             response = self.retry_rest('GET', disk_href)
@@ -4239,6 +4554,8 @@ class vimconnector(vimconn.vimconnector):
             #Find but type & max of instance IDs assigned to disks
             lxmlroot_respond = lxmlElementTree.fromstring(response.content)
             namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+            #For python3
+            #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
             namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
             instance_id = 0
             for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
@@ -4268,13 +4585,12 @@ class vimconnector(vimconn.vimconnector):
             new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
 
             # Send PUT request to modify virtual hardware section with new disk
-            headers = self.vca.vcloud_session.get_vcloud_headers()
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
 
-            response = Http.put(url=disk_href,
-                                data=new_data,
-                                headers=headers,
-                                verify=self.vca.verify, logger=self.logger)
+            response = self.perform_request(req_type='PUT',
+                                            url=disk_href,
+                                            data=new_data,
+                                            headers=headers)
 
             if response.status_code == 403:
                 add_headers = {'Content-Type': headers['Content-Type']}
@@ -4284,11 +4600,12 @@ class vimconnector(vimconn.vimconnector):
                 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
                                   .format(disk_href, response.status_code, response.content))
             else:
-                add_disk_task = taskType.parseString(response.content, True)
-                if type(add_disk_task) is GenericTask:
-                    status = self.vca.block_until_completed(add_disk_task)
-                    if not status:
-                        self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
+                add_disk_task = self.get_task_from_response(response.content)
+                result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
+                if result.get('status') == 'success':  
+                    status = True
+                else:
+                    self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb)) 
 
         except Exception as exp:
             self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
@@ -4485,7 +4802,6 @@ class vimconnector(vimconn.vimconnector):
                 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
                 if vm_details and "vm_vcenter_info" in vm_details:
                     vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
-
             return vm_moref_id
 
         except Exception as exp:
@@ -4508,18 +4824,22 @@ class vimconnector(vimconn.vimconnector):
 
         vca = self.connect_as_admin()
         if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
+            raise vimconn.vimconnConnectionException("Failed to connect to vCD")
 
         try:
+            org, vdc = self.get_vdc_details()
             catalog = self.get_catalog_obj(image_id, catalogs)
             if catalog:
-                template_name = self.get_catalogbyid(image_id, catalogs)
-                catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
+                items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
+                catalog_items = [items.attrib]
+
                 if len(catalog_items) == 1:
-                    response = Http.get(catalog_items[0].get_href(),
-                                        headers=vca.vcloud_session.get_vcloud_headers(),
-                                        verify=vca.verify,
-                                        logger=vca.logger)
+                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']} 
+
+                    response = self.perform_request(req_type='GET',
+                                                    url=catalog_items[0].get('href'),
+                                                    headers=headers)
                     catalogItem = XmlElementTree.fromstring(response.content)
                     entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
                     vapp_tempalte_href = entity.get("href")
@@ -4534,12 +4854,10 @@ class vimconnector(vimconn.vimconnector):
                                   'xmlns':"http://www.vmware.com/vcloud/v1.5"
                                 }
 
-                    if vca.vcloud_session and vca.vcloud_session.organization:
-                        response = Http.get(url=vapp_tempalte_href,
-                                            headers=vca.vcloud_session.get_vcloud_headers(),
-                                            verify=vca.verify,
-                                            logger=vca.logger
-                                            )
+                    if vca._session:
+                        response = self.perform_request(req_type='GET',
+                                                    url=vapp_tempalte_href,
+                                                    headers=headers)
 
                         if response.status_code != requests.codes.ok:
                             self.logger.debug("REST API call {} failed. Return status code {}".format(
@@ -4973,6 +5291,8 @@ class vimconnector(vimconn.vimconnector):
                 self.persistent_info["used_vlanIDs"] = {}
         else:
             used_ids = self.persistent_info["used_vlanIDs"].values()
+            #For python3
+            #used_ids = list(self.persistent_info["used_vlanIDs"].values())
 
         for vlanID_range in self.config.get('vlanID_range'):
             start_vlanid , end_vlanid = vlanID_range.split("-")
@@ -4981,6 +5301,8 @@ class vimconnector(vimconn.vimconnector):
                                                                         vlanID_range))
 
             for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
+            #For python3
+            #for id in range(int(start_vlanid), int(end_vlanid) + 1):
                 if id not in used_ids:
                     vlan_id = id
                     self.persistent_info["used_vlanIDs"][network_name] = vlan_id
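The vlanID handling above is a first-free scan over the configured "start-end" ranges, skipping ids already recorded in persistent_info. A self-contained sketch of that logic, using the python3 range() form the in-line comments point to:

    def get_free_vlan(vlan_ranges, used_ids):
        """First-fit VLAN allocation as in the hunk above: walk each
        "start-end" range and return the first id not already in use."""
        for vlan_range in vlan_ranges:
            start_vlanid, end_vlanid = vlan_range.split("-")
            for vlan_id in range(int(start_vlanid), int(end_vlanid) + 1):
                if vlan_id not in used_ids:
                    return vlan_id
        return None

    # Example: with 100 and 101 taken, the next free id in "100-102" is 102
    assert get_free_vlan(["100-102"], [100, 101]) == 102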
@@ -5012,11 +5334,13 @@ class vimconnector(vimconn.vimconnector):
         vca = self.connect()
         try:
             # fetching catalog details
-            rest_url = "{}/api/catalog/{}".format(vca.host,image_id)
-            response = Http.get(url=rest_url,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+            rest_url = "{}/api/catalog/{}".format(self.url, image_id)
+            if vca._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}  
+                response = self.perform_request(req_type='GET',
+                                                url=rest_url,
+                                                headers=headers)
 
             if response.status_code != 200:
                 self.logger.error("REST call {} failed reason : {}"\
@@ -5038,31 +5362,30 @@ class vimconnector(vimconn.vimconnector):
                         id="urn:vcloud:media:{}"
                         href="https://{}/api/media/{}"/>
                      </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
-                                                                vca.host,media_id)
+                                                                self.url,media_id)
 
-                for vms in vapp._get_vms():
-                    vm_id = (vms.id).split(':')[-1]
+                for vms in vapp.get_all_vms():
+                    vm_id = vms.get('id').split(':')[-1]
 
-                    headers = vca.vcloud_session.get_vcloud_headers()
                     headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
-                    rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(vca.host,vm_id)
+                    rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
 
-                    response = Http.post(url=rest_url,
-                                      headers=headers,
-                                            data=data,
-                                    verify=vca.verify,
-                                    logger=vca.logger)
+                    response = self.perform_request(req_type='POST',
+                                                    url=rest_url,
+                                                    data=data,
+                                                    headers=headers)
 
                     if response.status_code != 202:
                         self.logger.error("Failed to insert CD-ROM to vm")
                         raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
                                                                                     "ISO image to vm")
                     else:
-                        task = taskType.parseString(response.content, True)
-                        if isinstance(task, GenericTask):
-                            vca.block_until_completed(task)
+                        task = self.get_task_from_response(response.content)
+                        result = self.client.get_task_monitor().wait_for_success(task=task)
+                        if result.get('status') == 'success':
                             self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
                                                                     " image to vm {}".format(vm_id))
+
         except Exception as exp:
             self.logger.error("insert_media_to_vm() : exception occurred "\
                                             "while inserting media CD-ROM")
@@ -5086,10 +5409,11 @@ class vimconnector(vimconn.vimconnector):
                 if cataloghref_list is not None:
                     for href in cataloghref_list:
                         if href:
-                            response = Http.get(url=href,
-                                        headers=vca.vcloud_session.get_vcloud_headers(),
-                                        verify=vca.verify,
-                                        logger=vca.logger)
+                            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                                       'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                            response = self.perform_request(req_type='GET',
+                                                            url=href,
+                                                            headers=headers)
                             if response.status_code != 200:
                                 self.logger.error("REST call {} failed reason : {}"\
                                              "status code : {}".format(href,
@@ -5128,33 +5452,31 @@ class vimconnector(vimconn.vimconnector):
         #Get token
         self.get_token()
 
-        headers=self.vca.vcloud_session.get_vcloud_headers()
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
 
         if add_headers:
             headers.update(add_headers)
 
         if method == 'GET':
-            response = Http.get(url=url,
-                                headers=headers,
-                                verify=self.vca.verify,
-                                logger=self.vca.logger)
+            response = self.perform_request(req_type='GET',
+                                            url=url,
+                                            headers=headers)
         elif method == 'PUT':
-            response = Http.put(url=url,
-                                data=data,
-                                headers=headers,
-                                verify=self.vca.verify,
-                                logger=self.logger)
+            response = self.perform_request(req_type='PUT',
+                                            url=url,
+                                            headers=headers,
+                                            data=data)     
         elif method == 'POST':
-            response = Http.post(url=url,
-                                 headers=headers,
-                                 data=data,
-                                 verify=self.vca.verify,
-                                 logger=self.vca.logger)
+            response = self.perform_request(req_type='POST',
+                                            url=url,
+                                            headers=headers,
+                                            data=data) 
         elif method == 'DELETE':
-            response = Http.delete(url=url,
-                                 headers=headers,
-                                 verify=self.vca.verify,
-                                 logger=self.vca.logger)
+            response = self.perform_request(req_type='DELETE',
+                                            url=url,
+                                            headers=headers)
         return response
 
 
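retry_rest() above is what every call site in this patch falls back to on a 403: get_token() refreshes the vCD session and the original request is replayed with the new x-vcloud-authorization header. A hedged usage sketch of that pattern; conn stands in for a vimconnector instance and is the only assumed name:

    import requests

    def get_with_retry(conn, url, headers):
        """Issue a GET through perform_request(); if the session has expired
        (403), let retry_rest() refresh the token and replay the call."""
        response = conn.perform_request(req_type='GET', url=url, headers=headers)
        if response.status_code == 403:
            response = conn.retry_rest('GET', url)
        if response.status_code == requests.codes.ok:
            return response.content
        return None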
@@ -5162,51 +5484,135 @@ class vimconnector(vimconn.vimconnector):
         """ Generate a new token if expired
 
             Returns:
-                The return vca object that letter can be used to connect to vCloud director as admin for VDC
+                The client object that can later be used to connect to vCloud director as admin for the VDC
         """
-        vca = None
-
         try:
             self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
                                                                                       self.user,
                                                                                       self.org_name))
-            vca = VCA(host=self.url,
-                      username=self.user,
-                      service_type=STANDALONE,
-                      version=VCAVERSION,
-                      verify=False,
-                      log=False)
-
-            result = vca.login(password=self.passwd, org=self.org_name)
-            if result is True:
-                result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
-                if result is True:
-                    self.logger.info(
-                        "Successfully generated token for vcloud direct org: {} as user: {}".format(self.org_name, self.user))
-                    #Update vca
-                    self.vca = vca
-                    return
+            host = self.url
+            client = Client(host, verify_ssl_certs=False)
+            client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
+            # connection object
+            self.client = client    
 
         except:
             raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
                                                      "{} as user: {}".format(self.org_name, self.user))
 
-        if not vca or not result:
-            raise vimconn.vimconnConnectionException("self.connect() is failed while reconnecting")
+        if not client:
+            raise vimconn.vimconnConnectionException("Failed to reconnect to vCloud Director")
 
 
     def get_vdc_details(self):
         """ Get VDC details using pyVcloud Lib
 
-            Returns vdc object
+            Returns the org and vdc objects
         """
-        vdc = self.vca.get_vdc(self.tenant_name)
+        org = Org(self.client, resource=self.client.get_org())
+        vdc = org.get_vdc(self.tenant_name)
 
         #Retry once, if failed by refreshing token
         if vdc is None:
             self.get_token()
-            vdc = self.vca.get_vdc(self.tenant_name)
+            vdc = org.get_vdc(self.tenant_name)
+
+        return org, vdc
+
+
+    def perform_request(self, req_type, url, headers=None, data=None):
+        """Perform the POST/PUT/GET/DELETE request."""
+
+        #Log REST request details
+        self.log_request(req_type, url=url, headers=headers, data=data)
+        # perform request and return its result
+        if req_type == 'GET':
+            response = requests.get(url=url,
+                                    headers=headers,
+                                    verify=False)
+        elif req_type == 'PUT':
+            response = requests.put(url=url,
+                                    headers=headers,
+                                    data=data,
+                                    verify=False)
+        elif req_type == 'POST':
+            response = requests.post(url=url,
+                                     headers=headers,
+                                     data=data,
+                                     verify=False)
+        elif req_type == 'DELETE':
+            response = requests.delete(url=url,
+                                       headers=headers,
+                                       verify=False)
+        #Log the REST response
+        self.log_response(response)
+
+        return response
+
 
-        return vdc
+    def log_request(self, req_type, url=None, headers=None, data=None):
+        """Logs REST request details"""
+
+        if req_type is not None:
+            self.logger.debug("Request type: {}".format(req_type))
+
+        if url is not None:
+            self.logger.debug("Request url: {}".format(url))
+
+        if headers is not None:
+            for header in headers:
+                self.logger.debug("Request header: {}: {}".format(header, headers[header]))
+
+        if data is not None:
+            self.logger.debug("Request data: {}".format(data))
+
+
+    def log_response(self, response):
+        """Logs REST response details"""
+
+        self.logger.debug("Response status code: {} ".format(response.status_code))
+
+
+    def get_task_from_response(self, content):
+        """
+        content - API response content (response.content)
+        return - Task object
+        """
+        xmlroot = XmlElementTree.fromstring(content)
+        if xmlroot.tag.split('}')[1] == "Task":
+            return xmlroot
+        else: 
+            for ele in xmlroot:
+                if ele.tag.split("}")[1] == "Tasks":
+                    task = ele[0]
+                    break  
+            return task
+
+
+    def power_on_vapp(self, vapp_id, vapp_name):
+        """
+        vapp_id - vApp uuid
+        vapp_name - vApp name
+        return - Task object
+        """
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+        poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
+                                                                         vapp_id)
+        response = self.perform_request(req_type='POST',
+                                        url=poweron_href,
+                                        headers=headers)
+
+        if response.status_code != 202:
+            self.logger.error("REST call {} failed with status code {}, reason: {}".format(
+                poweron_href, response.status_code, response.content))
+            raise vimconn.vimconnException("power_on_vapp(): Failed to power on "
+                                           "vApp {}".format(vapp_name))
+        else:
+            poweron_task = self.get_task_from_response(response.content)
+            return poweron_task
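
For reference, a minimal sketch of the pyvcloud 19.1.1 connection pattern this hunk moves to: SDK-level objects (Client, Org) replace the old VCA object, while raw vCloud Director REST calls, as in perform_request() and power_on_vapp(), reuse the session's x-vcloud-authorization token through the requests library. The endpoint, organisation, credentials and API version below are placeholders, not values from this patch.

    # Hedged sketch of the pyvcloud 19.1.1 Client-based login and a raw REST call.
    import requests
    from pyvcloud.vcd.client import Client, BasicLoginCredentials
    from pyvcloud.vcd.org import Org

    VCD_URL = "https://vcd.example.com"   # placeholder vCloud Director endpoint
    API_VERSION = "27.0"                  # assumed to match the connector's constant

    # Login: the Client object replaces the old VCA object
    client = Client(VCD_URL, verify_ssl_certs=False)
    client.set_credentials(BasicLoginCredentials("admin", "my-org", "secret"))

    # High-level lookups go through the SDK, as in get_vdc_details()
    org = Org(client, resource=client.get_org())
    vdc = org.get_vdc("my-vdc")

    # Raw REST calls reuse the session token, as perform_request() does
    headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
               'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
    response = requests.get(url=VCD_URL + "/api/org", headers=headers, verify=False)
    print(response.status_code)

With the same headers, a POST to .../power/action/powerOn is expected to answer 202 with a Task element, which is what get_task_from_response() extracts above.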
 
 
index 2bfca00..374cb18 100644 (file)
@@ -12,7 +12,7 @@ python-keystoneclient
 python-glanceclient
 python-neutronclient
 python-cinderclient
-pyvcloud==18.2.2
+pyvcloud==19.1.1
 pyvmomi
 progressbar
 prettytable
diff --git a/scenarios/examples/v3_3vnf_2vdu_1vnffg_nsd.yaml b/scenarios/examples/v3_3vnf_2vdu_1vnffg_nsd.yaml
new file mode 100644 (file)
index 0000000..228ce63
--- /dev/null
@@ -0,0 +1,104 @@
+nsd:nsd-catalog:
+    nsd:
+    -   id:          3vdu_2vnf_1vnffg_nsd
+        name:        3vdu_2vnf_1vnffg_ns-name
+        short-name:  3vdu_2vnf_1vnffg-sname
+        description: 3 VNFs, each with 2 cirros VDUs, and 1 VNFFG connecting them
+        vendor: OSM
+        version: '1.0'
+
+        logo: osm_2x.png
+
+        constituent-vnfd:
+            # The member-vnf-index needs to be unique, starting from 1
+            # vnfd-id-ref is the id of the VNFD
+            # Multiple constituent VNFDs can be specified
+        -   member-vnf-index: 1
+            vnfd-id-ref: 2vdu_vnfd
+        -   member-vnf-index: 2
+            vnfd-id-ref: 2vdu_vnfd
+        -   member-vnf-index: 3
+            vnfd-id-ref: 2vdu_vnfd
+
+        ip-profiles:
+        -   description: Inter VNF Link
+            ip-profile-params:
+                gateway-address: 31.31.31.210
+                ip-version: ipv4
+                subnet-address: 31.31.31.0/24
+                dns-server:
+                -   address: 8.8.8.8
+                -   address: 8.8.8.9
+                dhcp-params:
+                  count: 200
+                  start-address: 31.31.31.2
+            name: ipprofileA
+
+
+        vld:
+        # Networks for the VNFs
+            -   id: vld1
+                name: vld1-name
+                short-name: vld1-sname
+                type: ELAN
+                # vim-network-name: <update>
+                # provider-network:
+                #     overlay-type: VLAN
+                #     segmentation_id: <update>
+                ip-profile-ref: ipprofileA
+                vnfd-connection-point-ref:
+                # Specify the constituent VNFs
+                # member-vnf-index-ref - entry from constituent vnf
+                # vnfd-id-ref - VNFD id
+                # vnfd-connection-point-ref - connection point name in the VNFD
+                -   member-vnf-index-ref: 1
+                    vnfd-id-ref: 2vdu_vnfd
+                    vnfd-connection-point-ref: eth0
+                -   member-vnf-index-ref: 2
+                    vnfd-id-ref: 2vdu_vnfd
+                    vnfd-connection-point-ref: eth0
+                -   member-vnf-index-ref: 3
+                    vnfd-id-ref: 2vdu_vnfd
+                    vnfd-connection-point-ref: eth0
+
+
+        vnffgd:
+        # VNF Forwarding Graph Descriptors
+            -   id: vnffg1
+                name: vnffg1-name
+                short-name: vnffg1-sname
+                description: vnffg1-description
+                vendor: vnffg1-vendor
+                version: '1.0'
+                rsp:
+                -   id: rsp1
+                    name: rsp1-name
+                    vnfd-connection-point-ref:
+                    -   member-vnf-index-ref: 2
+                        order: 0
+                        vnfd-id-ref: 2vdu_vnfd
+                        vnfd-connection-point-ref: eth0
+                    -   member-vnf-index-ref: 3
+                        order: 1
+                        vnfd-id-ref: 2vdu_vnfd
+                        vnfd-connection-point-ref: eth0
+                classifier:
+                -   id: class1
+                    name: class1-name
+                    rsp-id-ref: rsp1
+                    member-vnf-index-ref: 1
+                    vnfd-id-ref: 2vdu_vnfd
+                    vnfd-connection-point-ref: eth0
+                    match-attributes:
+                    -   id: match1
+                        ip-proto: 6 # TCP
+                        source-ip-address: 10.0.0.1
+                        destination-ip-address: 10.0.0.2
+                        source-port: 0
+                        destination-port: 80
+                    -   id: match2
+                        ip-proto: 6 # TCP
+                        source-ip-address: 10.0.0.1
+                        destination-ip-address: 10.0.0.3
+                        source-port: 0
+                        destination-port: 80
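
The classifier above sits on member-vnf-index 1 and steers matched TCP flows into rsp1, which chains members 2 and 3 of the same 2vdu_vnfd. A Python 3 sketch, assuming PyYAML is installed and the descriptor is saved under the hypothetical path below, that checks the VNFFG references stay consistent with constituent-vnfd:

    # Hedged sanity check of the VNFFG cross-references in this NSD.
    import yaml

    with open("v3_3vnf_2vdu_1vnffg_nsd.yaml") as f:      # hypothetical path
        nsd = yaml.safe_load(f)["nsd:nsd-catalog"]["nsd"][0]

    members = {c["member-vnf-index"] for c in nsd["constituent-vnfd"]}

    for vnffg in nsd.get("vnffgd", []):
        # Every RSP hop must point at a declared constituent VNF
        for rsp in vnffg["rsp"]:
            for hop in rsp["vnfd-connection-point-ref"]:
                assert hop["member-vnf-index-ref"] in members, hop
        # Every classifier must point at a declared VNF and an existing RSP
        for cls in vnffg["classifier"]:
            assert cls["member-vnf-index-ref"] in members, cls
            assert any(r["id"] == cls["rsp-id-ref"] for r in vnffg["rsp"]), cls

    print("VNFFG references are consistent with constituent-vnfd")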
index 6be885e..5dd66c0 100755 (executable)
@@ -223,7 +223,8 @@ then
         "#################################################################\n"\
         "#####        UPDATE REPOSITORIES                            #####\n"\
         "#################################################################"
-    [ "$_DISTRO" == "Ubuntu" ] && apt-get update -y
+    [ "$_DISTRO" == "Ubuntu" ] && apt-get update -y &&
+        add-apt-repository -y cloud-archive:ocata && apt-get update -y
 
     [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && yum check-update -y
     [ "$_DISTRO" == "CentOS" ] && yum install -y epel-release
@@ -242,14 +243,18 @@ then
         "#################################################################\n"\
         "#####        INSTALL PYTHON PACKAGES                        #####\n"\
         "#################################################################"
-    [ "$_DISTRO" == "Ubuntu" ] && install_packages "python-yaml python-bottle python-mysqldb python-jsonschema python-paramiko python-argcomplete python-requests python-logutils libxml2-dev libxslt-dev python-dev python-pip python-crypto"
-    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "PyYAML MySQL-python python-jsonschema python-paramiko python-argcomplete python-requests python-logutils libxslt-devel libxml2-devel python-devel python-pip python-crypto"
+    [ "$_DISTRO" == "Ubuntu" ] && install_packages "python-yaml python-bottle python-mysqldb python-jsonschema "\
+        "python-paramiko python-argcomplete python-requests python-logutils libxml2-dev libxslt-dev python-dev "\
+        "python-pip python-crypto"
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "PyYAML MySQL-python python-jsonschema "\
+        "python-paramiko python-argcomplete python-requests python-logutils libxslt-devel libxml2-devel python-devel "\
+        "python-pip python-crypto"
     # The only way to install python-bottle on Centos7 is with easy_install or pip
     [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && easy_install -U bottle
 
     # required for vmware connector. TODO: move that to a separate opt-in install script
     pip install --upgrade pip || exit 1
-    pip install pyvcloud==18.2.2 || exit 1
+    pip install pyvcloud==19.1.1 || exit 1
     pip install progressbar || exit 1
     pip install prettytable || exit 1
     pip install pyvmomi || exit 1
index 01ce867..f8670d3 100755 (executable)
@@ -23,7 +23,7 @@ OSMRO_PATH=`python -c 'import osm_ro; print osm_ro.__path__[0]'`
 
 #Pip packages required for vmware connector
 pip install --upgrade pip
-pip install --upgrade pyvcloud==18.2.2
+pip install --upgrade pyvcloud==19.1.1
 pip install --upgrade progressbar
 pip install --upgrade prettytable
 pip install --upgrade pyvmomi
diff --git a/test/RO_tests/v3_2vdu_set_ip_mac/scenario_2vdu_set_ip_mac.yaml b/test/RO_tests/v3_2vdu_set_ip_mac/scenario_2vdu_set_ip_mac.yaml
new file mode 100644 (file)
index 0000000..4dfc5b0
--- /dev/null
@@ -0,0 +1,83 @@
+nsd:nsd-catalog:
+    nsd:
+    -   id:          test_2vdu_nsd
+        name:        test_2vdu_nsd_name
+        short-name:  test_2vdu_nsd_sname
+        description: 2 VNFs, each with 2 VDUs, with fixed IP and MAC addresses
+        vendor:      OSM
+        version:     '1.0'
+
+        # Place the logo as a png in the icons directory and provide the name here
+        logo:        osm_2x.png
+
+        # Specify the VNFDs that are part of this NSD
+        constituent-vnfd:
+            # The member-vnf-index needs to be unique, starting from 1
+            # vnfd-id-ref is the id of the VNFD
+            # Multiple constituent VNFDs can be specified
+        -   member-vnf-index: 1
+            vnfd-id-ref: test_2vdu
+        -   member-vnf-index: 2
+            vnfd-id-ref: test_2vdu2
+
+        ip-profiles:
+        -   description: Inter VNF Link
+            ip-profile-params:
+                gateway-address: 10.31.31.254
+                ip-version:      ipv4
+                subnet-address:  10.31.31.0/24
+                dns-server:
+                -   address: 8.8.8.8
+                -   address: 8.8.8.9 
+                dhcp-params:
+                  count: 200
+                  start-address: 10.31.31.20
+            name: ipprofileA
+        -   description: IP profile that disables dhcp server
+            ip-profile-params:
+                dhcp-params:
+                    enabled: 'false'
+                ip-version: ipv4
+            name: no_dhcp
+
+        vld:
+        # Networks for the VNFs
+        -   id:         vld1
+            name:       mgmt
+            short-name: vld1-sname
+            type:       ELAN
+            vnfd-connection-point-ref:
+            -   member-vnf-index-ref: 1
+                vnfd-id-ref: test_2vdu
+                vnfd-connection-point-ref: eth0
+            -   member-vnf-index-ref: 2
+                vnfd-id-ref: test_2vdu2
+                vnfd-connection-point-ref: eth0
+
+        -   id:         vld2
+            name:       vld2-name
+            short-name: vld2-sname
+            type:       ELAN
+            ip-profile-ref: ipprofileA
+            vnfd-connection-point-ref:
+            -   member-vnf-index-ref:      1
+                vnfd-id-ref:               test_2vdu
+                vnfd-connection-point-ref: eth1
+                ip-address:                10.31.31.4
+            -   member-vnf-index-ref:      2
+                vnfd-id-ref:               test_2vdu2
+                vnfd-connection-point-ref: eth1
+                ip-address:                10.31.31.5
+
+        -   id:         vld3
+            name:       vld3-name
+            short-name: vld3-sname
+            type:       ELAN
+            ip-profile-ref: no_dhcp
+            vnfd-connection-point-ref:
+            -   member-vnf-index-ref:      1
+                vnfd-id-ref:               test_2vdu
+                vnfd-connection-point-ref: eth4
+            -   member-vnf-index-ref:      2
+                vnfd-id-ref:               test_2vdu2
+                vnfd-connection-point-ref: eth4
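
vld2 pins a fixed ip-address on each connection point inside the ipprofileA subnet, while vld3 uses the no_dhcp profile. A Python 3 sketch (hypothetical file path; PyYAML and the standard ipaddress module assumed) that checks every fixed address falls inside the subnet of the ip-profile its VLD references:

    # Hedged check: fixed ip-address entries must lie in their ip-profile subnet.
    import ipaddress
    import yaml

    with open("scenario_2vdu_set_ip_mac.yaml") as f:     # hypothetical path
        nsd = yaml.safe_load(f)["nsd:nsd-catalog"]["nsd"][0]

    profiles = {p["name"]: p["ip-profile-params"] for p in nsd["ip-profiles"]}

    for vld in nsd["vld"]:
        params = profiles.get(vld.get("ip-profile-ref"), {})
        subnet = params.get("subnet-address")
        if not subnet:
            continue                                     # no subnet to check against
        net = ipaddress.ip_network(subnet)
        for cp in vld["vnfd-connection-point-ref"]:
            addr = cp.get("ip-address")
            if addr:
                assert ipaddress.ip_address(addr) in net, (vld["id"], addr)

    print("All fixed IP addresses fit their ip-profile subnets")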
diff --git a/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac.yaml b/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac.yaml
new file mode 100644 (file)
index 0000000..b6be27c
--- /dev/null
@@ -0,0 +1,93 @@
+vnfd-catalog:
+    vnfd:
+    -   connection-point:
+        -   name: eth0
+            type: VPORT
+        -   name: eth1
+            type: VPORT
+        -   name: eth4
+            type: VPORT
+        description: VNF with an internal VLD and fixed IP and MAC addresses
+        id: test_2vdu
+        name: test_2vdu_name
+        short-name: test_2vdu_sname
+        mgmt-interface:
+            cp: eth0
+        internal-vld:
+        -   description: Internal VL
+            id:          net_internal
+            name:        net_internal_name
+            short-name:  net_internal_sname
+            type:        ELAN
+            internal-connection-point:
+            -   id-ref:     eth2
+                ip-address: 10.10.133.4
+            -   id-ref:     eth3
+                ip-address: 10.10.133.5
+            ip-profile-ref: ip-profile1
+        ip-profiles:
+        -   description: Inter VNF Link
+            ip-profile-params:
+                gateway-address: 10.10.133.1
+                ip-version:      ipv4
+                subnet-address:  10.10.133.0/24
+                dhcp-params:
+                  count:         200
+                  start-address: 10.10.133.20
+            name: ip-profile1
+        vdu:
+        -   id: VM1
+            name: VM1-name
+            image: US1604
+            interface:
+            -   name: iface11
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: eth0
+                mac-address:   "52:33:44:55:66:77"
+            -   name: iface12
+                type: INTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                internal-connection-point-ref: eth2
+                mac-address:   "52:33:44:55:66:78"
+            -   name: iface13
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: eth4
+            internal-connection-point:
+            -   name: eth2-icp
+                id:   eth2
+                type: VPORT
+            vm-flavor:
+                memory-mb: '2048'
+                storage-gb: '8'
+                vcpu-count: '1'
+        -   id: VM2
+            image: US1604
+            name: VM2-name
+            interface:
+            -   name: iface21
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: eth1
+                mac-address:   52:33:44:55:66:79
+            -   name: iface22
+                type: INTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                internal-connection-point-ref: eth3
+                mac-address:   52:33:44:55:66:80
+            internal-connection-point:
+            -   name: eth3-icp
+                id:   eth3
+                type: VPORT
+            vm-flavor:
+                memory-mb: '2048'
+                storage-gb: '8'
+                vcpu-count: '1'
+        vendor: ROtest
+        version: '1.0'
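
Note that VM1 quotes its mac-address values while VM2 leaves them unquoted. A quick Python 3 check (hypothetical path; PyYAML assumed) that the descriptor still loads every MAC as a six-octet string and that none is duplicated across the two VDUs:

    # Hedged check of the mac-address entries declared in this VNFD.
    import yaml

    with open("vnfd_2vdu_set_ip_mac.yaml") as f:         # hypothetical path
        vnfd = yaml.safe_load(f)["vnfd-catalog"]["vnfd"][0]

    # Collect every mac-address declared on a VDU interface
    macs = [iface["mac-address"]
            for vdu in vnfd["vdu"]
            for iface in vdu["interface"]
            if "mac-address" in iface]

    assert all(isinstance(m, str) and len(m.split(":")) == 6 for m in macs), macs
    assert len(macs) == len(set(macs)), "duplicate MAC addresses"
    print("MACs load as strings and are unique:", macs)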
diff --git a/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac2.yaml b/test/RO_tests/v3_2vdu_set_ip_mac/vnfd_2vdu_set_ip_mac2.yaml
new file mode 100644 (file)
index 0000000..fe200bf
--- /dev/null
@@ -0,0 +1,93 @@
+vnfd-catalog:
+    vnfd:
+    -   connection-point:
+        -   name: eth0
+            type: VPORT
+        -   name: eth1
+            type: VPORT
+        -   name: eth4
+            type: VPORT
+        description: VNF with an internal VLD and fixed IP and MAC addresses
+        id: test_2vdu2
+        name: test_2vdu2_name
+        short-name: test_2vdu2_sname
+        mgmt-interface:
+            cp: eth0
+        internal-vld:
+        -   description: Internal VL
+            id:          net_internal
+            name:        net_internal_name
+            short-name:  net_internal_sname
+            type:        ELAN
+            internal-connection-point:
+            -   id-ref:     eth2
+                ip-address: 10.10.133.4
+            -   id-ref:     eth3
+                ip-address: 10.10.133.5
+            ip-profile-ref: ip-profile1
+        ip-profiles:
+        -   description: Inter VNF Link
+            ip-profile-params:
+                gateway-address: 10.10.133.1
+                ip-version:      ipv4
+                subnet-address:  10.10.133.0/24
+                dhcp-params:
+                  count:         200
+                  start-address: 10.10.133.20
+            name: ip-profile1
+        vdu:
+        -   id: VM1
+            name: VM1-name
+            image: US1604
+            interface:
+            -   name: iface11
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: eth0
+                mac-address:   "52:33:44:55:66:81"
+            -   name: iface12
+                type: INTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                internal-connection-point-ref: eth2
+                mac-address:   "52:33:44:55:66:82"
+            -   name: iface13
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: eth4
+            internal-connection-point:
+            -   name: eth2-icp
+                id:   eth2
+                type: VPORT
+            vm-flavor:
+                memory-mb: '2048'
+                storage-gb: '8'
+                vcpu-count: '1'
+        -   id: VM2
+            image: US1604
+            name: VM2-name
+            interface:
+            -   name: iface21
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: eth1
+                mac-address:   52:33:44:55:66:83
+            -   name: iface22
+                type: INTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                internal-connection-point-ref: eth3
+                mac-address:   52:33:44:55:66:84
+            internal-connection-point:
+            -   name: eth3-icp
+                id:   eth3
+                type: VPORT
+            vm-flavor:
+                memory-mb: '2048'
+                storage-gb: '8'
+                vcpu-count: '1'
+        vendor: ROtest
+        version: '1.0'
index 2684892..5b430a7 100755 (executable)
@@ -38,7 +38,6 @@ import sys
 import time
 import uuid
 import json
-from pyvcloud.vcloudair import VCA
 from argparse import ArgumentParser
 
 __author__ = "Pablo Montes, Alfonso Tierno"
@@ -292,8 +291,7 @@ class test_vimconn_connect(test_base):
         if test_config['vimtype'] == 'vmware':
             vca_object = test_config["vim_conn"].connect()
             logger.debug("{}".format(vca_object))
-            self.assertIsInstance(vca_object, VCA)
-
+            self.assertIsNotNone(vca_object)
 
 class test_vimconn_new_network(test_base):
     network_name = None
@@ -655,9 +653,10 @@ class test_vimconn_get_network(test_base):
                                                             self.__class__.test_index,
                                                 inspect.currentframe().f_code.co_name)
         self.__class__.test_index += 1
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].get_network(Non_exist_id)
 
-        network_info = test_config["vim_conn"].get_network(Non_exist_id)
-        self.assertEqual(network_info, {})
+        self.assertEqual((context.exception).http_code, 404)
 
 class test_vimconn_delete_network(test_base):
     network_name = None
@@ -824,10 +823,10 @@ class test_vimconn_new_image(test_base):
 
         image_path = test_config['image_path']
         if image_path:
-            image_id = test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : image_path })
+            self.__class__.image_id = test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : image_path })
             time.sleep(20)
-            self.assertEqual(type(image_id),str)
-            self.assertIsInstance(uuid.UUID(image_id),uuid.UUID)
+            self.assertEqual(type(self.__class__.image_id),str)
+            self.assertIsInstance(uuid.UUID(self.__class__.image_id),uuid.UUID)
         else:
             self.skipTest("Skipping test as image file not present at RO container")
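
The updated test now expects get_network() on a non-existent id to raise an exception carrying an http_code of 404, instead of returning an empty dict. A self-contained illustration of that assertRaises pattern, using a stand-in exception class and function rather than the real vimconn module:

    # Stand-alone sketch of the assertRaises/http_code pattern used above.
    import unittest


    class NotFoundError(Exception):
        """Stand-in for a vimconn-style exception that carries an HTTP code."""
        def __init__(self, message, http_code=404):
            super(NotFoundError, self).__init__(message)
            self.http_code = http_code


    def fake_get_network(net_id):
        """Stand-in for vim_conn.get_network() called with a non-existent id."""
        raise NotFoundError("network {} not found".format(net_id))


    class TestGetNetwork(unittest.TestCase):
        def test_non_existent_network_raises_404(self):
            with self.assertRaises(Exception) as context:
                fake_get_network("00000000-0000-0000-0000-000000000000")
            self.assertEqual(context.exception.http_code, 404)


    if __name__ == "__main__":
        unittest.main()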
 
diff --git a/vnfs/examples/v3_2vdu_vnfd.yaml b/vnfs/examples/v3_2vdu_vnfd.yaml
new file mode 100644 (file)
index 0000000..a0ebaf7
--- /dev/null
@@ -0,0 +1,59 @@
+vnfd:vnfd-catalog:
+    vnfd:
+    -   id: 2vdu_vnfd
+        name: 2vdu_vnfd-name
+        short-name: 2vdu-sname
+        description: Simple VNF example with a cirros image and a VDU count of 2
+        vendor: OSM
+        version: '1.0'
+
+        # Place the logo as a png in the icons directory and provide the name here
+        logo: cirros-64.png
+
+        # Management interface
+        mgmt-interface:
+            vdu-id: 2vduVM
+
+        # At least one VDU needs to be specified
+        vdu:
+        -   id: 2vduVM
+            name: 2vduVM-name
+            description: 2vduVM-description
+            count: 2
+
+            # Flavor of the VM to be instantiated for the VDU
+            # The flavor below fits into m1.micro
+            vm-flavor:
+                vcpu-count: 1
+                memory-mb: 96
+                storage-gb: 0
+
+            # Image/checksum or image including the full path
+            image: 'cirros-0.3.5-x86_64-disk'
+            #checksum: 
+
+            interface:
+            # Specify the external interfaces
+            # There can be multiple interfaces defined
+            -   name: eth0
+                type: EXTERNAL
+                position: 0
+                virtual-interface:
+                    type: OM-MGMT
+                    bandwidth: '0'
+                # vnfd-connection-point-ref: eth0
+                external-connection-point-ref: eth0
+
+            # Replace the ssh-rsa public key with your own public key
+            cloud-init: |
+                #cloud-config
+                ssh_authorized_keys:
+                -  ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDajuABKm3lzcA0hk1IQGAfSWxxE0viRedl1EnZ2s0qQL54zTGVqYzC73CndSu0az57ysAKDapKOnTWl6zfj+bU4j3c4jORDWrIelBVaeQaoWGfKtBmS7jE54I94cRgkAIk+4zM1ViRyPQ+0FoOOq7I/6rQZITZ4VqfyhygW7j2ke2vl3oJ/TKocOpdk4WlMmPC6dFYppmwlpTpPYKJVdh58aeq9G/wTRP1qvCAgZAm/1GYoj7JgQjw11j6ZZE0ci03F9aOqqMlICDJF87Zk3fUhnt+g6EYNMiEafd7kuNwXBAJ5D1n4vZnj/EpdQY+dlXhhGS2Bncr1db1YBJCoRWN Generated-by-Nova
+                users:
+                -  name: osm
+                   ssh_authorized_keys:
+                   -  ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDajuABKm3lzcA0hk1IQGAfSWxxE0viRedl1EnZ2s0qQL54zTGVqYzC73CndSu0az57ysAKDapKOnTWl6zfj+bU4j3c4jORDWrIelBVaeQaoWGfKtBmS7jE54I94cRgkAIk+4zM1ViRyPQ+0FoOOq7I/6rQZITZ4VqfyhygW7j2ke2vl3oJ/TKocOpdk4WlMmPC6dFYppmwlpTpPYKJVdh58aeq9G/wTRP1qvCAgZAm/1GYoj7JgQjw11j6ZZE0ci03F9aOqqMlICDJF87Zk3fUhnt+g6EYNMiEafd7kuNwXBAJ5D1n4vZnj/EpdQY+dlXhhGS2Bncr1db1YBJCoRWN Generated-by-Nova
+
+        connection-point:
+            -   name: eth0
+                type: VPORT
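
The cloud-init block above injects an ssh public key for an osm user into both VDU instances. A Python 3 sketch (hypothetical path; PyYAML assumed) that extracts the user-data and confirms it parses as cloud-config with that user defined:

    # Hedged check of the inline cloud-init user-data in this VNFD.
    import yaml

    with open("v3_2vdu_vnfd.yaml") as f:                 # hypothetical path
        vnfd = yaml.safe_load(f)["vnfd:vnfd-catalog"]["vnfd"][0]

    user_data = vnfd["vdu"][0]["cloud-init"]
    assert user_data.startswith("#cloud-config")

    cfg = yaml.safe_load(user_data)                      # the block itself is YAML
    assert any(u.get("name") == "osm" for u in cfg.get("users", []))
    print("cloud-init block parses and defines the osm user")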