Merge branch 'packaging'
[osm/RO.git] / osm_ro / vimconn_vmware.py
index a224255..5b04458 100644
@@ -63,6 +63,7 @@ import hashlib
 import socket
 import struct
 import netaddr
+import random
 
 # global variable for vcd connector type
 STANDALONE = 'standalone'
@@ -71,13 +72,9 @@ STANDALONE = 'standalone'
 FLAVOR_RAM_KEY = 'ram'
 FLAVOR_VCPUS_KEY = 'vcpus'
 FLAVOR_DISK_KEY = 'disk'
-DEFAULT_IP_PROFILE = {'gateway_address':"192.168.1.1",
-                      'dhcp_count':50,
-                      'subnet_address':"192.168.1.0/24",
+DEFAULT_IP_PROFILE = {'dhcp_count':50,
                       'dhcp_enabled':True,
-                      'dhcp_start_address':"192.168.1.3",
-                      'ip_version':"IPv4",
-                      'dns_address':"192.168.1.2"
+                      'ip_version':"IPv4"
                       }
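+# Only non-address defaults are kept here; gateway, DHCP start and DNS addresses
+# are derived per network from its subnet in the create-network REST code below.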
 # global variable for wait time
 INTERVAL_TIME = 5
@@ -181,10 +178,6 @@ class vimconnector(vimconn.vimconnector):
         self.nsx_manager = None
         self.nsx_user = None
         self.nsx_password = None
-        self.vcenter_ip = None
-        self.vcenter_port = None
-        self.vcenter_user = None
-        self.vcenter_password = None
 
         if tenant_name is not None:
             orgnameandtenant = tenant_name.split(":")
@@ -217,6 +210,14 @@ class vimconnector(vimconn.vimconnector):
         self.vcenter_user = config.get("vcenter_user", None)
         self.vcenter_password = config.get("vcenter_password", None)
 
+# ############# Stub code for SRIOV #################
+#         try:
+#             self.dvs_name = config['dv_switch_name']
+#         except KeyError:
+#             raise vimconn.vimconnException(message="Error: distributed virtual switch name is empty in config")
+#
+#         self.vlanID_range = config.get("vlanID_range", None)
+
         self.org_uuid = None
         self.vca = None
 
@@ -473,6 +474,12 @@ class vimconnector(vimconn.vimconnector):
         if shared:
             isshared = 'true'
 
+# ############# Stub code for SRIOV #################
+#         if net_type == "data" or net_type == "ptp":
+#             if self.config.get('dv_switch_name') is None:
+#                  raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' in the config")
+#             network_uuid = self.create_dvPort_group(net_name)
+
         network_uuid = self.create_network(network_name=net_name, net_type=net_type,
                                            ip_profile=ip_profile, isshared=isshared)
         if network_uuid is not None:
@@ -558,11 +565,11 @@ class vimconnector(vimconn.vimconnector):
         if vdc is None:
             raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
 
-        vdcid = vdc.get_id().split(":")[3]
-        networks = vca.get_networks(vdc.get_name())
-        network_list = []
-
         try:
+            vdcid = vdc.get_id().split(":")[3]
+            networks = vca.get_networks(vdc.get_name())
+            network_list = []
+
             for network in networks:
                 filter_entry = {}
                 net_uuid = network.get_id().split(":")
@@ -610,13 +617,13 @@ class vimconnector(vimconn.vimconnector):
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed")
 
-        vdc = vca.get_vdc(self.tenant_name)
-        vdc_id = vdc.get_id().split(":")[3]
+        try:
+            vdc = vca.get_vdc(self.tenant_name)
+            vdc_id = vdc.get_id().split(":")[3]
 
-        networks = vca.get_networks(vdc.get_name())
-        filter_dict = {}
+            networks = vca.get_networks(vdc.get_name())
+            filter_dict = {}
 
-        try:
             for network in networks:
                 vdc_network_id = network.get_id().split(":")
                 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
@@ -649,6 +656,18 @@ class vimconnector(vimconn.vimconnector):
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() for tenant {} is failed.".format(self.tenant_name))
 
+        # ############# Stub code for SRIOV #################
+#         dvport_group = self.get_dvport_group(net_id)
+#         if dvport_group:
+#             #delete portgroup
+#             status = self.destroy_dvport_group(net_id)
+#             if status:
+#                 # Remove vlanID from persistent info
+#                 if net_id in self.persistent_info["used_vlanIDs"]:
+#                     del self.persistent_info["used_vlanIDs"][net_id]
+#
+#                 return net_id
+
         vcd_network = self.get_vcd_network(network_uuid=net_id)
         if vcd_network is not None and vcd_network:
             if self.delete_network_action(network_uuid=net_id):
@@ -844,117 +863,124 @@ class vimconnector(vimconn.vimconnector):
         #  create vApp Template and check the status: if vCD is able to read the OVF it will respond with the
         #  appropriate status change.
         #  if vCD can parse the OVF we upload the VMDK file
-        for catalog in vca.get_catalogs():
-            if catalog_name != catalog.name:
-                continue
-            link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
-                                       link.get_rel() == 'add', catalog.get_Link())
-            assert len(link) == 1
-            data = """
-            <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
-            """ % (escape(catalog_name), escape(description))
-            headers = vca.vcloud_session.get_vcloud_headers()
-            headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
-            response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
-            if response.status_code == requests.codes.created:
-                catalogItem = XmlElementTree.fromstring(response.content)
-                entity = [child for child in catalogItem if
-                          child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
-                href = entity.get('href')
-                template = href
-                response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
-                                    verify=vca.verify, logger=self.logger)
-
-                if response.status_code == requests.codes.ok:
-                    media = mediaType.parseString(response.content, True)
-                    link = filter(lambda link: link.get_rel() == 'upload:default',
-                                  media.get_Files().get_File()[0].get_Link())[0]
-                    headers = vca.vcloud_session.get_vcloud_headers()
-                    headers['Content-Type'] = 'Content-Type text/xml'
-                    response = Http.put(link.get_href(),
-                                        data=open(media_file_name, 'rb'),
-                                        headers=headers,
+        try:
+            for catalog in vca.get_catalogs():
+                if catalog_name != catalog.name:
+                    continue
+                link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
+                                           link.get_rel() == 'add', catalog.get_Link())
+                assert len(link) == 1
+                data = """
+                <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
+                """ % (escape(catalog_name), escape(description))
+                headers = vca.vcloud_session.get_vcloud_headers()
+                headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
+                response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
+                if response.status_code == requests.codes.created:
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if
+                              child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    href = entity.get('href')
+                    template = href
+                    response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
                                         verify=vca.verify, logger=self.logger)
-                    if response.status_code != requests.codes.ok:
-                        self.logger.debug(
-                            "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
-                                                                                                  media_file_name))
-                        return False
 
-                # TODO fix this with aync block
-                time.sleep(5)
-
-                self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
-
-                # uploading VMDK file
-                # check status of OVF upload and upload remaining files.
-                response = Http.get(template,
-                                    headers=vca.vcloud_session.get_vcloud_headers(),
-                                    verify=vca.verify,
-                                    logger=self.logger)
+                    if response.status_code == requests.codes.ok:
+                        media = mediaType.parseString(response.content, True)
+                        link = filter(lambda link: link.get_rel() == 'upload:default',
+                                      media.get_Files().get_File()[0].get_Link())[0]
+                        headers = vca.vcloud_session.get_vcloud_headers()
+                        headers['Content-Type'] = 'text/xml'
+                        response = Http.put(link.get_href(),
+                                            data=open(media_file_name, 'rb'),
+                                            headers=headers,
+                                            verify=vca.verify, logger=self.logger)
+                        if response.status_code != requests.codes.ok:
+                            self.logger.debug(
+                                "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
+                                                                                                      media_file_name))
+                            return False
+
+                    # TODO: fix this with an async block
+                    time.sleep(5)
+
+                    self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
+
+                    # uploading VMDK file
+                    # check status of OVF upload and upload remaining files.
+                    response = Http.get(template,
+                                        headers=vca.vcloud_session.get_vcloud_headers(),
+                                        verify=vca.verify,
+                                        logger=self.logger)
 
-                if response.status_code == requests.codes.ok:
-                    media = mediaType.parseString(response.content, True)
-                    number_of_files = len(media.get_Files().get_File())
-                    for index in xrange(0, number_of_files):
-                        links_list = filter(lambda link: link.get_rel() == 'upload:default',
-                                            media.get_Files().get_File()[index].get_Link())
-                        for link in links_list:
-                            # we skip ovf since it already uploaded.
-                            if 'ovf' in link.get_href():
-                                continue
-                            # The OVF file and VMDK must be in a same directory
-                            head, tail = os.path.split(media_file_name)
-                            file_vmdk = head + '/' + link.get_href().split("/")[-1]
-                            if not os.path.isfile(file_vmdk):
-                                return False
-                            statinfo = os.stat(file_vmdk)
-                            if statinfo.st_size == 0:
-                                return False
-                            hrefvmdk = link.get_href()
-
-                            if progress:
-                                print("Uploading file: {}".format(file_vmdk))
-                            if progress:
-                                widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
-                                           FileTransferSpeed()]
-                                progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
-
-                            bytes_transferred = 0
-                            f = open(file_vmdk, 'rb')
-                            while bytes_transferred < statinfo.st_size:
-                                my_bytes = f.read(chunk_bytes)
-                                if len(my_bytes) <= chunk_bytes:
-                                    headers = vca.vcloud_session.get_vcloud_headers()
-                                    headers['Content-Range'] = 'bytes %s-%s/%s' % (
-                                        bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
-                                    headers['Content-Length'] = str(len(my_bytes))
-                                    response = Http.put(hrefvmdk,
-                                                        headers=headers,
-                                                        data=my_bytes,
-                                                        verify=vca.verify,
-                                                        logger=None)
-
-                                    if response.status_code == requests.codes.ok:
-                                        bytes_transferred += len(my_bytes)
-                                        if progress:
-                                            progress_bar.update(bytes_transferred)
-                                    else:
-                                        self.logger.debug(
-                                            'file upload failed with error: [%s] %s' % (response.status_code,
-                                                                                        response.content))
-
-                                        f.close()
-                                        return False
-                            f.close()
-                            if progress:
-                                progress_bar.finish()
-                            time.sleep(10)
-                    return True
-                else:
-                    self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
-                                      format(catalog_name, media_file_name))
-                    return False
+                    if response.status_code == requests.codes.ok:
+                        media = mediaType.parseString(response.content, True)
+                        number_of_files = len(media.get_Files().get_File())
+                        for index in xrange(0, number_of_files):
+                            links_list = filter(lambda link: link.get_rel() == 'upload:default',
+                                                media.get_Files().get_File()[index].get_Link())
+                            for link in links_list:
+                                # we skip the ovf since it is already uploaded.
+                                if 'ovf' in link.get_href():
+                                    continue
+                                # The OVF file and VMDK must be in the same directory
+                                head, tail = os.path.split(media_file_name)
+                                file_vmdk = head + '/' + link.get_href().split("/")[-1]
+                                if not os.path.isfile(file_vmdk):
+                                    return False
+                                statinfo = os.stat(file_vmdk)
+                                if statinfo.st_size == 0:
+                                    return False
+                                hrefvmdk = link.get_href()
+
+                                if progress:
+                                    print("Uploading file: {}".format(file_vmdk))
+                                if progress:
+                                    widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
+                                               FileTransferSpeed()]
+                                    progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
+
+                                bytes_transferred = 0
+                                f = open(file_vmdk, 'rb')
+                                while bytes_transferred < statinfo.st_size:
+                                    my_bytes = f.read(chunk_bytes)
+                                    if len(my_bytes) <= chunk_bytes:
+                                        headers = vca.vcloud_session.get_vcloud_headers()
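+                                        # Content-Range carries absolute offsets within the
+                                        # whole file: "bytes <first>-<last>/<total>"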
+                                        headers['Content-Range'] = 'bytes %s-%s/%s' % (
+                                            bytes_transferred, bytes_transferred + len(my_bytes) - 1, statinfo.st_size)
+                                        headers['Content-Length'] = str(len(my_bytes))
+                                        response = Http.put(hrefvmdk,
+                                                            headers=headers,
+                                                            data=my_bytes,
+                                                            verify=vca.verify,
+                                                            logger=None)
+
+                                        if response.status_code == requests.codes.ok:
+                                            bytes_transferred += len(my_bytes)
+                                            if progress:
+                                                progress_bar.update(bytes_transferred)
+                                        else:
+                                            self.logger.debug(
+                                                'file upload failed with error: [%s] %s' % (response.status_code,
+                                                                                            response.content))
+
+                                            f.close()
+                                            return False
+                                f.close()
+                                if progress:
+                                    progress_bar.finish()
+                                time.sleep(10)
+                        return True
+                    else:
+                        self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
+                                          format(catalog_name, media_file_name))
+                        return False
+        except Exception as exp:
+            self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
+                .format(catalog_name,media_file_name, exp))
+            raise vimconn.vimconnException(
+                "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
+                .format(catalog_name,media_file_name, exp))
 
         self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
         return False
@@ -1012,6 +1038,25 @@ class vimconnector(vimconn.vimconnector):
                 return catalog.name
         return None
 
+    def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
+        """  Method check catalog and return catalog name lookup done by catalog UUID.
+
+        Args
+            catalog_name: catalog name as string
+            catalogs:  list of catalogs.
+
+        Return: catalogs name or None
+        """
+
+        if not self.validate_uuid4(uuid_string=catalog_uuid):
+            return None
+
+        for catalog in catalogs:
+            catalog_id = catalog.get_id().split(":")[3]
+            if catalog_id == catalog_uuid:
+                return catalog
+        return None
+
     def get_image_id_from_path(self, path=None, progress=False):
         """  Method upload OVF image to vCloud director.
 
@@ -1058,7 +1103,12 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("File name {} Catalog Name {} file path {} "
                           "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
 
-        catalogs = vca.get_catalogs()
+        try:
+            catalogs = vca.get_catalogs()
+        except Exception as exp:
+            self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
+            raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
+
         if len(catalogs) == 0:
             self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
             result = self.create_vimcatalog(vca, catalog_md5_name)
@@ -1232,8 +1282,8 @@ class vimconnector(vimconn.vimconnector):
         """
 
         self.logger.info("Creating new instance for entry {}".format(name))
-        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {}".format(
-                                    description, start, image_id, flavor_id, net_list, cloud_config))
+        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
+                                    description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
         vca = self.connect()
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed.")
@@ -1266,11 +1316,10 @@ class vimconnector(vimconn.vimconnector):
 
 
         # Set vCPU and Memory based on flavor.
-        #
         vm_cpus = None
         vm_memory = None
         vm_disk = None
-        pci_devices_info = []
+        numas = None
+
         if flavor_id is not None:
             if flavor_id not in vimconnector.flavorlist:
                 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
@@ -1285,11 +1334,7 @@ class vimconnector(vimconn.vimconnector):
                     extended = flavor.get("extended", None)
                     if extended:
                         numas=extended.get("numas", None)
-                        if numas:
-                            for numa in numas:
-                                for interface in numa.get("interfaces",() ):
-                                    if interface["dedicated"].strip()=="yes":
-                                        pci_devices_info.append(interface)
+
                 except Exception as exp:
                     raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
 
@@ -1325,31 +1370,52 @@ class vimconnector(vimconn.vimconnector):
 
         # use: 'data', 'bridge', 'mgmt'
         # create vApp.  Set vcpu and ram based on flavor id.
-        vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName,
-                                   self.get_catalogbyid(image_id, catalogs),
-                                   network_name=None,  # None while creating vapp
-                                   network_mode=network_mode,
-                                   vm_name=vmname_andid,
-                                   vm_cpus=vm_cpus,  # can be None if flavor is None
-                                   vm_memory=vm_memory)  # can be None if flavor is None
-
-        if vapptask is None or vapptask is False:
-            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): failed deploy vApp {}".format(vmname_andid))
-        if type(vapptask) is VappTask:
-            vca.block_until_completed(vapptask)
+        try:
+            vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName,
+                                       self.get_catalogbyid(image_id, catalogs),
+                                       network_name=None,  # None while creating vapp
+                                       network_mode=network_mode,
+                                       vm_name=vmname_andid,
+                                       vm_cpus=vm_cpus,  # can be None if flavor is None
+                                       vm_memory=vm_memory)  # can be None if flavor is None
+
+            if vapptask is None or vapptask is False:
+                raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): failed to create vApp {}".format(vmname_andid))
+            if type(vapptask) is VappTask:
+                vca.block_until_completed(vapptask)
+
+        except Exception as exp:
+            raise vimconn.vimconnUnexpectedResponse(
+                "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
 
         # we should have now vapp in undeployed state.
-        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
-        vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
+        try:
+            vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+            vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
+        except Exception as exp:
+            raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+                    .format(vmname_andid, exp))
+
         if vapp is None:
             raise vimconn.vimconnUnexpectedResponse(
-                "new_vminstance(): Failed failed retrieve vApp {} after we deployed".format(
+                "new_vminstance(): Failed to retrieve vApp {} after creation".format(
                                                                             vmname_andid))
 
-        #Add PCI passthrough configrations
-        PCI_devices_status = False
+        # Add PCI passthrough/SR-IOV configurations
         vm_obj = None
-        si = None
+        pci_devices_info = []
+        sriov_net_info = []
+        reserve_memory = False
+
+        for net in net_list:
+            if net["type"] == "PF":
+                pci_devices_info.append(net)
+            elif (net["type"] == "VF" or net["type"] == "VFnotShared") and 'net_id' in net:
+                sriov_net_info.append(net)
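+        # 'PF' nets are attached as PCI passthrough devices; 'VF'/'VFnotShared'
+        # nets are collected for the SR-IOV path (currently stubbed out below)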
+
+        # Add PCI passthrough devices
         if len(pci_devices_info) > 0:
             self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
                                                                         vmname_andid ))
@@ -1361,18 +1427,47 @@ class vimconnector(vimconn.vimconnector):
                                                             pci_devices_info,
                                                             vmname_andid)
                                  )
+                reserve_memory = True
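+                # vSphere requires a full memory reservation for VMs that use
+                # passthrough devices, so flag it for the reconfigure step below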
             else:
                 self.logger.info("Fail to add PCI devives {} to VM {}".format(
                                                             pci_devices_info,
                                                             vmname_andid)
                                  )
-        # add vm disk
+        # Modify vm disk
         if vm_disk:
             #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
             result = self.modify_vm_disk(vapp_uuid, vm_disk)
             if result :
                 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
 
+        # Add new or existing disks to vApp
+        if disk_list:
+            added_existing_disk = False
+            for disk in disk_list:
+                if "image_id" in disk and disk["image_id"] is not None:
+                    self.logger.debug("Adding existing disk from image {} to vm {} ".format(
+                                                                    disk["image_id"], vapp_uuid))
+                    self.add_existing_disk(catalogs=catalogs,
+                                           image_id=disk["image_id"],
+                                           size=disk["size"],
+                                           template_name=templateName,
+                                           vapp_uuid=vapp_uuid
+                                           )
+                    added_existing_disk = True
+                else:
+                    # Wait till the added existing disk is reflected in the vCD database/API
+                    if added_existing_disk:
+                        time.sleep(5)
+                        added_existing_disk = False
+                    self.add_new_disk(vca, vapp_uuid, disk['size'])
+
+        if numas:
+            # Assigning numa affinity setting
+            for numa in numas:
+                if 'paired-threads-id' in numa:
+                    paired_threads_id = numa['paired-threads-id']
+                    self.set_numa_affinity(vapp_uuid, paired_threads_id)
+
         # add NICs & connect to networks in netlist
         try:
             self.logger.info("Request to connect VM to a network: {}".format(net_list))
@@ -1407,49 +1502,93 @@ class vimconnector(vimconn.vimconnector):
                         if type(task) is GenericTask:
                             vca.block_until_completed(task)
                         # connect network to VM - with all DHCP by default
-                        self.logger.info("new_vminstance(): Connecting VM to a network {}".format(nets[0].name))
-                        task = vapp.connect_vms(nets[0].name,
-                                                connection_index=nicIndex,
-                                                connections_primary_index=primary_nic_index,
-                                                ip_allocation_mode='DHCP')
-                        if type(task) is GenericTask:
-                            vca.block_until_completed(task)
+
+                        type_list = ['PF', 'VF', 'VFnotShared']
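+                        # passthrough (PF) and SR-IOV (VF) interfaces are handled
+                        # separately, so no vCD network adapter is added for them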
+                        if 'type' in net and net['type'] not in type_list:
+                            # fetching nic type from vnf
+                            if 'model' in net:
+                                nic_type = net['model']
+                                self.logger.info("new_vminstance(): adding network adapter "\
+                                                          "to a network {}".format(nets[0].name))
+                                self.add_network_adapter_to_vms(vapp, nets[0].name,
+                                                                primary_nic_index,
+                                                                nicIndex,
+                                                                net,
+                                                                nic_type=nic_type)
+                            else:
+                                self.logger.info("new_vminstance(): adding network adapter "\
+                                                         "to a network {}".format(nets[0].name))
+                                self.add_network_adapter_to_vms(vapp, nets[0].name,
+                                                                primary_nic_index,
+                                                                nicIndex,
+                                                                net)
                 nicIndex += 1
-        except KeyError:
-            # it might be a case if specific mandatory entry in dict is empty
-            self.logger.debug("Key error {}".format(KeyError.message))
-            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
 
-        # deploy and power on vm
-        self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
-        deploytask = vapp.deploy(powerOn=False)
-        if type(deploytask) is GenericTask:
-            vca.block_until_completed(deploytask)
-
-        # If VM has PCI devices reserve memory for VM
-        if PCI_devices_status and vm_obj and vcenter_conect:
-            memReserve = vm_obj.config.hardware.memoryMB
-            spec = vim.vm.ConfigSpec()
-            spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
-            task = vm_obj.ReconfigVM_Task(spec=spec)
-            if task:
-                result = self.wait_for_vcenter_task(task, vcenter_conect)
-                self.logger.info("Reserved memmoery {} MB for "\
-                                 "VM VM status: {}".format(str(memReserve),result))
-            else:
-                self.logger.info("Fail to reserved memmoery {} to VM {}".format(
-                                                            str(memReserve),str(vm_obj)))
+            # cloud-init for ssh-key injection
+            if cloud_config:
+                self.cloud_init(vapp, cloud_config)
+
+            # deploy and power on vm
+            self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
+            deploytask = vapp.deploy(powerOn=False)
+            if type(deploytask) is GenericTask:
+                vca.block_until_completed(deploytask)
+
+        # ############# Stub code for SRIOV #################
+        # Add SR-IOV
+#         if len(sriov_net_info) > 0:
+#             self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
+#                                                                         vmname_andid ))
+#             sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
+#                                                                   sriov_net_info,
+#                                                                   vmname_andid)
+#             if sriov_status:
+#                 self.logger.info("Added SRIOV {} to VM {}".format(
+#                                                             sriov_net_info,
+#                                                             vmname_andid)
+#                                  )
+#                 reserve_memory = True
+#             else:
+#                 self.logger.info("Fail to add SRIOV {} to VM {}".format(
+#                                                             sriov_net_info,
+#                                                             vmname_andid)
+#                                  )
+
+            # If the VM has PCI devices or SR-IOV interfaces, reserve memory for it
+            if reserve_memory:
+                memReserve = vm_obj.config.hardware.memoryMB
+                spec = vim.vm.ConfigSpec()
+                spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
+                task = vm_obj.ReconfigVM_Task(spec=spec)
+                if task:
+                    result = self.wait_for_vcenter_task(task, vcenter_conect)
+                    self.logger.info("Reserved memmoery {} MB for "\
+                                     "VM VM status: {}".format(str(memReserve),result))
+                else:
+                    self.logger.info("Fail to reserved memmoery {} to VM {}".format(
+                                                                str(memReserve),str(vm_obj)))
 
-        self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
-        poweron_task = vapp.poweron()
-        if type(poweron_task) is GenericTask:
-            vca.block_until_completed(poweron_task)
+            self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
+            poweron_task = vapp.poweron()
+            if type(poweron_task) is GenericTask:
+                vca.block_until_completed(poweron_task)
+
+        except Exception as exp:
+            # this may happen if a mandatory entry in the dict is empty or on any other pyvcloud exception
+            self.logger.debug("new_vminstance(): Failed to create new VM instance {} with exception {}".format(name, exp))
+            raise vimconn.vimconnException("new_vminstance(): Failed to create new VM instance {} with exception {}".format(name, exp))
 
         # check if vApp deployed and if that the case return vApp UUID otherwise -1
         wait_time = 0
         vapp_uuid = None
         while wait_time <= MAX_WAIT_TIME:
-            vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+            try:
+                vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+            except Exception as exp:
+                raise vimconn.vimconnUnexpectedResponse(
+                        "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+                        .format(vmname_andid, exp))
+
             if vapp and vapp.me.deployed:
                 vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
                 break
@@ -1673,39 +1812,6 @@ class vimconnector(vimconn.vimconnector):
 
         self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
 
-        mac_ip_addr={}
-        rheaders = {'Content-Type': 'application/xml'}
-        iso_edges = ['edge-2','edge-3','edge-6','edge-7','edge-8','edge-9','edge-10']
-
-        try:
-            for edge in iso_edges:
-                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
-                self.logger.debug("refresh_vms_status: NSX Manager url: {}".format(nsx_api_url))
-
-                resp = requests.get(self.nsx_manager + nsx_api_url,
-                                    auth = (self.nsx_user, self.nsx_password),
-                                    verify = False, headers = rheaders)
-
-                if resp.status_code == requests.codes.ok:
-                    dhcp_leases = XmlElementTree.fromstring(resp.text)
-                    for child in dhcp_leases:
-                        if child.tag == 'dhcpLeaseInfo':
-                            dhcpLeaseInfo = child
-                            for leaseInfo in dhcpLeaseInfo:
-                                for elem in leaseInfo:
-                                    if (elem.tag)=='macAddress':
-                                        mac_addr = elem.text
-                                    if (elem.tag)=='ipAddress':
-                                        ip_addr = elem.text
-                                if (mac_addr) is not None:
-                                    mac_ip_addr[mac_addr]= ip_addr
-                    self.logger.debug("NSX Manager DHCP Lease info: mac_ip_addr : {}".format(mac_ip_addr))
-                else:
-                    self.logger.debug("Error occurred while getting DHCP lease info from NSX Manager: {}".format(resp.content))
-        except KeyError:
-            self.logger.debug("Error in response from NSX Manager {}".format(KeyError.message))
-            self.logger.debug(traceback.format_exc())
-
         vca = self.connect()
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed.")
@@ -1715,44 +1821,156 @@ class vimconnector(vimconn.vimconnector):
             raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
 
         vms_dict = {}
+        nsx_edge_list = []
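+        # NSX edge IDs are fetched lazily, only when a VM is missing its IP address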
         for vmuuid in vm_list:
             vmname = self.get_namebyvappid(vca, vdc, vmuuid)
             if vmname is not None:
 
-                the_vapp = vca.get_vapp(vdc, vmname)
-                vm_info = the_vapp.get_vms_details()
-                vm_status = vm_info[0]['status']
-                vm_pci_details = self.get_vm_pci_details(vmuuid)
-                vm_info[0].update(vm_pci_details)
+                try:
+                    the_vapp = vca.get_vapp(vdc, vmname)
+                    vm_info = the_vapp.get_vms_details()
+                    vm_status = vm_info[0]['status']
+                    vm_pci_details = self.get_vm_pci_details(vmuuid)
+                    vm_info[0].update(vm_pci_details)
 
-                vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
-                           'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
-                           'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
+                    vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
+                               'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
+                               'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
 
-                # get networks
-                try:
+                    # get networks
                     vm_app_networks = the_vapp.get_vms_network_info()
                     for vapp_network in vm_app_networks:
                         for vm_network in vapp_network:
                             if vm_network['name'] == vmname:
                                 #Assign IP Address based on MAC Address in NSX DHCP lease info
-                                for mac_adres,ip_adres in mac_ip_addr.iteritems():
-                                    if mac_adres == vm_network['mac']:
-                                        vm_network['ip']=ip_adres
+                                if vm_network['ip'] is None:
+                                    if not nsx_edge_list:
+                                        nsx_edge_list = self.get_edge_details()
+                                        if nsx_edge_list is None:
+                                            raise vimconn.vimconnException("refresh_vms_status:"\
+                                                                           "Failed to get edge details from NSX Manager")
+                                    if vm_network['mac'] is not None:
+                                        vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])
+
+                                vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
                                 interface = {"mac_address": vm_network['mac'],
-                                             "vim_net_id": self.get_network_id_by_name(vm_network['network_name']),
-                                             "vim_interface_id": self.get_network_id_by_name(vm_network['network_name']),
+                                             "vim_net_id": vm_net_id,
+                                             "vim_interface_id": vm_net_id,
                                              'ip_address': vm_network['ip']}
                                 # interface['vim_info'] = yaml.safe_dump(vm_network)
                                 vm_dict["interfaces"].append(interface)
                     # add a vm to vm dict
                     vms_dict.setdefault(vmuuid, vm_dict)
-                except KeyError:
-                    self.logger.debug("Error in respond {}".format(KeyError.message))
+                except Exception as exp:
+                    self.logger.debug("Error in response {}".format(exp))
                     self.logger.debug(traceback.format_exc())
 
         return vms_dict
 
+
+    def get_edge_details(self):
+        """Get the NSX edge list from NSX Manager
+           Returns list of NSX edges
+        """
+        edge_list = []
+        rheaders = {'Content-Type': 'application/xml'}
+        nsx_api_url = '/api/4.0/edges'
+
+        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
+
+        try:
+            resp = requests.get(self.nsx_manager + nsx_api_url,
+                                auth = (self.nsx_user, self.nsx_password),
+                                verify = False, headers = rheaders)
+            if resp.status_code == requests.codes.ok:
+                paged_edge_list = XmlElementTree.fromstring(resp.text)
+                for edge_pages in paged_edge_list:
+                    if edge_pages.tag == 'edgePage':
+                        for edge_summary in edge_pages:
+                            if edge_summary.tag == 'pagingInfo':
+                                for element in edge_summary:
+                                    if element.tag == 'totalCount' and element.text == '0':
+                                        raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
+                                                                       .format(self.nsx_manager))
+
+                            if edge_summary.tag == 'edgeSummary':
+                                for element in edge_summary:
+                                    if element.tag == 'id':
+                                        edge_list.append(element.text)
+                    else:
+                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
+                                                       .format(self.nsx_manager))
+
+                if not edge_list:
+                    raise vimconn.vimconnException("get_edge_details: "\
+                                                   "No NSX edge details found: {}"
+                                                   .format(self.nsx_manager))
+                else:
+                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
+                    return edge_list
+            else:
+                self.logger.debug("get_edge_details: "
+                                  "Failed to get NSX edge details from NSX Manager: {}"
+                                  .format(resp.content))
+                return None
+
+        except Exception as exp:
+            self.logger.debug("get_edge_details: "\
+                              "Failed to get NSX edge details from NSX Manager: {}"
+                              .format(exp))
+            raise vimconn.vimconnException("get_edge_details: "\
+                                           "Failed to get NSX edge details from NSX Manager: {}"
+                                           .format(exp))
+
+
+    def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
+        """Get IP address details from NSX edges, using the MAC address
+           PARAMS: nsx_edges : List of NSX edges
+                   mac_address : Find IP address corresponding to this MAC address
+           Returns: IP address corresponding to the provided MAC address
+        """
+
+        ip_addr = None
+        rheaders = {'Content-Type': 'application/xml'}
+
+        self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
+
+        try:
+            for edge in nsx_edges:
+                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
+
+                resp = requests.get(self.nsx_manager + nsx_api_url,
+                                    auth = (self.nsx_user, self.nsx_password),
+                                    verify = False, headers = rheaders)
+
+                if resp.status_code == requests.codes.ok:
+                    dhcp_leases = XmlElementTree.fromstring(resp.text)
+                    for child in dhcp_leases:
+                        if child.tag == 'dhcpLeaseInfo':
+                            dhcpLeaseInfo = child
+                            for leaseInfo in dhcpLeaseInfo:
+                                edge_mac_addr = None
+                                for elem in leaseInfo:
+                                    if elem.tag == 'macAddress':
+                                        edge_mac_addr = elem.text
+                                    if elem.tag == 'ipAddress':
+                                        ip_addr = elem.text
+                                if edge_mac_addr is not None:
+                                    if edge_mac_addr == mac_address:
+                                        self.logger.debug("Found IP addr {} for MAC {} at NSX edge {}"
+                                                          .format(ip_addr, mac_address, edge))
+                                        return ip_addr
+                else:
+                    self.logger.debug("get_ipaddr_from_NSXedge: "\
+                                      "Error occurred while getting DHCP lease info from NSX Manager: {}"
+                                      .format(resp.content))
+
+            self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
+            return None
+
+        except XmlElementTree.ParseError as Err:
+            self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
+
+
     def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
         """Send and action over a VM instance from VIM
         Returns the vm_id if the action was successfully sent to the VIM"""
@@ -1782,54 +2000,53 @@ class vimconnector(vimconn.vimconnector):
             if "start" in action_dict:
                 vm_info = the_vapp.get_vms_details()
                 vm_status = vm_info[0]['status']
-                self.logger.info("Power on vApp: vm_status:{} {}".format(type(vm_status),vm_status))
+                self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
                 if vm_status == "Suspended" or vm_status == "Powered off":
                     power_on_task = the_vapp.poweron()
-                    if power_on_task is not None and type(power_on_task) is GenericTask:
-                        result = vca.block_until_completed(power_on_task)
-                        if result:
-                            self.logger.info("action_vminstance: Powered on vApp: {}".format(vapp_name))
-                        else:
-                            self.logger.info("action_vminstance: Failed to power on vApp: {}".format(vapp_name))
-                    else:
-                        self.logger.info("action_vminstance: Wait for vApp {} to power on".format(vapp_name))
-            elif "rebuild" in action_dict:
-                self.logger.info("action_vminstance: Rebuilding vApp: {}".format(vapp_name))
-                power_on_task = the_vapp.deploy(powerOn=True)
-                if type(power_on_task) is GenericTask:
                     result = vca.block_until_completed(power_on_task)
-                    if result:
-                        self.logger.info("action_vminstance: Rebuilt vApp: {}".format(vapp_name))
-                    else:
-                        self.logger.info("action_vminstance: Failed to rebuild vApp: {}".format(vapp_name))
-                else:
-                    self.logger.info("action_vminstance: Wait for vApp rebuild {} to power on".format(vapp_name))
+                    self.instance_actions_result("start", result, vapp_name)
+            elif "rebuild" in action_dict:
+                self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
+                rebuild_task = the_vapp.deploy(powerOn=True)
+                result = vca.block_until_completed(rebuild_task)
+                self.instance_actions_result("rebuild", result, vapp_name)
             elif "pause" in action_dict:
-                pass
-                ## server.pause()
+                self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
+                pause_task = the_vapp.undeploy(action='suspend')
+                result = vca.block_until_completed(pause_task)
+                self.instance_actions_result("pause", result, vapp_name)
             elif "resume" in action_dict:
-                pass
-                ## server.resume()
+                self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
+                power_task = the_vapp.poweron()
+                result = vca.block_until_completed(power_task)
+                self.instance_actions_result("resume", result, vapp_name)
             elif "shutoff" in action_dict or "shutdown" in action_dict:
+                action_name, value = action_dict.items()[0]
+                self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
                 power_off_task = the_vapp.undeploy(action='powerOff')
-                if type(power_off_task) is GenericTask:
-                    result = vca.block_until_completed(power_off_task)
-                    if result:
-                        self.logger.info("action_vminstance: Powered off vApp: {}".format(vapp_name))
-                    else:
-                        self.logger.info("action_vminstance: Failed to power off vApp: {}".format(vapp_name))
+                result = vca.block_until_completed(power_off_task)
+                if action_name == "shutdown":
+                    self.instance_actions_result("shutdown", result, vapp_name)
                 else:
-                    self.logger.info("action_vminstance: Wait for vApp {} to power off".format(vapp_name))
+                    self.instance_actions_result("shutoff", result, vapp_name)
             elif "forceOff" in action_dict:
-                the_vapp.reset()
-            elif "terminate" in action_dict:
-                the_vapp.delete()
-            # elif "createImage" in action_dict:
-            #     server.create_image()
+                force_off_task = the_vapp.undeploy(action='force')
+                result = vca.block_until_completed(force_off_task)
+                self.instance_actions_result("forceOff", result, vapp_name)
+            elif "reboot" in action_dict:
+                self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
+                reboot_task = the_vapp.reboot()
+                result = vca.block_until_completed(reboot_task)
+                self.instance_actions_result("reboot", result, vapp_name)
             else:
-                pass
-        except:
-            pass
+                raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
+            return vm__vim_uuid
+        except Exception as exp:
+            self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
+            raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
+
+    def instance_actions_result(self, action, result, vapp_name):
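+        """Log whether the given action_vminstance() operation succeeded on the vApp"""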
+        if result:
+            self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
+        else:
+            self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
 
     def get_vminstance_console(self, vm_id, console_type="vnc"):
         """
@@ -2303,8 +2520,8 @@ class vimconnector(vimconn.vimconnector):
         if network_uuid is None:
             return network_uuid
 
-        content = self.get_network_action(network_uuid=network_uuid)
         try:
+            content = self.get_network_action(network_uuid=network_uuid)
             vm_list_xmlroot = XmlElementTree.fromstring(content)
 
             network_configuration['status'] = vm_list_xmlroot.get("status")
@@ -2320,8 +2537,9 @@ class vimconnector(vimconn.vimconnector):
                         if tagKey != "":
                             network_configuration[tagKey] = configuration.text.strip()
             return network_configuration
-        except:
-            pass
+        except Exception as exp:
+            self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
+            raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
 
         return network_configuration
 
@@ -2387,7 +2605,7 @@ class vimconnector(vimconn.vimconnector):
             vm_list_xmlroot = XmlElementTree.fromstring(content)
             vcd_uuid = vm_list_xmlroot.get('id').split(":")
             if len(vcd_uuid) == 4:
-                self.logger.info("Create new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
+                self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
                 return vcd_uuid[3]
         except:
             self.logger.debug("Failed create network {}".format(network_name))
@@ -2473,26 +2691,50 @@ class vimconnector(vimconn.vimconnector):
                 except:
                     return None
 
-            #Configure IP profile of the network
-            ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
-
-            gateway_address=ip_profile['gateway_address']
-            dhcp_count=int(ip_profile['dhcp_count'])
-            subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
+            try:
+                #Configure IP profile of the network
+                ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
 
-            if ip_profile['dhcp_enabled']==True:
-                dhcp_enabled='true'
-            else:
-                dhcp_enabled='false'
-            dhcp_start_address=ip_profile['dhcp_start_address']
+                if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
+                    subnet_rand = random.randint(0, 255)
+                    ip_base = "192.168.{}.".format(subnet_rand)
+                    ip_profile['subnet_address'] = ip_base + "0/24"
+                else:
+                    ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
+
+                if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
+                    ip_profile['gateway_address']=ip_base + "1"
+                if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
+                    ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
+                if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
+                    ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
+                if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
+                    ip_profile['dhcp_start_address']=ip_base + "3"
+                if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
+                    ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
+                if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
+                    ip_profile['dns_address']=ip_base + "2"
+
+                gateway_address=ip_profile['gateway_address']
+                dhcp_count=int(ip_profile['dhcp_count'])
+                subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
+
+                if ip_profile['dhcp_enabled']==True:
+                    dhcp_enabled='true'
+                else:
+                    dhcp_enabled='false'
+                dhcp_start_address=ip_profile['dhcp_start_address']
 
-            #derive dhcp_end_address from dhcp_start_address & dhcp_count
-            end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
-            end_ip_int += dhcp_count - 1
-            dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
+                #derive dhcp_end_address from dhcp_start_address & dhcp_count
+                end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
+                end_ip_int += dhcp_count - 1
+                dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
 
-            ip_version=ip_profile['ip_version']
-            dns_address=ip_profile['dns_address']
+                ip_version=ip_profile['ip_version']
+                dns_address=ip_profile['dns_address']
+            except KeyError as exp:
+                self.logger.debug("Create Network REST: Key error {}".format(exp))
+                raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
 
             # either use client provided UUID or search for a first available
             #  if both are not defined we return none
@@ -2500,64 +2742,36 @@ class vimconnector(vimconn.vimconnector):
                 url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
                 add_vdc_rest_url = ''.join(url_list)
 
-            if net_type=='ptp':
-                fence_mode="isolated"
-                isshared='false'
-                is_inherited='false'
-                data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
-                                <Description>Openmano created</Description>
-                                        <Configuration>
-                                            <IpScopes>
-                                                <IpScope>
-                                                    <IsInherited>{1:s}</IsInherited>
-                                                    <Gateway>{2:s}</Gateway>
-                                                    <Netmask>{3:s}</Netmask>
-                                                    <Dns1>{4:s}</Dns1>
-                                                    <IsEnabled>{5:s}</IsEnabled>
-                                                    <IpRanges>
-                                                        <IpRange>
-                                                            <StartAddress>{6:s}</StartAddress>
-                                                            <EndAddress>{7:s}</EndAddress>
-                                                        </IpRange>
-                                                    </IpRanges>
-                                                </IpScope>
-                                            </IpScopes>
-                                            <FenceMode>{8:s}</FenceMode>
-                                        </Configuration>
-                                        <IsShared>{9:s}</IsShared>
-                            </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
-                                                        subnet_address, dns_address, dhcp_enabled,
-                                                        dhcp_start_address, dhcp_end_address, fence_mode, isshared)
-
-            else:
-                fence_mode="bridged"
-                is_inherited='false'
-                data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
-                                <Description>Openmano created</Description>
-                                        <Configuration>
-                                            <IpScopes>
-                                                <IpScope>
-                                                    <IsInherited>{1:s}</IsInherited>
-                                                    <Gateway>{2:s}</Gateway>
-                                                    <Netmask>{3:s}</Netmask>
-                                                    <Dns1>{4:s}</Dns1>
-                                                    <IsEnabled>{5:s}</IsEnabled>
-                                                    <IpRanges>
-                                                        <IpRange>
-                                                            <StartAddress>{6:s}</StartAddress>
-                                                            <EndAddress>{7:s}</EndAddress>
-                                                        </IpRange>
-                                                    </IpRanges>
-                                                </IpScope>
-                                            </IpScopes>
-                                            <ParentNetwork href="{8:s}"/>
-                                            <FenceMode>{9:s}</FenceMode>
-                                        </Configuration>
-                                        <IsShared>{10:s}</IsShared>
-                            </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
-                                                        subnet_address, dns_address, dhcp_enabled,
-                                                        dhcp_start_address, dhcp_end_address, available_networks,
-                                                        fence_mode, isshared)
+            #Creating all networks as Direct Org VDC type networks.
+            #This path is not used for Underlay (data/ptp) network interfaces.
+            fence_mode="bridged"
+            is_inherited='false'
+            data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
+                            <Description>Openmano created</Description>
+                                    <Configuration>
+                                        <IpScopes>
+                                            <IpScope>
+                                                <IsInherited>{1:s}</IsInherited>
+                                                <Gateway>{2:s}</Gateway>
+                                                <Netmask>{3:s}</Netmask>
+                                                <Dns1>{4:s}</Dns1>
+                                                <IsEnabled>{5:s}</IsEnabled>
+                                                <IpRanges>
+                                                    <IpRange>
+                                                        <StartAddress>{6:s}</StartAddress>
+                                                        <EndAddress>{7:s}</EndAddress>
+                                                    </IpRange>
+                                                </IpRanges>
+                                            </IpScope>
+                                        </IpScopes>
+                                        <ParentNetwork href="{8:s}"/>
+                                        <FenceMode>{9:s}</FenceMode>
+                                    </Configuration>
+                                    <IsShared>{10:s}</IsShared>
+                        </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
+                                                    subnet_address, dns_address, dhcp_enabled,
+                                                    dhcp_start_address, dhcp_end_address, available_networks,
+                                                    fence_mode, isshared)
 
             headers = vca.vcloud_session.get_vcloud_headers()
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
@@ -2569,8 +2783,8 @@ class vimconnector(vimconn.vimconnector):
                                      logger=vca.logger)
 
                 if response.status_code != 201:
-                    self.logger.debug("Create Network POST REST API call failed. Return status code {}"
-                                      .format(response.status_code))
+                    self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
+                                      .format(response.status_code, response.content))
                 else:
                     network = networkType.parseString(response.content, True)
                     create_nw_task = network.get_Tasks().get_Task()[0]
@@ -2578,7 +2792,7 @@ class vimconnector(vimconn.vimconnector):
                     # if we all ok we respond with content after network creation completes
                     # otherwise by default return None
                     if create_nw_task is not None:
-                        self.logger.debug("Create Network REST : Waiting for Nw creation complete")
+                        self.logger.debug("Create Network REST : Waiting for Network creation complete")
                         status = vca.block_until_completed(create_nw_task)
                         if status:
                             return response.content
@@ -3081,33 +3295,16 @@ class vimconnector(vimconn.vimconnector):
                 vcenter_conect object
         """
         vm_obj = None
-        vcenter_conect = None
         self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
-        try:
-            vm_vcenter_info = self.get_vm_vcenter_info(vapp_uuid)
-        except Exception as exp:
-            self.logger.error("Error occurred while getting vCenter infromationn"\
-                             " for VM : {}".format(exp))
-            raise vimconn.vimconnException(message=exp)
+        vcenter_conect, content = self.get_vcenter_content()
+        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
 
-        if vm_vcenter_info["vm_moref_id"]:
-            context = None
-            if hasattr(ssl, '_create_unverified_context'):
-                context = ssl._create_unverified_context()
+        if vm_moref_id:
             try:
                 no_of_pci_devices = len(pci_devices)
                 if no_of_pci_devices > 0:
-                    vcenter_conect = SmartConnect(
-                                            host=vm_vcenter_info["vm_vcenter_ip"],
-                                            user=vm_vcenter_info["vm_vcenter_user"],
-                                            pwd=vm_vcenter_info["vm_vcenter_password"],
-                                            port=int(vm_vcenter_info["vm_vcenter_port"]),
-                                            sslContext=context)
-                    atexit.register(Disconnect, vcenter_conect)
-                    content = vcenter_conect.RetrieveContent()
-
                     #Get VM and its host
-                    host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"])
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                     self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
                     if host_obj and vm_obj:
                         #get PCI devices from host on which vapp is currently installed
@@ -3376,7 +3573,7 @@ class vimconnector(vimconn.vimconnector):
                                                                              exp))
         return task
 
-    def get_vm_vcenter_info(self , vapp_uuid):
+    def get_vm_vcenter_info(self):
         """
         Method to get details of vCenter and vm
 
@@ -3409,16 +3606,8 @@ class vimconnector(vimconn.vimconnector):
         else:
             raise vimconn.vimconnException(message="vCenter user password is not provided."\
                                            " Please provide vCenter user password while attaching datacenter to tenant in --config")
-        try:
-            vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
-            if vm_details and "vm_vcenter_info" in vm_details:
-                vm_vcenter_info["vm_moref_id"] = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
-
-            return vm_vcenter_info
 
-        except Exception as exp:
-            self.logger.error("Error occurred while getting vCenter infromationn"\
-                             " for VM : {}".format(exp))
+        return vm_vcenter_info
 
 
     def get_vm_pci_details(self, vmuuid):
@@ -3434,23 +3623,12 @@ class vimconnector(vimconn.vimconnector):
         """
         vm_pci_devices_info = {}
         try:
-            vm_vcenter_info = self.get_vm_vcenter_info(vmuuid)
-            if vm_vcenter_info["vm_moref_id"]:
-                context = None
-                if hasattr(ssl, '_create_unverified_context'):
-                    context = ssl._create_unverified_context()
-                vcenter_conect = SmartConnect(host=vm_vcenter_info["vm_vcenter_ip"],
-                                        user=vm_vcenter_info["vm_vcenter_user"],
-                                        pwd=vm_vcenter_info["vm_vcenter_password"],
-                                        port=int(vm_vcenter_info["vm_vcenter_port"]),
-                                        sslContext=context
-                                    )
-                atexit.register(Disconnect, vcenter_conect)
-                content = vcenter_conect.RetrieveContent()
-
+            vcenter_conect, content = self.get_vcenter_content()
+            vm_moref_id = self.get_vm_moref_id(vmuuid)
+            if vm_moref_id:
                 #Get VM and its host
                 if content:
-                    host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"])
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                     if host_obj and vm_obj:
                         vm_pci_devices_info["host_name"]= host_obj.name
                         vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
@@ -3469,3 +3647,1144 @@ class vimconnector(vimconn.vimconnector):
                              " for VM : {}".format(exp))
             raise vimconn.vimconnException(message=exp)
 
+    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
+        """
+            Method to add a network adapter to each vm of a vApp
+            Args :
+                vapp - vApp object
+                network_name - name of network
+                primary_nic_index - int value for primary nic index
+                nicIndex - int value for nic index
+                net - network details dict (may carry 'floating_ip' and 'ip_address')
+                nic_type - adapter model name to attach to the vm (default NIC type if None)
+            Returns:
+                None
+        """
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCloud director")
+
+        try:
+            ip_address = None
+            floating_ip = False
+            if 'floating_ip' in net: floating_ip = net['floating_ip']
+
+            # Stub for ip_address feature
+            if 'ip_address' in net: ip_address = net['ip_address']
+
+            if floating_ip:
+                allocation_mode = "POOL"
+            elif ip_address:
+                allocation_mode = "MANUAL"
+            else:
+                allocation_mode = "DHCP"
+
+            if not nic_type:
+                for vms in vapp._get_vms():
+                    vm_id = (vms.id).split(':')[-1]
+
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)
+
+                    response = Http.get(url=url_rest_call,
+                                        headers=vca.vcloud_session.get_vcloud_headers(),
+                                        verify=vca.verify,
+                                        logger=vca.logger)
+                    if response.status_code != 200:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                             "status code : {}".format(url_rest_call,
+                                                                    response.content,
+                                                               response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+                                                                         "network connection section")
+
+                    data = response.content
+                    if '<PrimaryNetworkConnectionIndex>' not in data:
+                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+                                <NetworkConnection network="{}">
+                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                <IsConnected>true</IsConnected>
+                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                                                         allocation_mode)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
+                    else:
+                        new_item = """<NetworkConnection network="{}">
+                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                    <IsConnected>true</IsConnected>
+                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                    </NetworkConnection>""".format(network_name, nicIndex,
+                                                                          allocation_mode)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
+
+                    headers = vca.vcloud_session.get_vcloud_headers()
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+                    response = Http.put(url=url_rest_call, headers=headers, data=data,
+                                                                   verify=vca.verify,
+                                                                   logger=vca.logger)
+                    if response.status_code != 202:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                            "status code : {} ".format(url_rest_call,
+                                                                    response.content,
+                                                               response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+                                                                            "network connection section")
+                    else:
+                        nic_task = taskType.parseString(response.content, True)
+                        if isinstance(nic_task, GenericTask):
+                            vca.block_until_completed(nic_task)
+                            self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
+                                                               "default NIC type".format(vm_id))
+                        else:
+                            self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
+                                                              "connect NIC type".format(vm_id))
+            else:
+                for vms in vapp._get_vms():
+                    vm_id = (vms.id).split(':')[-1]
+
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)
+
+                    response = Http.get(url=url_rest_call,
+                                        headers=vca.vcloud_session.get_vcloud_headers(),
+                                        verify=vca.verify,
+                                        logger=vca.logger)
+                    if response.status_code != 200:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                            "status code : {}".format(url_rest_call,
+                                                                   response.content,
+                                                              response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+                                                                        "network connection section")
+                    data = response.content
+                    if '<PrimaryNetworkConnectionIndex>' not in data:
+                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+                                <NetworkConnection network="{}">
+                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                <IsConnected>true</IsConnected>
+                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                <NetworkAdapterType>{}</NetworkAdapterType>
+                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                                               allocation_mode, nic_type)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
+                    else:
+                        new_item = """<NetworkConnection network="{}">
+                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                    <IsConnected>true</IsConnected>
+                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                    <NetworkAdapterType>{}</NetworkAdapterType>
+                                    </NetworkConnection>""".format(network_name, nicIndex,
+                                                                allocation_mode, nic_type)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
+
+                    headers = vca.vcloud_session.get_vcloud_headers()
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+                    response = Http.put(url=url_rest_call, headers=headers, data=data,
+                                                                   verify=vca.verify,
+                                                                   logger=vca.logger)
+
+                    if response.status_code != 202:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                            "status code : {}".format(url_rest_call,
+                                                                   response.content,
+                                                              response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+                                                                           "network connection section")
+                    else:
+                        nic_task = taskType.parseString(response.content, True)
+                        if isinstance(nic_task, GenericTask):
+                            vca.block_until_completed(nic_task)
+                            self.logger.info("add_network_adapter_to_vms(): VM {} "\
+                                               "conneced to NIC type {}".format(vm_id, nic_type))
+                        else:
+                            self.logger.error("add_network_adapter_to_vms(): VM {} "\
+                                               "failed to connect NIC type {}".format(vm_id, nic_type))
+        except Exception as exp:
+            self.logger.error("add_network_adapter_to_vms() : exception occurred "\
+                                               "while adding Network adapter")
+            raise vimconn.vimconnException(message=exp)
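+
+    # Sketch of the allocation-mode selection performed above (the 'net' dicts below are
+    # assumed examples, not taken from a live deployment):
+    #     {'floating_ip': True}                             -> "POOL"   (vCloud allocates from the network's IP pool)
+    #     {'floating_ip': False, 'ip_address': '10.0.0.5'}  -> "MANUAL" (explicit address is used)
+    #     {}                                                -> "DHCP"   (default when neither is given)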
+
+
+    def set_numa_affinity(self, vmuuid, paired_threads_id):
+        """
+            Method to assign numa affinity in vm configuration parameters
+            Args :
+                vmuuid - vm uuid
+                paired_threads_id - one or more virtual processor
+                                    numbers
+            Returns:
+                None on success
+        """
+        vm_obj = None
+        vm_moref_id = None
+        try:
+            vm_moref_id, vm_vcenter_host, vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
+            if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
+                context = None
+                if hasattr(ssl, '_create_unverified_context'):
+                    context = ssl._create_unverified_context()
+                vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
+                                              pwd=self.passwd, port=int(vm_vcenter_port),
+                                              sslContext=context)
+                atexit.register(Disconnect, vcenter_conect)
+                content = vcenter_conect.RetrieveContent()
+
+                host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+                if vm_obj:
+                    config_spec = vim.vm.ConfigSpec()
+                    config_spec.extraConfig = []
+                    opt = vim.option.OptionValue()
+                    opt.key = 'numa.nodeAffinity'
+                    opt.value = str(paired_threads_id)
+                    config_spec.extraConfig.append(opt)
+                    task = vm_obj.ReconfigVM_Task(config_spec)
+                    if task:
+                        result = self.wait_for_vcenter_task(task, vcenter_conect)
+                        extra_config = vm_obj.config.extraConfig
+                        flag = False
+                        for opts in extra_config:
+                            if 'numa.nodeAffinity' in opts.key:
+                                flag = True
+                                self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
+                                                 "value {} for vm {}".format(opt.value, vm_obj))
+                        if flag:
+                            return
+                else:
+                    self.logger.error("set_numa_affinity: Failed to assign numa affinity")
+        except Exception as exp:
+            self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
+                                                       "for VM {} : {}".format(vm_obj, vm_moref_id))
+            raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
+                                                                           "affinity".format(exp))
+
+
+
+    def cloud_init(self, vapp, cloud_config):
+        """
+        Method to inject ssh keys into the VMs of a vApp via guest customization
+        vapp - vapp object
+        cloud_config a dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) string is a text script to be passed directly to cloud-init
+                'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content' (mandatory): string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    'owner': (optional) file owner, string with the format 'owner:group'
+                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+        """
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCloud director")
+
+        try:
+            key_pairs = []
+            userdata = []
+            if isinstance(cloud_config, dict):
+                if "key-pairs" in cloud_config:
+                    key_pairs = cloud_config["key-pairs"]
+
+                if "users" in cloud_config:
+                    userdata = cloud_config["users"]
+
+            for key in key_pairs:
+                for user in userdata:
+                    user_name = user.get('name')
+                    if 'key-pairs' in user and len(user['key-pairs']) > 0:
+                        for user_key in user['key-pairs']:
+                            customize_script = """
+                        #!/bin/bash
+                        echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+                        if [ "$1" = "precustomization" ];then
+                            echo performing precustomization tasks   on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+                            if [ ! -d /root/.ssh ];then
+                                mkdir /root/.ssh
+                                chown root:root /root/.ssh
+                                chmod 700 /root/.ssh
+                                touch /root/.ssh/authorized_keys
+                                chown root:root /root/.ssh/authorized_keys
+                                chmod 600 /root/.ssh/authorized_keys
+                                # make centos with selinux happy
+                                which restorecon && restorecon -Rv /root/.ssh
+                                echo '{key}' >> /root/.ssh/authorized_keys
+                            else
+                                touch /root/.ssh/authorized_keys
+                                chown root:root /root/.ssh/authorized_keys
+                                chmod 600 /root/.ssh/authorized_keys
+                                echo '{key}' >> /root/.ssh/authorized_keys
+                            fi
+                            if [ -d /home/{user_name} ];then
+                                if [ ! -d /home/{user_name}/.ssh ];then
+                                    mkdir /home/{user_name}/.ssh
+                                    chown {user_name}:{user_name} /home/{user_name}/.ssh
+                                    chmod 700 /home/{user_name}/.ssh
+                                    touch /home/{user_name}/.ssh/authorized_keys
+                                    chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
+                                    chmod 600 /home/{user_name}/.ssh/authorized_keys
+                                    # make centos with selinux happy
+                                    which restorecon && restorecon -Rv /home/{user_name}/.ssh
+                                    echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
+                                else
+                                    touch /home/{user_name}/.ssh/authorized_keys
+                                    chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
+                                    chmod 600 /home/{user_name}/.ssh/authorized_keys
+                                    echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
+                                fi
+                            fi
+                        fi""".format(key=key, user_name=user_name, user_key=user_key)
+
+                            for vm in vapp._get_vms():
+                                vm_name = vm.name
+                                task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
+                                if isinstance(task, GenericTask):
+                                    vca.block_until_completed(task)
+                                    self.logger.info("cloud_init : customized guest os task "\
+                                                        "completed for VM {}".format(vm_name))
+                                else:
+                                    self.logger.error("cloud_init : task for customized guest os"\
+                                                               "failed for VM {}".format(vm_name))
+        except Exception as exp:
+            self.logger.error("cloud_init : exception occurred while injecting "\
+                                                                       "ssh-key")
+            raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
+                                                               "ssh-key".format(exp))
+
+
+    def add_new_disk(self, vca, vapp_uuid, disk_size):
+        """
+            Method to create an empty vm disk
+
+            Args:
+                vapp_uuid - is vapp identifier.
+                disk_size - size of disk to be created in GB
+
+            Returns:
+                None
+        """
+        status = False
+        vm_details = None
+        try:
+            #Disk size in GB, convert it into MB
+            if disk_size is not None:
+                disk_size_mb = int(disk_size) * 1024
+                vm_details = self.get_vapp_details_rest(vapp_uuid)
+
+            if vm_details and "vm_virtual_hardware" in vm_details:
+                self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+                disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+                status = self.add_new_disk_rest(vca, disk_href, disk_size_mb)
+
+        except Exception as exp:
+            msg = "Error occurred while creating new disk {}.".format(exp)
+            self.rollback_newvm(vapp_uuid, msg)
+
+        if status:
+            self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+        else:
+            #If failed to add disk, delete VM
+            msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
+            self.rollback_newvm(vapp_uuid, msg)
+
+
+    def add_new_disk_rest(self, vca, disk_href, disk_size_mb):
+        """
+        Retrives vApp Disks section & add new empty disk
+
+        Args:
+            disk_href: Disk section href to add disk
+            disk_size_mb: Disk size in MB
+
+            Returns: Status of add new disk task
+        """
+        status = False
+        if not (vca.vcloud_session and vca.vcloud_session.organization):
+            return status
+
+        response = Http.get(url=disk_href,
+                            headers=vca.vcloud_session.get_vcloud_headers(),
+                            verify=vca.verify,
+                            logger=vca.logger)
+
+        if response.status_code != requests.codes.ok:
+            self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
+                              .format(disk_href, response.status_code))
+            return status
+        try:
+            #Find but type & max of instance IDs assigned to disks
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+            instance_id = 0
+            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
+                if item.find("rasd:Description",namespaces).text == "Hard disk":
+                    inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
+                    if inst_id > instance_id:
+                        instance_id = inst_id
+                        disk_item = item.find("rasd:HostResource" ,namespaces)
+                        bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
+                        bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
+
+            instance_id = instance_id + 1
+            new_item =   """<Item>
+                                <rasd:Description>Hard disk</rasd:Description>
+                                <rasd:ElementName>New disk</rasd:ElementName>
+                                <rasd:HostResource
+                                    xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
+                                    vcloud:capacity="{}"
+                                    vcloud:busSubType="{}"
+                                    vcloud:busType="{}"></rasd:HostResource>
+                                <rasd:InstanceID>{}</rasd:InstanceID>
+                                <rasd:ResourceType>17</rasd:ResourceType>
+                            </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
+
+            new_data = response.content
+            #Add new item at the bottom
+            new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
+
+            # Send PUT request to modify virtual hardware section with new disk
+            headers = vca.vcloud_session.get_vcloud_headers()
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+
+            response = Http.put(url=disk_href,
+                                data=new_data,
+                                headers=headers,
+                                verify=vca.verify, logger=self.logger)
+
+            if response.status_code != 202:
+                self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
+                                  .format(disk_href, response.status_code, response.content))
+            else:
+                add_disk_task = taskType.parseString(response.content, True)
+                if type(add_disk_task) is GenericTask:
+                    status = vca.block_until_completed(add_disk_task)
+                    if not status:
+                        self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
+
+        except Exception as exp:
+            self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
+
+        return status
+
+
+    def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
+        """
+            Method to add existing disk to vm
+            Args :
+                catalogs - List of VDC catalogs
+                image_id - Catalog ID
+                size - disk size in GB
+                template_name - Name of template in catalog
+                vapp_uuid - UUID of vApp
+            Returns:
+                None
+        """
+        disk_info = None
+        vcenter_conect, content = self.get_vcenter_content()
+        #find moref-id of vm in image
+        catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
+                                                         image_id=image_id,
+                                                        )
+
+        if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
+            if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
+                catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
+                if catalog_vm_moref_id:
+                    self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
+                    host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
+                    if catalog_vm_obj:
+                        #find existing disk
+                        disk_info = self.find_disk(catalog_vm_obj)
+                    else:
+                        exp_msg = "No VM with image id {} found".format(image_id)
+                        self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+        else:
+            exp_msg = "No Image found with image ID {} ".format(image_id)
+            self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+
+        if disk_info:
+            self.logger.info("Existing disk_info : {}".format(disk_info))
+            #get VM
+            vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+            host, vm_obj = self.get_vm_obj(content, vm_moref_id)
+            if vm_obj:
+                status = self.add_disk(vcenter_conect=vcenter_conect,
+                                       vm=vm_obj,
+                                       disk_info=disk_info,
+                                       size=size,
+                                       vapp_uuid=vapp_uuid
+                                       )
+                if status:
+                    self.logger.info("Disk from image id {} added to {}".format(image_id,
+                                                                                vm_obj.config.name))
+        else:
+            msg = "No disk found with image id {} to add in VM {}".format(
+                                                            image_id,
+                                                            vm_obj.config.name)
+            self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
+
+
+    def find_disk(self, vm_obj):
+        """
+         Method to find details of existing disk in VM
+            Args :
+                vm_obj - vCenter object of VM
+            Returns:
+                disk_info : dict of disk details
+        """
+        disk_info = {}
+        if vm_obj:
+            try:
+                devices = vm_obj.config.hardware.device
+                for device in devices:
+                    if type(device) is vim.vm.device.VirtualDisk:
+                        if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
+                            disk_info["full_path"] = device.backing.fileName
+                            disk_info["datastore"] = device.backing.datastore
+                            disk_info["capacityKB"] = device.capacityInKB
+                            break
+            except Exception as exp:
+                self.logger.error("find_disk() : exception occurred while "\
+                                  "getting existing disk details :{}".format(exp))
+        return disk_info
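+
+    # find_disk() hands add_disk() a dict shaped like the following (values are
+    # illustrative assumptions):
+    #     {'full_path': '[datastore1] template-vm/template-vm.vmdk',
+    #      'datastore': <vim.Datastore object>,
+    #      'capacityKB': 10485760}       # 10 GB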
+
+
+    def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
+        """
+         Method to add existing disk in VM
+            Args :
+                vcenter_conect - vCenter content object
+                vm - vCenter vm object
+                disk_info : dict of disk details
+            Returns:
+                status : status of add disk task
+        """
+        datastore = disk_info["datastore"] if "datastore" in disk_info else None
+        fullpath = disk_info["full_path"] if "full_path" in disk_info else None
+        capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
+        if size is not None:
+            #Convert size from GB to KB
+            sizeKB = int(size) * 1024 * 1024
+            #compare size of existing disk and user given size. Assign whichever is greater
+            self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
+                                                                    sizeKB, capacityKB))
+            if sizeKB > capacityKB:
+                capacityKB = sizeKB
+
+        if datastore and fullpath and capacityKB:
+            try:
+                spec = vim.vm.ConfigSpec()
+                # get all disks on a VM, set unit_number to the next available
+                unit_number = 0
+                for dev in vm.config.hardware.device:
+                    if hasattr(dev.backing, 'fileName'):
+                        unit_number = int(dev.unitNumber) + 1
+                        # unit_number 7 reserved for scsi controller
+                        if unit_number == 7:
+                            unit_number += 1
+                    if isinstance(dev, vim.vm.device.VirtualDisk):
+                        #vim.vm.device.VirtualSCSIController
+                        controller_key = dev.controllerKey
+
+                self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
+                                                                    unit_number, controller_key))
+                # add disk here
+                dev_changes = []
+                disk_spec = vim.vm.device.VirtualDeviceSpec()
+                disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+                disk_spec.device = vim.vm.device.VirtualDisk()
+                disk_spec.device.backing = \
+                    vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+                disk_spec.device.backing.thinProvisioned = True
+                disk_spec.device.backing.diskMode = 'persistent'
+                disk_spec.device.backing.datastore  = datastore
+                disk_spec.device.backing.fileName  = fullpath
+
+                disk_spec.device.unitNumber = unit_number
+                disk_spec.device.capacityInKB = capacityKB
+                disk_spec.device.controllerKey = controller_key
+                dev_changes.append(disk_spec)
+                spec.deviceChange = dev_changes
+                task = vm.ReconfigVM_Task(spec=spec)
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+                return status
+            except Exception as exp:
+                exp_msg = "add_disk() : exception {} occurred while adding disk "\
+                          "{} to vm {}".format(exp,
+                                               fullpath,
+                                               vm.config.name)
+                self.rollback_newvm(vapp_uuid, exp_msg)
+        else:
+            msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
+            self.rollback_newvm(vapp_uuid, msg)
+
+
+    def get_vcenter_content(self):
+        """
+         Get the vsphere content object
+        """
+        try:
+            vm_vcenter_info = self.get_vm_vcenter_info()
+        except Exception as exp:
+            self.logger.error("Error occurred while getting vCenter infromationn"\
+                             " for VM : {}".format(exp))
+            raise vimconn.vimconnException(message=exp)
+
+        context = None
+        if hasattr(ssl, '_create_unverified_context'):
+            context = ssl._create_unverified_context()
+
+        vcenter_conect = SmartConnect(
+                    host=vm_vcenter_info["vm_vcenter_ip"],
+                    user=vm_vcenter_info["vm_vcenter_user"],
+                    pwd=vm_vcenter_info["vm_vcenter_password"],
+                    port=int(vm_vcenter_info["vm_vcenter_port"]),
+                    sslContext=context
+                )
+        atexit.register(Disconnect, vcenter_conect)
+        content = vcenter_conect.RetrieveContent()
+        return vcenter_conect, content
+
+
+    def get_vm_moref_id(self, vapp_uuid):
+        """
+        Get the moref_id of given VM
+        """
+        try:
+            vm_moref_id = None
+            if vapp_uuid:
+                vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
+                if vm_details and "vm_vcenter_info" in vm_details:
+                    vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
+
+            return vm_moref_id
+
+        except Exception as exp:
+            self.logger.error("Error occurred while getting VM moref ID "\
+                             " for VM : {}".format(exp))
+            return None
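+
+    # Typical combined usage of the two helpers above, as followed by add_pci_devices,
+    # get_vm_pci_details and add_sriov:
+    #     vcenter_conect, content = self.get_vcenter_content()
+    #     vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+    #     if vm_moref_id:
+    #         host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)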
+
+
+    def get_vapp_template_details(self, catalogs=None, image_id=None, template_name=None):
+        """
+            Method to get vApp template details
+                Args :
+                    catalogs - list of VDC catalogs
+                    image_id - Catalog ID to find
+                    template_name : template name in catalog
+                Returns:
+                    parsed_response : dict of vApp template details
+        """
+        parsed_response = {}
+
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+        try:
+            catalog = self.get_catalog_obj(image_id, catalogs)
+            if catalog:
+                template_name = self.get_catalogbyid(image_id, catalogs)
+                catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
+                if len(catalog_items) == 1:
+                    response = Http.get(catalog_items[0].get_href(),
+                                        headers=vca.vcloud_session.get_vcloud_headers(),
+                                        verify=vca.verify,
+                                        logger=vca.logger)
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    vapp_tempalte_href = entity.get("href")
+                    #get vapp details and parse moref id
+
+                    namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
+                                  'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
+                                  'vmw': 'http://www.vmware.com/schema/ovf',
+                                  'vm': 'http://www.vmware.com/vcloud/v1.5',
+                                  'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+                                  'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
+                                  'xmlns':"http://www.vmware.com/vcloud/v1.5"
+                                }
+
+                    if vca.vcloud_session and vca.vcloud_session.organization:
+                        response = Http.get(url=vapp_template_href,
+                                            headers=vca.vcloud_session.get_vcloud_headers(),
+                                            verify=vca.verify,
+                                            logger=vca.logger
+                                            )
+
+                        if response.status_code != requests.codes.ok:
+                            self.logger.debug("REST API call {} failed. Return status code {}".format(
+                                                vapp_tempalte_href, response.status_code))
+
+                        else:
+                            xmlroot_respond = XmlElementTree.fromstring(response.content)
+                            vCloud_extension_section = None
+                            children_section = xmlroot_respond.find('vm:Children/', namespaces)
+                            if children_section is not None:
+                                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+                            if vCloud_extension_section is not None:
+                                vm_vcenter_info = {}
+                                vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
+                                vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+                                if vmext is not None:
+                                    vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+                                parsed_response["vm_vcenter_info"]= vm_vcenter_info
+
+        except Exception as exp:
+            self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
+
+        return parsed_response
+
+
+    def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
+        """
+            Method to delete vApp
+                Args :
+                    vapp_uuid - vApp UUID
+                    msg - Error message to be logged
+                    exp_type : Exception type
+                Returns:
+                    None
+        """
+        if vapp_uuid:
+            status = self.delete_vminstance(vapp_uuid)
+        else:
+            msg = "No vApp ID"
+        self.logger.error(msg)
+        if exp_type == "Genric":
+            raise vimconn.vimconnException(msg)
+        elif exp_type == "NotFound":
+            raise vimconn.vimconnNotFoundException(message=msg)
+
+    def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
+        """
+            Method to attach SRIOV adapters to VM
+
+             Args:
+                vapp_uuid - uuid of vApp/VM
+                sriov_nets - SRIOV devices information as specified in VNFD (flavor)
+                vmname_andid - vmname
+
+            Returns:
+                The status of the add-SRIOV-adapter task, the vm object and
+                the vcenter_conect object
+        """
+        vm_obj = None
+        vcenter_conect, content = self.get_vcenter_content()
+        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+
+        if vm_moref_id:
+            try:
+                no_of_sriov_devices = len(sriov_nets)
+                if no_of_sriov_devices > 0:
+                    #Get VM and its host
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+                    self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+                    if host_obj and vm_obj:
+                        #get SRIOV devices from the host on which the vApp is currently installed
+                        available_sriov_devices = self.get_sriov_devices(host_obj,
+                                                                         no_of_sriov_devices)
+
+                        if len(available_sriov_devices) == 0:
+                            #find another host with enough active SRIOV devices
+                            new_host_obj, available_sriov_devices = self.get_host_and_sriov_devices(
+                                                                        content,
+                                                                        no_of_sriov_devices)
+
+                            if new_host_obj is not None and len(available_sriov_devices) > 0:
+                                #Migrate vm to the host where SRIOV devices are available
+                                self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
+                                                                                    new_host_obj))
+                                task = self.relocate_vm(new_host_obj, vm_obj)
+                                if task is not None:
+                                    result = self.wait_for_vcenter_task(task, vcenter_conect)
+                                    self.logger.info("Migrate VM status: {}".format(result))
+                                    host_obj = new_host_obj
+                                else:
+                                    self.logger.info("Fail to migrate VM : {}".format(result))
+                                    raise vimconn.vimconnNotFoundException(
+                                    "Fail to migrate VM : {} to host {}".format(
+                                                    vmname_andid,
+                                                    new_host_obj)
+                                        )
+
+                        if host_obj is not None and available_sriov_devices is not None and len(available_sriov_devices) > 0:
+                            #Add SRIOV devices one by one
+                            for sriov_net in sriov_nets:
+                                network_name = sriov_net.get('net_id')
+                                dvs_portgr_name = self.create_dvPort_group(network_name)
+                                if sriov_net.get('type') == "VF":
+                                    #add vlan ID: modify the portgroup for the vlan ID
+                                    self.configure_vlanID(content, vcenter_conect, network_name)
+
+                                task = self.add_sriov_to_vm(content,
+                                                            vm_obj,
+                                                            host_obj,
+                                                            network_name,
+                                                            available_sriov_devices[0]
+                                                            )
+                                if task:
+                                    status = self.wait_for_vcenter_task(task, vcenter_conect)
+                                    if status:
+                                        self.logger.info("Added SRIOV {} to VM {}".format(
+                                                                        no_of_sriov_devices,
+                                                                        str(vm_obj)))
+                                else:
+                                    self.logger.error("Fail to add SRIOV {} to VM {}".format(
+                                                                        no_of_sriov_devices,
+                                                                        str(vm_obj)))
+                                    raise vimconn.vimconnUnexpectedResponse(
+                                    "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
+                                        )
+                            return True, vm_obj, vcenter_conect
+                        else:
+                            self.logger.error("Currently there is no host with"\
+                                              " {} number of avaialble SRIOV "\
+                                              "VFs required for VM {}".format(
+                                                                no_of_sriov_devices,
+                                                                vmname_andid)
+                                              )
+                            raise vimconn.vimconnNotFoundException(
+                                    "Currently there is no host with {} "\
+                                    "number of avaialble SRIOV devices required for VM {}".format(
+                                                                            no_of_sriov_devices,
+                                                                            vmname_andid))
+                else:
+                    self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
+
+            except vmodl.MethodFault as error:
+                self.logger.error("Error occurred while adding SRIOV {} ",error)
+        return None, vm_obj, vcenter_conect
+
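+# ############# Example (illustrative sketch) #################
+# Shape of the sriov_nets argument as read by add_sriov() above; only the
+# 'net_id' and 'type' keys are consumed, and the values below are made up:
+#
+#     sriov_nets = [{'net_id': 'data-net-1', 'type': 'VF'},
+#                   {'net_id': 'data-net-2', 'type': 'VF'}]
+#     status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid, sriov_nets, vmname_andid)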
+
+    def get_sriov_devices(self, host, no_of_vfs):
+        """
+            Method to get the details of SRIOV devices on given host
+             Args:
+                host - vSphere host object
+                no_of_vfs - number of VFs needed on host
+
+             Returns:
+                array of SRIOV devices
+        """
+        sriovInfo = []
+        if host:
+            for device in host.config.pciPassthruInfo:
+                if isinstance(device, vim.host.SriovInfo) and device.sriovActive:
+                    if device.numVirtualFunction >= no_of_vfs:
+                        sriovInfo.append(device)
+                        break
+        return sriovInfo
+
+
+    def get_host_and_sriov_devices(self, content, no_of_vfs):
+        """
+         Method to get the details of SRIOV devices on all hosts
+
+            Args:
+                content - vCenter content object
+                no_of_vfs - number of PCI VFs needed on the host
+
+            Returns:
+                 host object and list of SRIOV devices
+        """
+        host_obj = None
+        sriov_device_objs = None
+        try:
+            if content:
+                container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                            [vim.HostSystem], True)
+                for host in container.view:
+                    devices = self.get_sriov_devices(host, no_of_vfs)
+                    if devices:
+                        host_obj = host
+                        sriov_device_objs = devices
+                        break
+        except Exception as exp:
+            self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
+
+        return host_obj, sriov_device_objs
+
+
+    def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
+        """
+         Method to add SRIOV adapter to vm
+
+            Args:
+                host_obj - vSphere host object
+                vm_obj - vSphere vm object
+                content - vCenter content object
+                network_name - name of the distributed virtual portgroup
+                sriov_device - SRIOV device info
+
+            Returns:
+                 task object
+        """
+        devices = []
+        vnic_label = "sriov nic"
+        try:
+            dvs_portgr = self.get_dvport_group(network_name)
+            network_name = dvs_portgr.name
+            nic = vim.vm.device.VirtualDeviceSpec()
+            # VM device
+            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+            nic.device = vim.vm.device.VirtualSriovEthernetCard()
+            nic.device.addressType = 'assigned'
+            #nic.device.key = 13016
+            nic.device.deviceInfo = vim.Description()
+            nic.device.deviceInfo.label = vnic_label
+            nic.device.deviceInfo.summary = network_name
+            nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
+
+            nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
+            nic.device.backing.deviceName = network_name
+            nic.device.backing.useAutoDetect = False
+            nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+            nic.device.connectable.startConnected = True
+            nic.device.connectable.allowGuestControl = True
+
+            nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
+            nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
+            nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
+
+            devices.append(nic)
+            vmconf = vim.vm.ConfigSpec(deviceChange=devices)
+            task = vm_obj.ReconfigVM_Task(vmconf)
+            return task
+        except Exception as exp:
+            self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
+            return None
+
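+# ############# Example (illustrative sketch) #################
+# The returned task is meant to be polled, as add_sriov() does above:
+#
+#     task = self.add_sriov_to_vm(content, vm_obj, host_obj, network_name, sriov_device)
+#     if task:
+#         status = self.wait_for_vcenter_task(task, vcenter_conect)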
+
+    def create_dvPort_group(self, network_name):
+        """
+         Method to create a distributed virtual portgroup
+
+            Args:
+                network_name - name of network/portgroup
+
+            Returns:
+                portgroup key
+        """
+        try:
+            network_name = "{}-{}".format(network_name, uuid.uuid4())
+            vcenter_conect, content = self.get_vcenter_content()
+
+            dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
+            if dv_switch:
+                dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+                dv_pg_spec.name = network_name
+
+                dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
+                dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+                dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
+                dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
+                dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
+                dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
+
+                task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
+                self.wait_for_vcenter_task(task, vcenter_conect)
+
+                dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
+                if dvPort_group:
+                    self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
+                    return dvPort_group.key
+            else:
+                self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
+
+        except Exception as exp:
+            self.logger.error("Error occurred while creating disributed virtaul port group {}"\
+                             " : {}".format(network_name, exp))
+        return None
+
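+# ############# Example (illustrative sketch) #################
+# create_dvPort_group() returns the portgroup *key*, which is what
+# get_dvport_group() matches on; the network name below is made up:
+#
+#     pg_key = self.create_dvPort_group("sriov-net")
+#     dvPort_group = self.get_dvport_group(pg_key)
+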
+    def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
+        """
+         Method to reconfigure a distributed virtual portgroup
+
+            Args:
+                dvPort_group_name - name of the distributed virtual portgroup
+                content - vCenter content object
+                config_info - distributed virtual portgroup configuration
+
+            Returns:
+                task object
+        """
+        try:
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+                dv_pg_spec.configVersion = dvPort_group.config.configVersion
+                dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+                if "vlanID" in config_info:
+                    dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
+                    dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
+
+                task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
+                return task
+            else:
+                return None
+        except Exception as exp:
+            self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
+                             " : {}".format(dvPort_group_name, exp))
+            return None
+
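+# ############# Example (illustrative sketch) #################
+# Only the 'vlanID' key of config_info is honoured by reconfig_portgroup();
+# the vlan value below is an arbitrary example:
+#
+#     task = self.reconfig_portgroup(content, dvPort_group_name,
+#                                    config_info={"vlanID": 3001})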
+
+    def destroy_dvport_group(self, dvPort_group_name):
+        """
+         Method to destroy a distributed virtual portgroup
+
+            Args:
+                dvPort_group_name - name of the distributed virtual portgroup
+
+            Returns:
+                status of the destroy task if the portgroup was deleted, else None
+        """
+        vcenter_conect, content = self.get_vcenter_content()
+        try:
+            status = None
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                task = dvPort_group.Destroy_Task()
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+            return status
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
+                                                                    exp, dvPort_group_name))
+            return None
+
+
+    def get_dvport_group(self, dvPort_group_name):
+        """
+        Method to get a distributed virtual portgroup
+
+            Args:
+                dvPort_group_name - key of the distributed virtual portgroup
+
+            Returns:
+                portgroup object
+        """
+        vcenter_conect, content = self.get_vcenter_content()
+        dvPort_group = None
+        try:
+            container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
+            for item in container.view:
+                if item.key == dvPort_group_name:
+                    dvPort_group = item
+                    break
+            return dvPort_group
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
+                                                                            exp, dvPort_group_name))
+            return None
+
+    def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
+        """
+         Method to get the vlanID of a distributed virtual portgroup
+
+            Args:
+                dvPort_group_name - name of the distributed virtual portgroup
+
+            Returns:
+                vlan ID
+        """
+        vlanId = None
+        try:
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
+                                                                            exp, dvPort_group_name))
+        return vlanId
+
+
+    def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
+        """
+         Method to configure a vlanID on a distributed virtual portgroup
+
+            Args:
+                content - vCenter content object
+                vcenter_conect - vCenter connection object
+                dvPort_group_name - name of the distributed virtual portgroup
+
+            Returns:
+                None
+        """
+        vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
+        if vlanID == 0:
+            #configure vlanID
+            vlanID = self.generate_vlanID(dvPort_group_name)
+            config = {"vlanID": vlanID}
+            task = self.reconfig_portgroup(content, dvPort_group_name,
+                                           config_info=config)
+            if task:
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+                if status:
+                    self.logger.info("Reconfigured portgroup {} for vlan ID {}".format(
+                                                        dvPort_group_name, vlanID))
+            else:
+                self.logger.error("Failed to reconfigure portgroup {} for vlanID {}".format(
+                                        dvPort_group_name, vlanID))
+
+
+    def generate_vlanID(self, network_name):
+        """
+         Method to get an unused vlanID
+            Args:
+                network_name - name of network/portgroup
+            Returns:
+                vlanID
+        """
+        vlan_id = None
+        used_ids = []
+        if self.config.get('vlanID_range') is None:
+            raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
+                        "config value before creating an SRIOV network with a vlan tag")
+        if "used_vlanIDs" not in self.persistent_info:
+            self.persistent_info["used_vlanIDs"] = {}
+        else:
+            used_ids = self.persistent_info["used_vlanIDs"].values()
+
+        for vlanID_range in self.config.get('vlanID_range'):
+            start_vlanid, end_vlanid = map(int, vlanID_range.split("-"))
+            if start_vlanid > end_vlanid:
+                raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
+                                                                        vlanID_range))
+
+            for vid in xrange(start_vlanid, end_vlanid + 1):
+                if vid not in used_ids:
+                    vlan_id = vid
+                    self.persistent_info["used_vlanIDs"][network_name] = vlan_id
+                    return vlan_id
+        if vlan_id is None:
+            raise vimconn.vimconnConflictException("All Vlan IDs are in use")
+
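+# ############# Example (illustrative sketch) #################
+# Expected 'vlanID_range' config format, inferred from the parsing above:
+# a list of "start-end" strings; the ranges shown are arbitrary examples.
+#
+#     vlanID_range: ["3000-3100", "3300-3400"]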
+
+    def get_obj(self, content, vimtype, name):
+        """
+         Get the vSphere object associated with a given text name
+        """
+        obj = None
+        container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
+        for item in container.view:
+            if item.name == name:
+                obj = item
+                break
+        return obj
+
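+# ############# Example (illustrative sketch) #################
+# get_obj() performs a flat name lookup over a container view, e.g. as used
+# above for the NIC backing network and the distributed virtual switch:
+#
+#     network = self.get_obj(content, [vim.Network], network_name)
+#     dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)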