New file setup.py: builds a Python package
[osm/RO.git] / vimconn_vmware.py
index cd5a47b..a224255 100644
@@ -32,8 +32,14 @@ import os
 import traceback
 import itertools
 import requests
+import ssl
+import atexit
+
+from pyVmomi import vim, vmodl
+from pyVim.connect import SmartConnect, Disconnect
 
 from xml.etree import ElementTree as XmlElementTree
+from lxml import etree as lxmlElementTree
 
 import yaml
 from pyvcloud import Http
@@ -64,7 +70,7 @@ STANDALONE = 'standalone'
 # key for flavor dicts
 FLAVOR_RAM_KEY = 'ram'
 FLAVOR_VCPUS_KEY = 'vcpus'
-
+FLAVOR_DISK_KEY = 'disk'
 DEFAULT_IP_PROFILE = {'gateway_address':"192.168.1.1",
                       'dhcp_count':50,
                       'subnet_address':"192.168.1.0/24",
@@ -80,7 +86,7 @@ MAX_WAIT_TIME = 1800
 VCAVERSION = '5.9'
 
 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
-__date__ = "$23-Dec-2016 11:09:29$"
+__date__ = "$12-Jan-2017 11:09:29$"
 __version__ = '0.1'
 
 #     -1: "Could not be created",
@@ -115,13 +121,12 @@ netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INA
                         'ERROR': 'ERROR', 'DELETED': 'DELETED'
                         }
 
-# dict used to store flavor in memory
-flavorlist = {}
-
-
 class vimconnector(vimconn.vimconnector):
+    # dict used to store flavor in memory
+    flavorlist = {}
+
     def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
-                 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}):
+                 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
         """
         Constructor create vmware connector to vCloud director.
 
@@ -148,6 +153,7 @@ class vimconnector(vimconn.vimconnector):
 
             dict['admin_username']
             dict['admin_password']
+            config - dict providing NSX manager and vCenter access information
+            persistent_info - dict where the connector may persist data across calls
 
             Returns:
                 Nothing.
@@ -158,6 +164,7 @@ class vimconnector(vimconn.vimconnector):
 
         self.logger = logging.getLogger('openmano.vim.vmware')
         self.logger.setLevel(10)
+        self.persistent_info = persistent_info
 
         self.name = name
         self.id = uuid
@@ -171,6 +178,13 @@ class vimconnector(vimconn.vimconnector):
         self.admin_password = None
         self.admin_user = None
         self.org_name = ""
+        self.nsx_manager = None
+        self.nsx_user = None
+        self.nsx_password = None
+        self.vcenter_ip = None
+        self.vcenter_port = None
+        self.vcenter_user = None
+        self.vcenter_password = None
 
         if tenant_name is not None:
             orgnameandtenant = tenant_name.split(":")
@@ -179,7 +193,7 @@ class vimconnector(vimconn.vimconnector):
                 self.org_name = orgnameandtenant[0]
             else:
                 self.tenant_name = tenant_name
-        elif "orgname" in config:
+        if "orgname" in config:
             self.org_name = config['orgname']
 
         if log_level:
@@ -191,6 +205,18 @@ class vimconnector(vimconn.vimconnector):
         except KeyError:
             raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
 
+        try:
+            self.nsx_manager = config['nsx_manager']
+            self.nsx_user = config['nsx_user']
+            self.nsx_password = config['nsx_password']
+        except KeyError:
+            raise vimconn.vimconnException(message="Error: nsx_manager, nsx_user or nsx_password is missing in config")
+
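+        # vCenter access is optional here; it is only required for operations that go
+        # directly to vCenter via pyVmomi (PCI passthrough, VM relocation, memory reservation)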
+        self.vcenter_ip = config.get("vcenter_ip", None)
+        self.vcenter_port = config.get("vcenter_port", None)
+        self.vcenter_user = config.get("vcenter_user", None)
+        self.vcenter_password = config.get("vcenter_password", None)
+
         self.org_uuid = None
         self.vca = None
 
@@ -678,9 +704,9 @@ class vimconnector(vimconn.vimconnector):
         """Obtain flavor details from the  VIM
             Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
         """
-        if flavor_id not in flavorlist:
+        if flavor_id not in vimconnector.flavorlist:
             raise vimconn.vimconnNotFoundException("Flavor not found.")
-        return flavorlist[flavor_id]
+        return vimconnector.flavorlist[flavor_id]
 
     def new_flavor(self, flavor_data):
         """Adds a tenant flavor to VIM
@@ -699,15 +725,37 @@ class vimconnector(vimconn.vimconnector):
                             vpci: requested virtual PCI address
                 disk: disk size
                 is_public:
-
-
-
                  #TODO to concrete
         Returns the flavor identifier"""
 
+        # extract RAM, vCPU and disk requirements, with minimal defaults
+        self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
+        new_flavor = flavor_data
+        ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
+        cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
+        disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
+
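+        # illustrative shape of the optional "extended" EPA block (assumed from the parsing below):
+        #   {"numas": [{"memory": <GB>, "paired-threads"|"cores"|"threads": <n>,
+        #               "interfaces": [{"dedicated": "yes", ...}]}]}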
+        extended_flv = flavor_data.get("extended")
+        if extended_flv:
+            numas = extended_flv.get("numas")
+            if numas:
+                for numa in numas:
+                    # NUMA requirements overwrite the plain ram and vcpus values
+                    ram = numa['memory'] * 1024
+                    # each paired thread accounts for two vCPUs
+                    if 'paired-threads' in numa:
+                        cpu = numa['paired-threads'] * 2
+                    elif 'cores' in numa:
+                        cpu = numa['cores']
+                    elif 'threads' in numa:
+                        cpu = numa['threads']
+
+        new_flavor[FLAVOR_RAM_KEY] = ram
+        new_flavor[FLAVOR_VCPUS_KEY] = cpu
+        new_flavor[FLAVOR_DISK_KEY] = disk
         # generate a new uuid put to internal dict and return it.
         flavor_id = uuid.uuid4()
-        flavorlist[str(flavor_id)] = flavor_data
+        vimconnector.flavorlist[str(flavor_id)] = new_flavor
+        self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
 
         return str(flavor_id)
 
@@ -716,10 +764,10 @@ class vimconnector(vimconn.vimconnector):
 
            Returns the used id or raise an exception
         """
-        if flavor_id not in flavorlist:
+        if flavor_id not in vimconnector.flavorlist:
             raise vimconn.vimconnNotFoundException("Flavor not found.")
 
-        flavorlist.pop(flavor_id, None)
+        vimconnector.flavorlist.pop(flavor_id, None)
         return flavor_id
 
     def new_image(self, image_dict):
@@ -901,6 +949,7 @@ class vimconnector(vimconn.vimconnector):
                             f.close()
                             if progress:
                                 progress_bar.finish()
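+                            # brief pause, presumably to let vCD finish processing the uploaded template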
+                            time.sleep(10)
                     return True
                 else:
                     self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
@@ -1043,6 +1092,43 @@ class vimconnector(vimconn.vimconnector):
 
         return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
 
+    def get_image_list(self, filter_dict={}):
+        '''Obtain tenant images from VIM
+        Filter_dict can be:
+            name: image name
+            id: image uuid
+            checksum: image checksum
+            location: image path
+        Returns the image list of dictionaries:
+            [{<the fields at Filter_dict plus some VIM specific>}, ...]
+            List can be empty
+        '''
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect() failed.")
+        try:
+            image_list = []
+            catalogs = vca.get_catalogs()
+            if len(catalogs) == 0:
+                return image_list
+            else:
+                for catalog in catalogs:
+                    catalog_uuid = catalog.get_id().split(":")[3]
+                    name = catalog.name
+                    filtered_dict = {}
+                    if filter_dict.get("name") and filter_dict["name"] != name:
+                        continue
+                    if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
+                        continue
+                    filtered_dict["name"] = name
+                    filtered_dict["id"] = catalog_uuid
+                    image_list.append(filtered_dict)
+
+                self.logger.debug("List of already created catalog items: {}".format(image_list))
+                return image_list
+        except Exception as exp:
+            raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
+
     def get_vappid(self, vdc=None, vapp_name=None):
         """ Method takes vdc object and vApp name and returns vapp uuid or None
 
@@ -1146,8 +1232,8 @@ class vimconnector(vimconn.vimconnector):
         """
 
         self.logger.info("Creating new instance for entry {}".format(name))
-        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {}".
-                          format(description, start, image_id, flavor_id, net_list, cloud_config))
+        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {}".format(
+                                    description, start, image_id, flavor_id, net_list, cloud_config))
         vca = self.connect()
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed.")
@@ -1183,18 +1269,29 @@ class vimconnector(vimconn.vimconnector):
         #
         vm_cpus = None
         vm_memory = None
+        vm_disk = None
+        pci_devices_info = []
         if flavor_id is not None:
-            if flavor_id not in flavorlist:
+            if flavor_id not in vimconnector.flavorlist:
                 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
                                                        "Failed retrieve flavor information "
                                                        "flavor id {}".format(name, flavor_id))
             else:
                 try:
-                    flavor = flavorlist[flavor_id]
+                    flavor = vimconnector.flavorlist[flavor_id]
                     vm_cpus = flavor[FLAVOR_VCPUS_KEY]
                     vm_memory = flavor[FLAVOR_RAM_KEY]
-                except KeyError:
-                    raise vimconn.vimconnException("Corrupted flavor. {}".format(flavor_id))
+                    vm_disk = flavor[FLAVOR_DISK_KEY]
+                    extended = flavor.get("extended", None)
+                    if extended:
+                        numas = extended.get("numas", None)
+                        if numas:
+                            for numa in numas:
+                                # interfaces marked "dedicated" are later attached as PCI passthrough devices
+                                for interface in numa.get("interfaces", ()):
+                                    if interface["dedicated"].strip() == "yes":
+                                        pci_devices_info.append(interface)
+                except Exception as exp:
+                    raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
 
         # image upload creates template name as catalog name space Template.
         templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
@@ -1243,9 +1340,38 @@ class vimconnector(vimconn.vimconnector):
 
         # we should have now vapp in undeployed state.
         vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+        vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
         if vapp is None:
             raise vimconn.vimconnUnexpectedResponse(
-                "new_vminstance(): Failed failed retrieve vApp {} after we deployed".format(vmname_andid))
+                "new_vminstance(): Failed failed retrieve vApp {} after we deployed".format(
+                                                                            vmname_andid))
+
+        # Add PCI passthrough configuration
+        PCI_devices_status = False
+        vm_obj = None
+        si = None
+        if len(pci_devices_info) > 0:
+            self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
+                                                                        vmname_andid ))
+            PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
+                                                                            pci_devices_info,
+                                                                            vmname_andid)
+            if PCI_devices_status:
+                self.logger.info("Added PCI devives {} to VM {}".format(
+                                                            pci_devices_info,
+                                                            vmname_andid)
+                                 )
+            else:
+                self.logger.info("Fail to add PCI devives {} to VM {}".format(
+                                                            pci_devices_info,
+                                                            vmname_andid)
+                                 )
+        # add vm disk
+        if vm_disk:
+            # assuming there is only one disk in the OVF and fast provisioning is disabled in the organization vDC
+            result = self.modify_vm_disk(vapp_uuid, vm_disk)
+            if result:
+                self.logger.debug("Modified disk size of VM {} ".format(vmname_andid))
 
         # add NICs & connect to networks in netlist
         try:
@@ -1295,13 +1421,30 @@ class vimconnector(vimconn.vimconnector):
             raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
 
         # deploy and power on vm
-        task = vapp.poweron()
-        if type(task) is TaskType:
-            vca.block_until_completed(task)
-        deploytask = vapp.deploy(powerOn='True')
-        if type(task) is TaskType:
+        self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
+        deploytask = vapp.deploy(powerOn=False)
+        if type(deploytask) is GenericTask:
             vca.block_until_completed(deploytask)
 
+        # If VM has PCI devices reserve memory for VM
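+        # (ESXi requires full memory reservation for VMs that use DirectPath I/O passthrough)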
+        if PCI_devices_status and vm_obj and vcenter_conect:
+            memReserve = vm_obj.config.hardware.memoryMB
+            spec = vim.vm.ConfigSpec()
+            spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
+            task = vm_obj.ReconfigVM_Task(spec=spec)
+            if task:
+                result = self.wait_for_vcenter_task(task, vcenter_conect)
+                self.logger.info("Reserved memmoery {} MB for "\
+                                 "VM VM status: {}".format(str(memReserve),result))
+            else:
+                self.logger.info("Fail to reserved memmoery {} to VM {}".format(
+                                                            str(memReserve),str(vm_obj)))
+
+        self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
+        poweron_task = vapp.poweron()
+        if type(poweron_task) is GenericTask:
+            vca.block_until_completed(poweron_task)
+
         # check if vApp deployed and if that the case return vApp UUID otherwise -1
         wait_time = 0
         vapp_uuid = None
@@ -1529,6 +1672,40 @@ class vimconnector(vimconn.vimconnector):
         """
 
         self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
+
+        mac_ip_addr = {}
+        rheaders = {'Content-Type': 'application/xml'}
+        iso_edges = ['edge-2','edge-3','edge-6','edge-7','edge-8','edge-9','edge-10']
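+        # NOTE: the edge list is hard-coded here; ideally the edges would be discovered through the NSX API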
+
+        try:
+            for edge in iso_edges:
+                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
+                self.logger.debug("refresh_vms_status: NSX Manager url: {}".format(nsx_api_url))
+
+                resp = requests.get(self.nsx_manager + nsx_api_url,
+                                    auth = (self.nsx_user, self.nsx_password),
+                                    verify = False, headers = rheaders)
+
+                if resp.status_code == requests.codes.ok:
+                    dhcp_leases = XmlElementTree.fromstring(resp.text)
+                    for child in dhcp_leases:
+                        if child.tag == 'dhcpLeaseInfo':
+                            dhcpLeaseInfo = child
+                            for leaseInfo in dhcpLeaseInfo:
+                                mac_addr = None
+                                ip_addr = None
+                                for elem in leaseInfo:
+                                    if elem.tag == 'macAddress':
+                                        mac_addr = elem.text
+                                    if elem.tag == 'ipAddress':
+                                        ip_addr = elem.text
+                                if mac_addr is not None:
+                                    mac_ip_addr[mac_addr] = ip_addr
+                    self.logger.debug("NSX Manager DHCP Lease info: mac_ip_addr : {}".format(mac_ip_addr))
+                else:
+                    self.logger.debug("Error occurred while getting DHCP lease info from NSX Manager: {}".format(resp.content))
+        except KeyError as exp:
+            self.logger.debug("Error in response from NSX Manager: {}".format(exp))
+            self.logger.debug(traceback.format_exc())
+
         vca = self.connect()
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed.")
@@ -1545,10 +1722,12 @@ class vimconnector(vimconn.vimconnector):
                 the_vapp = vca.get_vapp(vdc, vmname)
                 vm_info = the_vapp.get_vms_details()
                 vm_status = vm_info[0]['status']
+                vm_pci_details = self.get_vm_pci_details(vmuuid)
+                vm_info[0].update(vm_pci_details)
 
                 vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                            'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
-                           'vim_info': yaml.safe_dump(the_vapp.get_vms_details()), 'interfaces': []}
+                           'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
 
                 # get networks
                 try:
@@ -1556,6 +1735,10 @@ class vimconnector(vimconn.vimconnector):
                     for vapp_network in vm_app_networks:
                         for vm_network in vapp_network:
                             if vm_network['name'] == vmname:
+                                # assign IP address based on MAC address from the NSX DHCP lease info
+                                for mac_addr, ip_addr in mac_ip_addr.iteritems():
+                                    if mac_addr == vm_network['mac']:
+                                        vm_network['ip'] = ip_addr
                                 interface = {"mac_address": vm_network['mac'],
                                              "vim_net_id": self.get_network_id_by_name(vm_network['network_name']),
                                              "vim_interface_id": self.get_network_id_by_name(vm_network['network_name']),
@@ -1597,15 +1780,30 @@ class vimconnector(vimconn.vimconnector):
             the_vapp = vca.get_vapp(vdc, vapp_name)
             # TODO fix all status
             if "start" in action_dict:
-                if action_dict["start"] == "rebuild":
-                    the_vapp.deploy(powerOn=True)
+                vm_info = the_vapp.get_vms_details()
+                vm_status = vm_info[0]['status']
+                self.logger.info("Power on vApp: vm_status:{} {}".format(type(vm_status),vm_status))
+                if vm_status == "Suspended" or vm_status == "Powered off":
+                    power_on_task = the_vapp.poweron()
+                    if power_on_task is not None and type(power_on_task) is GenericTask:
+                        result = vca.block_until_completed(power_on_task)
+                        if result:
+                            self.logger.info("action_vminstance: Powered on vApp: {}".format(vapp_name))
+                        else:
+                            self.logger.info("action_vminstance: Failed to power on vApp: {}".format(vapp_name))
+                    else:
+                        self.logger.info("action_vminstance: Wait for vApp {} to power on".format(vapp_name))
+            elif "rebuild" in action_dict:
+                self.logger.info("action_vminstance: Rebuilding vApp: {}".format(vapp_name))
+                power_on_task = the_vapp.deploy(powerOn=True)
+                if type(power_on_task) is GenericTask:
+                    result = vca.block_until_completed(power_on_task)
+                    if result:
+                        self.logger.info("action_vminstance: Rebuilt vApp: {}".format(vapp_name))
+                    else:
+                        self.logger.info("action_vminstance: Failed to rebuild vApp: {}".format(vapp_name))
                 else:
-                    vm_info = the_vapp.get_vms_details()
-                    vm_status = vm_info[0]['status']
-                    if vm_status == "Suspended":
-                        the_vapp.poweron()
-                    elif vm_status.status == "Powered off":
-                        the_vapp.poweron()
+                    self.logger.info("action_vminstance: Wait for vApp rebuild {} to power on".format(vapp_name))
             elif "pause" in action_dict:
                 pass
                 ## server.pause()
@@ -1613,7 +1811,15 @@ class vimconnector(vimconn.vimconnector):
                 pass
                 ## server.resume()
             elif "shutoff" in action_dict or "shutdown" in action_dict:
-                the_vapp.shutdown()
+                power_off_task = the_vapp.undeploy(action='powerOff')
+                if type(power_off_task) is GenericTask:
+                    result = vca.block_until_completed(power_off_task)
+                    if result:
+                        self.logger.info("action_vminstance: Powered off vApp: {}".format(vapp_name))
+                    else:
+                        self.logger.info("action_vminstance: Failed to power off vApp: {}".format(vapp_name))
+                else:
+                    self.logger.info("action_vminstance: Wait for vApp {} to power off".format(vapp_name))
             elif "forceOff" in action_dict:
                 the_vapp.reset()
             elif "terminate" in action_dict:
@@ -2597,7 +2803,7 @@ class vimconnector(vimconn.vimconnector):
                         return response.content
         return None
 
-    def get_vapp_details_rest(self, vapp_uuid=None):
+    def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
         """
         Method retrieve vapp detail from vCloud director
 
@@ -2609,8 +2815,13 @@ class vimconnector(vimconn.vimconnector):
         """
 
         parsed_respond = {}
+        vca = None
+
+        if need_admin_access:
+            vca = self.connect_as_admin()
+        else:
+            vca = self.connect()
 
-        vca = self.connect()
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed")
         if vapp_uuid is None:
@@ -2618,7 +2829,8 @@ class vimconnector(vimconn.vimconnector):
 
         url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
         get_vapp_restcall = ''.join(url_list)
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+
+        if vca.vcloud_session and vca.vcloud_session.organization:
             response = Http.get(url=get_vapp_restcall,
                                 headers=vca.vcloud_session.get_vcloud_headers(),
                                 verify=vca.verify,
@@ -2633,21 +2845,26 @@ class vimconnector(vimconn.vimconnector):
                 xmlroot_respond = XmlElementTree.fromstring(response.content)
                 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
 
-                namespaces_ovf = {'ovf': 'http://schemas.dmtf.org/ovf/envelope/1'}
-                namespace_vmm = {'vmw': 'http://www.vmware.com/schema/ovf'}
-                namespace_vm = {'vm': 'http://www.vmware.com/vcloud/v1.5'}
+                namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
+                              'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
+                              'vmw': 'http://www.vmware.com/schema/ovf',
+                              'vm': 'http://www.vmware.com/vcloud/v1.5',
+                              'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+                              "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
+                              "xmlns":"http://www.vmware.com/vcloud/v1.5"
+                             }
 
-                created_section = xmlroot_respond.find('vm:DateCreated', namespace_vm)
+                created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
                 if created_section is not None:
                     parsed_respond['created'] = created_section.text
 
-                network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespace_vm)
+                network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
                 if network_section is not None and 'networkName' in network_section.attrib:
                     parsed_respond['networkname'] = network_section.attrib['networkName']
 
                 ipscopes_section = \
                     xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
-                                         namespace_vm)
+                                         namespaces)
                 if ipscopes_section is not None:
                     for ipscope in ipscopes_section:
                         for scope in ipscope:
@@ -2661,14 +2878,15 @@ class vimconnector(vimconn.vimconnector):
                                 parsed_respond[tag_key] = scope.text
 
                 # parse children section for other attrib
-                children_section = xmlroot_respond.find('vm:Children/', namespace_vm)
+                children_section = xmlroot_respond.find('vm:Children/', namespaces)
                 if children_section is not None:
                     parsed_respond['name'] = children_section.attrib['name']
-                    parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled']
+                    parsed_respond['nestedHypervisorEnabled'] = \
+                        children_section.attrib.get('nestedHypervisorEnabled')
                     parsed_respond['deployed'] = children_section.attrib['deployed']
                     parsed_respond['status'] = children_section.attrib['status']
                     parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
-                    network_adapter = children_section.find('vm:NetworkConnectionSection', namespace_vm)
+                    network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
                     nic_list = []
                     for adapters in network_adapter:
                         adapter_key = adapters.tag.split("}")[1]
@@ -2691,9 +2909,35 @@ class vimconnector(vimconn.vimconnector):
                                 parsed_respond['acquireMksTicket'] = link.attrib
 
                     parsed_respond['interfaces'] = nic_list
-            except:
-                pass
+                    vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+                    if vCloud_extension_section is not None:
+                        vm_vcenter_info = {}
+                        vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
+                        vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+                        if vmext is not None:
+                            vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+                        parsed_respond["vm_vcenter_info"]= vm_vcenter_info
+
+                    virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
+                    vm_virtual_hardware_info = {}
+                    if virtual_hardware_section is not None:
+                        for item in virtual_hardware_section.iterfind('ovf:Item', namespaces):
+                            if item.find("rasd:Description", namespaces).text == "Hard disk":
+                                disk_size = item.find("rasd:HostResource", namespaces
+                                                ).attrib["{"+namespaces['vm']+"}capacity"]
+                                vm_virtual_hardware_info["disk_size"] = disk_size
+                                break
 
+                        for link in virtual_hardware_section:
+                            if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
+                                if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
+                                    vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
+                                    break
+
+                    parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
+            except Exception as exp:
+                self.logger.info("Error occurred calling REST API for getting vApp details: {}".format(exp))
         return parsed_respond
 
     def acuire_console(self, vm_uuid=None):
@@ -2719,4 +2963,509 @@ class vimconnector(vimconn.vimconnector):
 
         return None
 
+    def modify_vm_disk(self, vapp_uuid, flavor_disk):
+        """
+        Method to modify (grow) the VM disk so it matches the flavor
+
+        Args:
+            vapp_uuid - is vapp identifier.
+            flavor_disk - disk size as specified in VNFD (flavor)
+
+            Returns:
+                True on success, None on failure
+        """
+        status = None
+        try:
+            #Flavor disk is in GB convert it into MB
+            flavor_disk = int(flavor_disk) * 1024
+            vm_details = self.get_vapp_details_rest(vapp_uuid)
+            if vm_details:
+                vm_name = vm_details["name"]
+                self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
+
+            if vm_details and "vm_virtual_hardware" in vm_details:
+                vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
+                disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+
+                self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
+
+                if flavor_disk > vm_disk:
+                    status = self.modify_vm_disk_rest(disk_edit_href, flavor_disk)
+                    self.logger.info("Modified disk of VM {} from {} to {} MB".format(vm_name,
+                                                         vm_disk, flavor_disk))
+                else:
+                    status = True
+                    self.logger.info("No need to modify disk of VM {}".format(vm_name))
+
+            return status
+        except Exception as exp:
+            self.logger.info("Error occurred while modifing disk size {}".format(exp))
+
+
+    def modify_vm_disk_rest(self, disk_href, disk_size):
+        """
+        Method to modify VM disk size through the vCD REST API
+
+        Args:
+            disk_href - vCD API URL to GET and PUT disk data
+            disk_size - disk size as specified in VNFD (flavor)
+
+            Returns:
+                Task completion status on success, None on failure
+        """
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect() is failed")
+        if disk_href is None or disk_size is None:
+            return None
+
+        if vca.vcloud_session and vca.vcloud_session.organization:
+            response = Http.get(url=disk_href,
+                                headers=vca.vcloud_session.get_vcloud_headers(),
+                                verify=vca.verify,
+                                logger=vca.logger)
+        else:
+            return None
+
+        if response.status_code != requests.codes.ok:
+            self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
+                                                                            response.status_code))
+            return None
+        try:
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+            namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
+
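+            # locate the "Hard disk" Item in the RasdItemsList and overwrite its vCloud capacity attribute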
+            for item in lxmlroot_respond.iterfind('xmlns:Item', namespaces):
+                if item.find("rasd:Description", namespaces).text == "Hard disk":
+                    disk_item = item.find("rasd:HostResource", namespaces)
+                    if disk_item is not None:
+                        disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
+                        break
+
+            data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
+                                             xml_declaration=True)
+
+            #Send PUT request to modify disk size
+            headers = vca.vcloud_session.get_vcloud_headers()
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+
+            response = Http.put(url=disk_href,
+                                data=data,
+                                headers=headers,
+                                verify=vca.verify, logger=self.logger)
+
+            if response.status_code != 202:
+                self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
+                                                                            response.status_code))
+            else:
+                modify_disk_task = taskType.parseString(response.content, True)
+                if type(modify_disk_task) is GenericTask:
+                    status = vca.block_until_completed(modify_disk_task)
+                    return status
+
+            return None
+
+        except Exception as exp:
+            self.logger.info("Error occurred calling REST API for modifying disk size: {}".format(exp))
+            return None
+
+    def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
+        """
+            Method to attach PCI devices to a VM
+
+             Args:
+                vapp_uuid - uuid of vApp/VM
+                pci_devices - PCI devices information as specified in VNFD (flavor)
+
+            Returns:
+                The status of the add PCI device task, the vm object and
+                the vcenter_conect object
+        """
+        vm_obj = None
+        vcenter_conect = None
+        self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
+        try:
+            vm_vcenter_info = self.get_vm_vcenter_info(vapp_uuid)
+        except Exception as exp:
+            self.logger.error("Error occurred while getting vCenter infromationn"\
+                             " for VM : {}".format(exp))
+            raise vimconn.vimconnException(message=exp)
+
+        if vm_vcenter_info["vm_moref_id"]:
+            context = None
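+            # disable SSL certificate verification for the vCenter connection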
+            if hasattr(ssl, '_create_unverified_context'):
+                context = ssl._create_unverified_context()
+            try:
+                no_of_pci_devices = len(pci_devices)
+                if no_of_pci_devices > 0:
+                    vcenter_conect = SmartConnect(
+                                            host=vm_vcenter_info["vm_vcenter_ip"],
+                                            user=vm_vcenter_info["vm_vcenter_user"],
+                                            pwd=vm_vcenter_info["vm_vcenter_password"],
+                                            port=int(vm_vcenter_info["vm_vcenter_port"]),
+                                            sslContext=context)
+                    atexit.register(Disconnect, vcenter_conect)
+                    content = vcenter_conect.RetrieveContent()
+
+                    #Get VM and its host
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_vcenter_info["vm_moref_id"])
+                    self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+                    if host_obj and vm_obj:
+                        # get PCI devices from the host on which the vApp is currently running
+                        available_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
+
+                        if available_pci_devices is None:
+                            # find another host with enough available passthrough PCI devices
+                            new_host_obj, available_pci_devices = self.get_host_and_PCIdevices(
+                                                                content,
+                                                                no_of_pci_devices
+                                                                )
+
+                            if new_host_obj is not None and available_pci_devices is not None and len(available_pci_devices) > 0:
+                                # migrate the VM to the host where PCI devices are available
+                                self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
+                                task = self.relocate_vm(new_host_obj, vm_obj)
+                                if task is not None:
+                                    result = self.wait_for_vcenter_task(task, vcenter_conect)
+                                    self.logger.info("Migrate VM status: {}".format(result))
+                                    host_obj = new_host_obj
+                                else:
+                                    self.logger.error("Failed to relocate VM {}".format(vmname_andid))
+                                    raise vimconn.vimconnNotFoundException(
+                                        "Failed to migrate VM {} to host {}".format(
+                                                    vmname_andid,
+                                                    new_host_obj)
+                                        )
+
+                        if host_obj is not None and available_pci_devices is not None and len(available_pci_devices) > 0:
+                            # add PCI devices one by one
+                            for pci_device in available_pci_devices:
+                                task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
+                                if task:
+                                    status = self.wait_for_vcenter_task(task, vcenter_conect)
+                                    if status:
+                                        self.logger.info("Added PCI device {} to VM {}".format(pci_device, str(vm_obj)))
+                                else:
+                                    self.logger.error("Failed to add PCI device {} to VM {}".format(pci_device, str(vm_obj)))
+                            return True, vm_obj, vcenter_conect
+                        else:
+                            self.logger.error("Currently there is no host with"\
+                                              " {} available PCI devices required for VM {}".format(
+                                                                            no_of_pci_devices,
+                                                                            vmname_andid)
+                                              )
+                            raise vimconn.vimconnNotFoundException(
+                                    "Currently there is no host with {} "\
+                                    "available PCI devices required for VM {}".format(
+                                                                            no_of_pci_devices,
+                                                                            vmname_andid))
+                else:
+                    self.logger.debug("No infromation about PCI devices {} ",pci_devices)
+
+            except vmodl.MethodFault as error:
+                self.logger.error("Error occurred while adding PCI devices {} ",error)
+        return None, vm_obj, vcenter_conect
+
+    def get_vm_obj(self, content, mob_id):
+        """
+            Method to get the vSphere VM object associated with a given MoRef ID
+             Args:
+                content - vCenter content object
+                mob_id - MoRef ID of the VM
+
+            Returns:
+                    VM and host object
+        """
+        vm_obj = None
+        host_obj = None
+        try:
+            container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                        [vim.VirtualMachine], True)
+            for vm in container.view:
+                mobID = vm._GetMoId()
+                if mobID == mob_id:
+                    vm_obj = vm
+                    host_obj = vm_obj.runtime.host
+                    break
+        except Exception as exp:
+            self.logger.error("Error occurred while finding VM object : {}".format(exp))
+        return host_obj, vm_obj
+
+    def get_pci_devices(self, host, need_devices):
+        """
+            Method to get the details of pci devices on given host
+             Args:
+                host - vSphere host object
+                need_devices - number of pci devices needed on host
+
+             Returns:
+                array of pci devices
+        """
+        all_devices = []
+        all_device_ids = []
+        used_devices_ids = []
+
+        try:
+            if not host:
+                return None
+            pciPassthruInfo = host.config.pciPassthruInfo
+            pciDevices = host.hardware.pciDevice
+
+            # collect only devices that are enabled for passthrough
+            for pci_status in pciPassthruInfo:
+                if pci_status.passthruActive:
+                    for device in pciDevices:
+                        if device.id == pci_status.id:
+                            all_device_ids.append(device.id)
+                            all_devices.append(device)
+
+            # drop devices that are already attached to a powered-on VM
+            available_devices = list(all_devices)
+            for vm in host.vm:
+                if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+                    vm_devices = vm.config.hardware.device
+                    for device in vm_devices:
+                        if type(device) is vim.vm.device.VirtualPCIPassthrough:
+                            if device.backing.id in all_device_ids:
+                                for use_device in available_devices:
+                                    if use_device.id == device.backing.id:
+                                        available_devices.remove(use_device)
+                                        break
+                                used_devices_ids.append(device.backing.id)
+                                self.logger.debug("Device {} from devices {} "\
+                                        "is in use".format(device.backing.id,
+                                                           device)
+                                            )
+            if len(available_devices) < need_devices:
+                self.logger.debug("Host {} does not have {} available PCI devices".format(host,
+                                                                            need_devices))
+                self.logger.debug("found only {} devices {}".format(len(available_devices),
+                                                                    available_devices))
+                return None
+            else:
+                required_devices = available_devices[:need_devices]
+                self.logger.info("Found {} PCI devices on host {} but required only {}".format(
+                                                            len(available_devices),
+                                                            host,
+                                                            need_devices))
+                self.logger.info("Returning {} devices as {}".format(need_devices,
+                                                                required_devices))
+                return required_devices
+
+        except Exception as exp:
+            self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
+
+        return None
+
+    def get_host_and_PCIdevices(self, content, need_devices):
+        """
+         Method to find a host with the required number of available PCI devices
+
+            Args:
+                content - vCenter content object
+                need_devices - number of PCI devices needed on the host
+
+            Returns:
+                 host object and array of PCI devices
+        """
+        host_obj = None
+        pci_device_objs = None
+        try:
+            if content:
+                container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                            [vim.HostSystem], True)
+                for host in container.view:
+                    devices = self.get_pci_devices(host, need_devices)
+                    if devices:
+                        host_obj = host
+                        pci_device_objs = devices
+                        break
+        except Exception as exp:
+            self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
+
+        return host_obj, pci_device_objs
+
+    def relocate_vm(self, dest_host, vm):
+        """
+         Method to relocate a VM to a new host
+
+            Args:
+                dest_host - vSphere host object
+                vm - vSphere VM object
+
+            Returns:
+                task object
+        """
+        task = None
+        try:
+            relocate_spec = vim.vm.RelocateSpec(host=dest_host)
+            task = vm.Relocate(relocate_spec)
+            self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
+        except Exception as exp:
+            self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
+                                                                            dest_host, vm, exp))
+        return task
+
+    def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
+        """
+        Waits and provides updates on a vSphere task
+        """
+        while task.info.state == vim.TaskInfo.State.running:
+            time.sleep(2)
+
+        if task.info.state == vim.TaskInfo.State.success:
+            if task.info.result is not None and not hideResult:
+                self.logger.info('{} completed successfully, result: {}'.format(
+                                                            actionName,
+                                                            task.info.result))
+            else:
+                self.logger.info('Task {} completed successfully.'.format(actionName))
+        else:
+            self.logger.error('{} did not complete successfully: {} '.format(
+                                                            actionName,
+                                                            task.info.error)
+                              )
+
+        return task.info.result
+
+    def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
+        """
+         Method to add pci device in given VM
+
+            Args:
+                host_object - vSphere host object
+                vm_object - vSphere VM object
+                host_pci_dev -  host_pci_dev must be one of the devices from the
+                                host_object.hardware.pciDevice list
+                                which is configured as a PCI passthrough device
+
+            Returns:
+                task object
+        """
+        task = None
+        if vm_object and host_object and host_pci_dev:
+            try:
+                # query which PCI devices on this host are configured for passthrough
+                pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
+                systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
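+                # systemId from QueryConfigTarget is required to build the passthrough backing for this host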
+
+                if host_pci_dev.id not in systemid_by_pciid:
+                    self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
+                    return None
+
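+                # the backing expects deviceId as a hex string (low 16 bits of the host PCI device id)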
+                deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
+                backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
+                                            id=host_pci_dev.id,
+                                            systemId=systemid_by_pciid[host_pci_dev.id],
+                                            vendorId=host_pci_dev.vendorId,
+                                            deviceName=host_pci_dev.deviceName)
+
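+                # a negative key is a placeholder; vSphere assigns the real device key on reconfigure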
+                hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
+
+                new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
+                new_device_config.operation = "add"
+                vmConfigSpec = vim.vm.ConfigSpec()
+                vmConfigSpec.deviceChange = [new_device_config]
+
+                task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
+                self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
+                                                            host_pci_dev, vm_object, host_object)
+                                )
+            except Exception as exp:
+                self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
+                                                                            host_pci_dev,
+                                                                            vm_object,
+                                                                             exp))
+        return task
+
+    def get_vm_vcenter_info(self, vapp_uuid):
+        """
+        Method to get details of vCenter and the VM
+
+            Args:
+                vapp_uuid - uuid of vApp or VM
+
+            Returns:
+                MoRef ID of the VM and details of vCenter
+        """
+        vm_vcenter_info = {}
+
+        if self.vcenter_ip is not None:
+            vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
+        else:
+            raise vimconn.vimconnException(message="vCenter IP is not provided."\
+                                           " Please provide vCenter IP while attaching datacenter to tenant in --config")
+        if self.vcenter_port is not None:
+            vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
+        else:
+            raise vimconn.vimconnException(message="vCenter port is not provided."\
+                                           " Please provide vCenter port while attaching datacenter to tenant in --config")
+        if self.vcenter_user is not None:
+            vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
+        else:
+            raise vimconn.vimconnException(message="vCenter user is not provided."\
+                                           " Please provide vCenter user while attaching datacenter to tenant in --config")
+
+        if self.vcenter_password is not None:
+            vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
+        else:
+            raise vimconn.vimconnException(message="vCenter user password is not provided."\
+                                           " Please provide vCenter user password while attaching datacenter to tenant in --config")
+        try:
+            vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
+            if vm_details and "vm_vcenter_info" in vm_details:
+                vm_vcenter_info["vm_moref_id"] = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
+
+            return vm_vcenter_info
+
+        except Exception as exp:
+            self.logger.error("Error occurred while getting vCenter infromationn"\
+                             " for VM : {}".format(exp))
+
+
+    def get_vm_pci_details(self, vmuuid):
+        """
+            Method to get VM PCI device details from vCenter
+
+            Args:
+                vmuuid - uuid of the vApp/VM
+
+            Returns:
+                dict of PCI devices attached to the VM
+
+        """
+        vm_pci_devices_info = {}
+        try:
+            vm_vcenter_info = self.get_vm_vcenter_info(vmuuid)
+            if vm_vcenter_info["vm_moref_id"]:
+                context = None
+                if hasattr(ssl, '_create_unverified_context'):
+                    context = ssl._create_unverified_context()
+                vcenter_conect = SmartConnect(host=vm_vcenter_info["vm_vcenter_ip"],
+                                        user=vm_vcenter_info["vm_vcenter_user"],
+                                        pwd=vm_vcenter_info["vm_vcenter_password"],
+                                        port=int(vm_vcenter_info["vm_vcenter_port"]),
+                                        sslContext=context
+                                    )
+                atexit.register(Disconnect, vcenter_conect)
+                content = vcenter_conect.RetrieveContent()
+
+                #Get VM and its host
+                if content:
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_vcenter_info["vm_moref_id"])
+                    if host_obj and vm_obj:
+                        vm_pci_devices_info["host_name"] = host_obj.name
+                        vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[0].spec.ip.ipAddress
+                        for device in vm_obj.config.hardware.device:
+                            if type(device) == vim.vm.device.VirtualPCIPassthrough:
+                                device_details = {'device_id': device.backing.id,
+                                                  'pciSlotNumber': device.slotInfo.pciSlotNumber,
+                                                  }
+                                vm_pci_devices_info[device.deviceInfo.label] = device_details
+                else:
+                    self.logger.error("Can not connect to vCenter while getting "\
+                                          "PCI devices infromationn")
+                return vm_pci_devices_info
+        except Exception as exp:
+            self.logger.error("Error occurred while getting VM infromationn"\
+                             " for VM : {}".format(exp))
+            raise vimconn.vimconnException(message=exp)