X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=vimconn_vmware.py;h=a224255609527d7d94e03bf8aeadb5dfcf0f867a;hb=refs%2Fchanges%2F86%2F1386%2F1;hp=26ad23a54f9e8540c8732f26b105ffde33cae47d;hpb=a92ae39e334093ca351cbd72384983a0c9c69f9e;p=osm%2FRO.git
diff --git a/vimconn_vmware.py b/vimconn_vmware.py
index 26ad23a5..a2242556 100644
--- a/vimconn_vmware.py
+++ b/vimconn_vmware.py
@@ -32,6 +32,11 @@ import os
 import traceback
 import itertools
 import requests
+import ssl
+import atexit
+
+from pyVmomi import vim, vmodl
+from pyVim.connect import SmartConnect, Disconnect
 
 from xml.etree import ElementTree as XmlElementTree
 from lxml import etree as lxmlElementTree
@@ -116,13 +121,12 @@ netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INA
                         'ERROR': 'ERROR', 'DELETED': 'DELETED'
                         }
 
-# dict used to store flavor in memory
-flavorlist = {}
-
-
 class vimconnector(vimconn.vimconnector):
+    # dict used to store flavor in memory
+    flavorlist = {}
+
     def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
-                 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}):
+                 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
         """
         Constructor create vmware connector to vCloud director.
 
@@ -149,6 +153,7 @@ class vimconnector(vimconn.vimconnector):
                 dict['admin_username']
                 dict['admin_password']
+            config - Provide NSX and vCenter information
 
             Returns:
                 Nothing.
@@ -159,6 +164,7 @@ class vimconnector(vimconn.vimconnector):
 
         self.logger = logging.getLogger('openmano.vim.vmware')
         self.logger.setLevel(10)
+        self.persistent_info = persistent_info
 
         self.name = name
         self.id = uuid
@@ -172,6 +178,13 @@ class vimconnector(vimconn.vimconnector):
         self.admin_password = None
         self.admin_user = None
         self.org_name = ""
+        self.nsx_manager = None
+        self.nsx_user = None
+        self.nsx_password = None
+        self.vcenter_ip = None
+        self.vcenter_port = None
+        self.vcenter_user = None
+        self.vcenter_password = None
 
         if tenant_name is not None:
             orgnameandtenant = tenant_name.split(":")
@@ -192,6 +205,18 @@ class vimconnector(vimconn.vimconnector):
         except KeyError:
             raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
 
+        try:
+            self.nsx_manager = config['nsx_manager']
+            self.nsx_user = config['nsx_user']
+            self.nsx_password = config['nsx_password']
+        except KeyError:
+            raise vimconn.vimconnException(message="Error: NSX manager, NSX user or NSX password is missing in config")
+
+        self.vcenter_ip = config.get("vcenter_ip", None)
+        self.vcenter_port = config.get("vcenter_port", None)
+        self.vcenter_user = config.get("vcenter_user", None)
+        self.vcenter_password = config.get("vcenter_password", None)
+
         self.org_uuid = None
         self.vca = None
 
@@ -679,9 +704,9 @@ class vimconnector(vimconn.vimconnector):
         """Obtain flavor details from the VIM
             Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
         """
-        if flavor_id not in flavorlist:
+        if flavor_id not in vimconnector.flavorlist:
             raise vimconn.vimconnNotFoundException("Flavor not found.")
-        return flavorlist[flavor_id]
+        return vimconnector.flavorlist[flavor_id]
 
     def new_flavor(self, flavor_data):
         """Adds a tenant flavor to VIM
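An illustrative sketch (not part of the patch) of what moving flavorlist from a module-level
dict to a class attribute means: every vimconnector instance now shares one in-memory flavor
store for the life of the process. The class name and flavor keys below are stand-ins.

# demo: class attributes are shared across all instances
class FlavorStore(object):
    flavorlist = {}   # shared, like vimconnector.flavorlist

conn_a = FlavorStore()
conn_b = FlavorStore()
FlavorStore.flavorlist["flavor-1"] = {"vcpus": 2, "ram": 2048, "disk": 10}
assert conn_a.flavorlist is conn_b.flavorlist   # both instances see the same dict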
@@ -729,7 +754,7 @@ class vimconnector(vimconn.vimconnector):
             new_flavor[FLAVOR_DISK_KEY] = disk
             # generate a new uuid put to internal dict and return it.
             flavor_id = uuid.uuid4()
-            flavorlist[str(flavor_id)] = new_flavor
+            vimconnector.flavorlist[str(flavor_id)] = new_flavor
             self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
 
         return str(flavor_id)
@@ -739,10 +764,10 @@ class vimconnector(vimconn.vimconnector):
 
             Returns the used id or raise an exception
         """
-        if flavor_id not in flavorlist:
+        if flavor_id not in vimconnector.flavorlist:
             raise vimconn.vimconnNotFoundException("Flavor not found.")
 
-        flavorlist.pop(flavor_id, None)
+        vimconnector.flavorlist.pop(flavor_id, None)
         return flavor_id
 
     def new_image(self, image_dict):
@@ -1067,6 +1092,43 @@ class vimconnector(vimconn.vimconnector):
 
         return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
 
+    def get_image_list(self, filter_dict={}):
+        '''Obtain tenant images from VIM
+        Filter_dict can be:
+            name: image name
+            id: image uuid
+            checksum: image checksum
+            location: image path
+        Returns the image list of dictionaries:
+            [{}, ...]
+            List can be empty
+        '''
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect() is failed.")
+        try:
+            image_list = []
+            catalogs = vca.get_catalogs()
+            if len(catalogs) == 0:
+                return image_list
+            else:
+                for catalog in catalogs:
+                    catalog_uuid = catalog.get_id().split(":")[3]
+                    name = catalog.name
+                    filtered_dict = {}
+                    if filter_dict.get("name") and filter_dict["name"] != name:
+                        continue
+                    if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
+                        continue
+                    filtered_dict["name"] = name
+                    filtered_dict["id"] = catalog_uuid
+                    image_list.append(filtered_dict)
+
+                self.logger.debug("List of already created catalog items: {}".format(image_list))
+                return image_list
+        except Exception as exp:
+            raise vimconn.vimconnException("Exception occurred while retrieving catalog items {}".format(exp))
+
     def get_vappid(self, vdc=None, vapp_name=None):
         """ Method takes vdc object and vApp name and returns vapp uuid or None
@@ -1170,8 +1232,8 @@ class vimconnector(vimconn.vimconnector):
         """
         self.logger.info("Creating new instance for entry {}".format(name))
-        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {}".
-                          format(description, start, image_id, flavor_id, net_list, cloud_config))
+        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {}".format(
+                          description, start, image_id, flavor_id, net_list, cloud_config))
         vca = self.connect()
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed.")
@@ -1208,21 +1270,28 @@ class vimconnector(vimconn.vimconnector):
         vm_cpus = None
         vm_memory = None
         vm_disk = None
-
+        pci_devices_info = []
         if flavor_id is not None:
-            if flavor_id not in flavorlist:
+            if flavor_id not in vimconnector.flavorlist:
                 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
                                                        "Failed retrieve flavor information "
                                                        "flavor id {}".format(name, flavor_id))
             else:
                 try:
-                    flavor = flavorlist[flavor_id]
+                    flavor = vimconnector.flavorlist[flavor_id]
                     vm_cpus = flavor[FLAVOR_VCPUS_KEY]
                     vm_memory = flavor[FLAVOR_RAM_KEY]
                     vm_disk = flavor[FLAVOR_DISK_KEY]
-
-                except KeyError:
-                    raise vimconn.vimconnException("Corrupted flavor. {}".format(flavor_id))
+                    extended = flavor.get("extended", None)
+                    if extended:
+                        numas = extended.get("numas", None)
+                        if numas:
+                            for numa in numas:
+                                for interface in numa.get("interfaces", ()):
+                                    if interface["dedicated"].strip() == "yes":
+                                        pci_devices_info.append(interface)
+                except Exception as exp:
+                    raise vimconn.vimconnException("Corrupted flavor {}. Exception: {}".format(flavor_id, exp))
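For reference, a minimal sketch (not part of the patch) of the flavor shape this parsing
expects: only 'extended', 'numas', 'interfaces' and 'dedicated' come from the code above;
the interface names are hypothetical.

flavor = {
    "vcpus": 4, "ram": 4096, "disk": 20,
    "extended": {
        "numas": [
            {"interfaces": [
                {"name": "xe0", "dedicated": "yes"},   # collected for PCI passthrough
                {"name": "xe1", "dedicated": "no"},    # left on the virtual switch
            ]}
        ]
    }
}

pci_devices_info = []
for numa in flavor.get("extended", {}).get("numas", []):
    for interface in numa.get("interfaces", ()):
        if interface["dedicated"].strip() == "yes":
            pci_devices_info.append(interface)
assert [i["name"] for i in pci_devices_info] == ["xe0"]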
 
         # image upload creates template name as catalog name space Template.
         templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
@@ -1277,6 +1346,26 @@ class vimconnector(vimconn.vimconnector):
                     "new_vminstance(): Failed failed retrieve vApp {} after we deployed".format(
                         vmname_andid))
 
+        # Add PCI passthrough configurations
+        PCI_devices_status = False
+        vm_obj = None
+        si = None
+        if len(pci_devices_info) > 0:
+            self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
+                                                                            vmname_andid))
+            PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
+                                                                              pci_devices_info,
+                                                                              vmname_andid)
+            if PCI_devices_status:
+                self.logger.info("Added PCI devices {} to VM {}".format(
+                                                                        pci_devices_info,
+                                                                        vmname_andid)
+                                 )
+            else:
+                self.logger.info("Failed to add PCI devices {} to VM {}".format(
+                                                                                pci_devices_info,
+                                                                                vmname_andid)
+                                 )
         # add vm disk
         if vm_disk:
             #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
@@ -1332,13 +1421,30 @@ class vimconnector(vimconn.vimconnector):
             raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
 
         # deploy and power on vm
-        task = vapp.poweron()
-        if type(task) is TaskType:
-            vca.block_until_completed(task)
-        deploytask = vapp.deploy(powerOn='True')
-        if type(task) is TaskType:
+        self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
+        deploytask = vapp.deploy(powerOn=False)
+        if type(deploytask) is GenericTask:
             vca.block_until_completed(deploytask)
 
+        # If VM has PCI devices reserve memory for VM
+        if PCI_devices_status and vm_obj and vcenter_conect:
+            memReserve = vm_obj.config.hardware.memoryMB
+            spec = vim.vm.ConfigSpec()
+            spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
+            task = vm_obj.ReconfigVM_Task(spec=spec)
+            if task:
+                result = self.wait_for_vcenter_task(task, vcenter_conect)
+                self.logger.info("Reserved memory {} MB for "\
+                    "VM; VM status: {}".format(str(memReserve), result))
+            else:
+                self.logger.info("Failed to reserve memory {} MB for VM {}".format(
+                    str(memReserve), str(vm_obj)))
+
+        self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
+        poweron_task = vapp.poweron()
+        if type(poweron_task) is GenericTask:
+            vca.block_until_completed(poweron_task)
+
         # check if vApp deployed and if that the case return vApp UUID otherwise -1
         wait_time = 0
         vapp_uuid = None
@@ -1566,6 +1672,40 @@ class vimconnector(vimconn.vimconnector):
         """
 
         self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
+
+        mac_ip_addr = {}
+        rheaders = {'Content-Type': 'application/xml'}
+        iso_edges = ['edge-2', 'edge-3', 'edge-6', 'edge-7', 'edge-8', 'edge-9', 'edge-10']
+
+        try:
+            for edge in iso_edges:
+                nsx_api_url = '/api/4.0/edges/' + edge + '/dhcp/leaseInfo'
+                self.logger.debug("refresh_vms_status: NSX Manager url: {}".format(nsx_api_url))
+
+                resp = requests.get(self.nsx_manager + nsx_api_url,
+                                    auth=(self.nsx_user, self.nsx_password),
+                                    verify=False, headers=rheaders)
+
+                if resp.status_code == requests.codes.ok:
+                    dhcp_leases = XmlElementTree.fromstring(resp.text)
+                    for child in dhcp_leases:
+                        if child.tag == 'dhcpLeaseInfo':
+                            dhcpLeaseInfo = child
+                            for leaseInfo in dhcpLeaseInfo:
+                                for elem in leaseInfo:
+                                    if elem.tag == 'macAddress':
+                                        mac_addr = elem.text
+                                    if elem.tag == 'ipAddress':
+                                        ip_addr = elem.text
+                                if mac_addr is not None:
+                                    mac_ip_addr[mac_addr] = ip_addr
+                    self.logger.debug("NSX Manager DHCP Lease info: mac_ip_addr : {}".format(mac_ip_addr))
+                else:
+                    self.logger.debug("Error occurred while getting DHCP lease info from NSX Manager: {}".format(resp.content))
+        except KeyError as exp:
+            self.logger.debug("Error in response from NSX Manager {}".format(exp))
+            self.logger.debug(traceback.format_exc())
+
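For context, a standalone sketch (not part of the patch) of turning one NSX
/api/4.0/edges/<edge-id>/dhcp/leaseInfo response into the mac->ip map built above; the
XML below is a hand-written minimal sample, including the root tag.

from xml.etree import ElementTree as XmlElementTree

sample = """
<dhcpLeases>
  <dhcpLeaseInfo>
    <leaseInfo>
      <macAddress>00:50:56:aa:bb:cc</macAddress>
      <ipAddress>192.168.10.5</ipAddress>
    </leaseInfo>
  </dhcpLeaseInfo>
</dhcpLeases>
"""

mac_ip_addr = {}
for lease in XmlElementTree.fromstring(sample).iter('leaseInfo'):
    mac = lease.findtext('macAddress')
    ip = lease.findtext('ipAddress')
    if mac is not None:
        mac_ip_addr[mac] = ip
assert mac_ip_addr == {'00:50:56:aa:bb:cc': '192.168.10.5'}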
         vca = self.connect()
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed.")
@@ -1582,10 +1722,12 @@ class vimconnector(vimconn.vimconnector):
                 the_vapp = vca.get_vapp(vdc, vmname)
                 vm_info = the_vapp.get_vms_details()
                 vm_status = vm_info[0]['status']
+                vm_pci_details = self.get_vm_pci_details(vmuuid)
+                vm_info[0].update(vm_pci_details)
 
                 vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
                            'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
-                           'vim_info': yaml.safe_dump(the_vapp.get_vms_details()), 'interfaces': []}
+                           'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
 
                 # get networks
                 try:
@@ -1593,6 +1735,10 @@ class vimconnector(vimconn.vimconnector):
                     for vapp_network in vm_app_networks:
                         for vm_network in vapp_network:
                             if vm_network['name'] == vmname:
+                                # Assign IP address based on MAC address in NSX DHCP lease info
+                                for mac_addr, ip_addr in mac_ip_addr.iteritems():
+                                    if mac_addr == vm_network['mac']:
+                                        vm_network['ip'] = ip_addr
                                 interface = {"mac_address": vm_network['mac'],
                                              "vim_net_id": self.get_network_id_by_name(vm_network['network_name']),
                                              "vim_interface_id": self.get_network_id_by_name(vm_network['network_name']),
@@ -1634,15 +1780,30 @@ class vimconnector(vimconn.vimconnector):
             the_vapp = vca.get_vapp(vdc, vapp_name)
             # TODO fix all status
             if "start" in action_dict:
-                if action_dict["start"] == "rebuild":
-                    the_vapp.deploy(powerOn=True)
+                vm_info = the_vapp.get_vms_details()
+                vm_status = vm_info[0]['status']
+                self.logger.info("Power on vApp: vm_status: {} {}".format(type(vm_status), vm_status))
+                if vm_status == "Suspended" or vm_status == "Powered off":
+                    power_on_task = the_vapp.poweron()
+                    if power_on_task is not None and type(power_on_task) is GenericTask:
+                        result = vca.block_until_completed(power_on_task)
+                        if result:
+                            self.logger.info("action_vminstance: Powered on vApp: {}".format(vapp_name))
+                        else:
+                            self.logger.info("action_vminstance: Failed to power on vApp: {}".format(vapp_name))
+                    else:
+                        self.logger.info("action_vminstance: Wait for vApp {} to power on".format(vapp_name))
+            elif "rebuild" in action_dict:
+                self.logger.info("action_vminstance: Rebuilding vApp: {}".format(vapp_name))
+                power_on_task = the_vapp.deploy(powerOn=True)
+                if type(power_on_task) is GenericTask:
+                    result = vca.block_until_completed(power_on_task)
+                    if result:
+                        self.logger.info("action_vminstance: Rebuilt vApp: {}".format(vapp_name))
+                    else:
+                        self.logger.info("action_vminstance: Failed to rebuild vApp: {}".format(vapp_name))
                 else:
-                    vm_info = the_vapp.get_vms_details()
-                    vm_status = vm_info[0]['status']
-                    if vm_status == "Suspended":
-                        the_vapp.poweron()
-                    elif vm_status.status == "Powered off":
-                        the_vapp.poweron()
+                    self.logger.info("action_vminstance: Wait for vApp rebuild {} to power on".format(vapp_name))
             elif "pause" in action_dict:
                 pass
                 ## server.pause()
@@ -1650,7 +1811,15 @@ class vimconnector(vimconn.vimconnector):
                 pass
                 ## server.resume()
             elif "shutoff" in action_dict or "shutdown" in action_dict:
-                the_vapp.shutdown()
+                power_off_task = the_vapp.undeploy(action='powerOff')
+                if type(power_off_task) is GenericTask:
+                    result = vca.block_until_completed(power_off_task)
+                    if result:
+                        self.logger.info("action_vminstance: Powered off vApp: {}".format(vapp_name))
+                    else:
+                        self.logger.info("action_vminstance: Failed to power off vApp: {}".format(vapp_name))
+                else:
+                    self.logger.info("action_vminstance: Wait for vApp {} to power off".format(vapp_name))
             elif "forceOff" in action_dict:
                 the_vapp.reset()
             elif "terminate" in action_dict:
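A quick usage sketch (not part of the patch) of the action_dict keys handled above; the
connector handle, instance id and helper name are placeholders.

def power_cycle(vim_conn, instance_id):
    # "shutoff"/"shutdown" now map to undeploy(action='powerOff') above
    vim_conn.action_vminstance(instance_id, {"shutoff": None})
    # "start" powers the vApp back on when it is Suspended or Powered off
    vim_conn.action_vminstance(instance_id, {"start": None})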
@@ -2634,7 +2803,7 @@ class vimconnector(vimconn.vimconnector):
                 return response.content
         return None
 
-    def get_vapp_details_rest(self, vapp_uuid=None):
+    def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
         """
         Method retrieve vapp detail from vCloud director
 
@@ -2646,8 +2815,13 @@ class vimconnector(vimconn.vimconnector):
         """
 
         parsed_respond = {}
+        vca = None
+
+        if need_admin_access:
+            vca = self.connect_as_admin()
+        else:
+            vca = self.connect()
 
-        vca = self.connect()
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed")
         if vapp_uuid is None:
@@ -2655,7 +2829,8 @@ class vimconnector(vimconn.vimconnector):
 
         url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
         get_vapp_restcall = ''.join(url_list)
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+
+        if vca.vcloud_session and vca.vcloud_session.organization:
             response = Http.get(url=get_vapp_restcall,
                                 headers=vca.vcloud_session.get_vcloud_headers(),
                                 verify=vca.verify,
@@ -2734,6 +2909,14 @@ class vimconnector(vimconn.vimconnector):
                             parsed_respond['acquireMksTicket'] = link.attrib
 
                 parsed_respond['interfaces'] = nic_list
+                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+                if vCloud_extension_section is not None:
+                    vm_vcenter_info = {}
+                    vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
+                    vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+                    if vmext is not None:
+                        vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+                    parsed_respond["vm_vcenter_info"] = vm_vcenter_info
 
                 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
                 vm_virtual_hardware_info = {}
@@ -2885,4 +3068,404 @@ class vimconnector(vimconn.vimconnector):
             self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
             return None
 
+    def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
+        """
+        Method to attach PCI devices to a VM
+
+        Args:
+            vapp_uuid - uuid of vApp/VM
+            pci_devices - PCI devices information as specified in VNFD (flavor)
+
+        Returns:
+            The status of the add-PCI-device task, the VM object and the
+            vcenter_conect object
+        """
+        vm_obj = None
+        vcenter_conect = None
+        self.logger.info("Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid))
+        try:
+            vm_vcenter_info = self.get_vm_vcenter_info(vapp_uuid)
+        except Exception as exp:
+            self.logger.error("Error occurred while getting vCenter information"\
+                              " for VM: {}".format(exp))
+            raise vimconn.vimconnException(message=exp)
+
+        if vm_vcenter_info["vm_moref_id"]:
+            context = None
+            if hasattr(ssl, '_create_unverified_context'):
+                context = ssl._create_unverified_context()
+            try:
+                no_of_pci_devices = len(pci_devices)
+                if no_of_pci_devices > 0:
+                    vcenter_conect = SmartConnect(
+                        host=vm_vcenter_info["vm_vcenter_ip"],
+                        user=vm_vcenter_info["vm_vcenter_user"],
+                        pwd=vm_vcenter_info["vm_vcenter_password"],
+                        port=int(vm_vcenter_info["vm_vcenter_port"]),
+                        sslContext=context)
+                    atexit.register(Disconnect, vcenter_conect)
+                    content = vcenter_conect.RetrieveContent()
+
+                    # Get VM and its host
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_vcenter_info["vm_moref_id"])
+                    self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+                    if host_obj and vm_obj:
+                        # get PCI devices from the host on which the vApp is currently installed
+                        available_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
+
+                        if available_pci_devices is None:
+                            # find other hosts with active pci devices
+                            new_host_obj, available_pci_devices = self.get_host_and_PCIdevices(
+                                                                        content,
+                                                                        no_of_pci_devices
+                                                                        )
+
+                            if new_host_obj is not None and available_pci_devices is not None and len(available_pci_devices) > 0:
+                                # Migrate VM to the host where PCI devices are available
+                                self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
+                                task = self.relocate_vm(new_host_obj, vm_obj)
+                                if task is not None:
+                                    result = self.wait_for_vcenter_task(task, vcenter_conect)
+                                    self.logger.info("Migrate VM status: {}".format(result))
+                                    host_obj = new_host_obj
+                                else:
+                                    self.logger.info("Failed to migrate VM: {}".format(vmname_andid))
+                                    raise vimconn.vimconnNotFoundException(
+                                        "Failed to migrate VM {} to host {}".format(
+                                            vmname_andid,
+                                            new_host_obj)
+                                        )
+
+                        if host_obj is not None and available_pci_devices is not None and len(available_pci_devices) > 0:
+                            # Add PCI devices one by one
+                            for pci_device in available_pci_devices:
+                                task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
+                                if task:
+                                    status = self.wait_for_vcenter_task(task, vcenter_conect)
+                                    if status:
+                                        self.logger.info("Added PCI device {} to VM {}".format(pci_device, str(vm_obj)))
+                                    else:
+                                        self.logger.error("Failed to add PCI device {} to VM {}".format(pci_device, str(vm_obj)))
+                            return True, vm_obj, vcenter_conect
+                        else:
+                            self.logger.error("Currently there is no host with"\
+                                              " {} available PCI devices required for VM {}".format(
+                                                    no_of_pci_devices,
+                                                    vmname_andid)
+                                              )
+                            raise vimconn.vimconnNotFoundException(
+                                "Currently there is no host with {} "\
+                                "available PCI devices required for VM {}".format(
+                                    no_of_pci_devices,
+                                    vmname_andid))
+                else:
+                    self.logger.debug("No information about PCI devices {}".format(pci_devices))
+
+            except vmodl.MethodFault as error:
+                self.logger.error("Error occurred while adding PCI devices: {}".format(error))
+        return None, vm_obj, vcenter_conect
+
+    def get_vm_obj(self, content, mob_id):
+        """
+        Method to get the vSphere VM object associated with a given MoRef ID
+
+        Args:
+            vapp_uuid - uuid of vApp/VM
+            content - vCenter content object
+            mob_id - MoRef ID of the VM
+
+        Returns:
+            VM and host object
+        """
+        vm_obj = None
+        host_obj = None
+        try:
+            container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                                [vim.VirtualMachine], True
+                                                                )
+            for vm in container.view:
+                mobID = vm._GetMoId()
+                if mobID == mob_id:
+                    vm_obj = vm
+                    host_obj = vm_obj.runtime.host
+                    break
+        except Exception as exp:
+            self.logger.error("Error occurred while finding VM object: {}".format(exp))
+        return host_obj, vm_obj
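A minimal sketch (not part of the patch) generalizing the container-view lookup used by
get_vm_obj() above; 'si_content' stands for the result of RetrieveContent(), and the
helper name is hypothetical.

from pyVmomi import vim

def find_by_moref(si_content, moref_id, obj_type=vim.VirtualMachine):
    """Walk a container view and return the managed object whose MoId matches."""
    container = si_content.viewManager.CreateContainerView(
        si_content.rootFolder, [obj_type], True)   # True -> recurse into folders
    try:
        for obj in container.view:
            if obj._GetMoId() == moref_id:
                return obj
    finally:
        container.DestroyView()   # views hold server-side state; release them
    return None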
+
+    def get_pci_devices(self, host, need_devices):
+        """
+        Method to get the details of PCI devices on a given host
+
+        Args:
+            host - vSphere host object
+            need_devices - number of PCI devices needed on host
+
+        Returns:
+            array of PCI devices
+        """
+        all_devices = []
+        all_device_ids = []
+        used_devices_ids = []
+
+        try:
+            if host:
+                pciPassthruInfo = host.config.pciPassthruInfo
+                pciDevices = host.hardware.pciDevice
+
+                for pci_status in pciPassthruInfo:
+                    if pci_status.passthruActive:
+                        for device in pciDevices:
+                            if device.id == pci_status.id:
+                                all_device_ids.append(device.id)
+                                all_devices.append(device)
+
+                # check which devices are in use
+                available_devices = list(all_devices)  # copy so removals below do not mutate all_devices
+                for vm in host.vm:
+                    if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+                        vm_devices = vm.config.hardware.device
+                        for device in vm_devices:
+                            if type(device) is vim.vm.device.VirtualPCIPassthrough:
+                                if device.backing.id in all_device_ids:
+                                    for use_device in available_devices:
+                                        if use_device.id == device.backing.id:
+                                            available_devices.remove(use_device)
+                                    used_devices_ids.append(device.backing.id)
+                                    self.logger.debug("Device {} from devices {}"\
+                                                      " is in use".format(device.backing.id,
+                                                                          device)
+                                                      )
+                if len(available_devices) < need_devices:
+                    self.logger.debug("Host {} doesn't have {} available PCI devices".format(host,
+                                                                                             need_devices))
+                    self.logger.debug("Found only {} devices {}".format(len(available_devices),
+                                                                        available_devices))
+                    return None
+                else:
+                    required_devices = available_devices[:need_devices]
+                    self.logger.info("Found {} PCI devices on host {}; required only {}".format(
+                                                                        len(available_devices),
+                                                                        host,
+                                                                        need_devices))
+                    self.logger.info("Returning {} devices as {}".format(need_devices,
+                                                                         required_devices))
+                    return required_devices
+
+        except Exception as exp:
+            self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
+
+        return None
+
+    def get_host_and_PCIdevices(self, content, need_devices):
+        """
+        Method to get the details of PCI devices on all hosts
+
+        Args:
+            content - vCenter content object
+            need_devices - number of PCI devices needed on host
+
+        Returns:
+            array of PCI devices and host object
+        """
+        host_obj = None
+        pci_device_objs = None
+        try:
+            if content:
+                container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                                    [vim.HostSystem], True)
+                for host in container.view:
+                    devices = self.get_pci_devices(host, need_devices)
+                    if devices:
+                        host_obj = host
+                        pci_device_objs = devices
+                        break
+        except Exception as exp:
+            self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
+
+        return host_obj, pci_device_objs
+
+    def relocate_vm(self, dest_host, vm):
+        """
+        Method to relocate a VM to a new host
+
+        Args:
+            dest_host - vSphere host object
+            vm - vSphere VM object
+
+        Returns:
+            task object
+        """
+        task = None
+        try:
+            relocate_spec = vim.vm.RelocateSpec(host=dest_host)
+            task = vm.Relocate(relocate_spec)
+            self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
+        except Exception as exp:
+            self.logger.error("Error occurred while relocating VM {} to new host {}: {}".format(
+                vm, dest_host, exp))
+        return task
+
+    def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
+        """
+        Waits and provides updates on a vSphere task
+        """
+        while task.info.state == vim.TaskInfo.State.running:
+            time.sleep(2)
+
+        if task.info.state == vim.TaskInfo.State.success:
+            if task.info.result is not None and not hideResult:
+                self.logger.info('{} completed successfully, result: {}'.format(
+                                                                        actionName,
+                                                                        task.info.result))
+            else:
+                self.logger.info('Task {} completed successfully.'.format(actionName))
+        else:
+            self.logger.error('{} did not complete successfully: {} '.format(
+                                                                        actionName,
+                                                                        task.info.error)
+                              )
+
+        return task.info.result
+
+    def add_pci_to_vm(self, host_object, vm_object, host_pci_dev):
+        """
+        Method to add a PCI device to a given VM
+
+        Args:
+            host_object - vSphere host object
+            vm_object - vSphere VM object
+            host_pci_dev - host_pci_dev must be one of the devices from the
+                           host_object.hardware.pciDevice list
+                           which is configured as a PCI passthrough device
+
+        Returns:
+            task object
+        """
+        task = None
+        if vm_object and host_object and host_pci_dev:
+            try:
+                # Add PCI device to VM
+                pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
+                systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
+
+                if host_pci_dev.id not in systemid_by_pciid:
+                    self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
+                    return None
+
+                deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
+                backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
+                                                                     id=host_pci_dev.id,
+                                                                     systemId=systemid_by_pciid[host_pci_dev.id],
+                                                                     vendorId=host_pci_dev.vendorId,
+                                                                     deviceName=host_pci_dev.deviceName)
+
+                hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
+
+                new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
+                new_device_config.operation = "add"
+                vmConfigSpec = vim.vm.ConfigSpec()
+                vmConfigSpec.deviceChange = [new_device_config]
+
+                task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
+                self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
+                                                                        host_pci_dev, vm_object, host_object)
+                                 )
+            except Exception as exp:
+                self.logger.error("Error occurred while adding pci device {} to VM {}: {}".format(
+                                                                                                  host_pci_dev,
+                                                                                                  vm_object,
+                                                                                                  exp))
+        return task
+
+    def get_vm_vcenter_info(self, vapp_uuid):
+        """
+        Method to get details of vCenter and the VM
+
+        Args:
+            vapp_uuid - uuid of vApp or VM
+
+        Returns:
+            MoRef ID of the VM and details of vCenter
+        """
+        vm_vcenter_info = {}
+
+        if self.vcenter_ip is not None:
+            vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
+        else:
+            raise vimconn.vimconnException(message="vCenter IP is not provided."\
+                " Please provide vCenter IP while attaching datacenter to tenant in --config")
+        if self.vcenter_port is not None:
+            vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
+        else:
+            raise vimconn.vimconnException(message="vCenter port is not provided."\
+                " Please provide vCenter port while attaching datacenter to tenant in --config")
+        if self.vcenter_user is not None:
+            vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
+        else:
+            raise vimconn.vimconnException(message="vCenter user is not provided."\
+                " Please provide vCenter user while attaching datacenter to tenant in --config")
+
+        if self.vcenter_password is not None:
+            vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
+        else:
+            raise vimconn.vimconnException(message="vCenter user password is not provided."\
+                " Please provide vCenter user password while attaching datacenter to tenant in --config")
+        try:
+            vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
+            if vm_details and "vm_vcenter_info" in vm_details:
+                vm_vcenter_info["vm_moref_id"] = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
+
+            return vm_vcenter_info
+
+        except Exception as exp:
+            self.logger.error("Error occurred while getting vCenter information"\
+                              " for VM: {}".format(exp))
+
+    def get_vm_pci_details(self, vmuuid):
+        """
+        Method to get VM PCI device details from vCenter
+
+        Args:
+            vm_obj - vSphere VM object
+
+        Returns:
+            dict of PCI devices attached to VM
+
+        """
+        vm_pci_devices_info = {}
+        try:
+            vm_vcenter_info = self.get_vm_vcenter_info(vmuuid)
+            if vm_vcenter_info["vm_moref_id"]:
+                context = None
+                if hasattr(ssl, '_create_unverified_context'):
+                    context = ssl._create_unverified_context()
+                vcenter_conect = SmartConnect(host=vm_vcenter_info["vm_vcenter_ip"],
+                                              user=vm_vcenter_info["vm_vcenter_user"],
+                                              pwd=vm_vcenter_info["vm_vcenter_password"],
+                                              port=int(vm_vcenter_info["vm_vcenter_port"]),
+                                              sslContext=context
+                                              )
+                atexit.register(Disconnect, vcenter_conect)
+                content = vcenter_conect.RetrieveContent()
+
+                # Get VM and its host
+                if content:
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_vcenter_info["vm_moref_id"])
+                    if host_obj and vm_obj:
+                        vm_pci_devices_info["host_name"] = host_obj.name
+                        vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[0].spec.ip.ipAddress
+                        for device in vm_obj.config.hardware.device:
+                            if type(device) == vim.vm.device.VirtualPCIPassthrough:
+                                device_details = {'device_id': device.backing.id,
+                                                  'pciSlotNumber': device.slotInfo.pciSlotNumber,
+                                                  }
+                                vm_pci_devices_info[device.deviceInfo.label] = device_details
+                else:
+                    self.logger.error("Cannot connect to vCenter while getting "\
+                                      "PCI devices information")
+                return vm_pci_devices_info
+        except Exception as exp:
+            self.logger.error("Error occurred while getting VM information"\
+                              " for VM: {}".format(exp))
+            raise vimconn.vimconnException(message=exp)
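A hedged sketch (not part of the patch) of the vCenter connect-and-reconfigure pattern the
methods above rely on, shown for the memory reservation done in new_vminstance(); host and
credential values are placeholders.

import ssl
import atexit
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

def reserve_all_memory(vcenter_ip, user, pwd, vm_obj, port=443):
    context = None
    if hasattr(ssl, '_create_unverified_context'):
        context = ssl._create_unverified_context()   # same unverified-SSL shortcut as above
    si = SmartConnect(host=vcenter_ip, user=user, pwd=pwd,
                      port=int(port), sslContext=context)
    atexit.register(Disconnect, si)

    spec = vim.vm.ConfigSpec()
    spec.memoryAllocation = vim.ResourceAllocationInfo(
        reservation=vm_obj.config.hardware.memoryMB)   # reserve all of the VM's memory
    return vm_obj.ReconfigVM_Task(spec=spec)           # wait on it with wait_for_vcenter_task()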
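Finally, a sketch (not part of the patch) of the --config keys that __init__() now reads;
all values are placeholders.

config = {
    "admin_username": "administrator",
    "admin_password": "secret",
    "nsx_manager": "https://nsx-manager.example.com",   # required: vimconnException if missing
    "nsx_user": "admin",
    "nsx_password": "secret",
    "vcenter_ip": "10.0.0.10",                          # optional, needed for PCI passthrough
    "vcenter_port": 443,
    "vcenter_user": "administrator@vsphere.local",
    "vcenter_password": "secret",
}

# vim_conn = vimconnector(uuid=None, name="vmware-vim", tenant_name="org:tenant",
#                         url="https://vcloud.example.com", user="user", passwd="pass",
#                         config=config, persistent_info={})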