X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=vimconn_vmware.py;h=9e4e7608f0cd11bf945d163bbbcf926ae011681e;hb=b12711fda02f2bac40a2cc1adfb5a27675bbad61;hp=f9862183f9494ef51fb159f291a0d2983e960eb6;hpb=d63062f28781779de40a579d50648199e061d44a;p=osm%2FRO.git diff --git a/vimconn_vmware.py b/vimconn_vmware.py index f9862183..9e4e7608 100644 --- a/vimconn_vmware.py +++ b/vimconn_vmware.py @@ -121,13 +121,12 @@ netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INA 'ERROR': 'ERROR', 'DELETED': 'DELETED' } -# dict used to store flavor in memory -flavorlist = {} - - class vimconnector(vimconn.vimconnector): + # dict used to store flavor in memory + flavorlist = {} + def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None, - url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}): + url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}): """ Constructor create vmware connector to vCloud director. @@ -154,6 +153,7 @@ class vimconnector(vimconn.vimconnector): dict['admin_username'] dict['admin_password'] + config - Provide NSX and vCenter information Returns: Nothing. @@ -164,6 +164,7 @@ class vimconnector(vimconn.vimconnector): self.logger = logging.getLogger('openmano.vim.vmware') self.logger.setLevel(10) + self.persistent_info = persistent_info self.name = name self.id = uuid @@ -177,6 +178,13 @@ class vimconnector(vimconn.vimconnector): self.admin_password = None self.admin_user = None self.org_name = "" + self.nsx_manager = None + self.nsx_user = None + self.nsx_password = None + self.vcenter_ip = None + self.vcenter_port = None + self.vcenter_user = None + self.vcenter_password = None if tenant_name is not None: orgnameandtenant = tenant_name.split(":") @@ -197,6 +205,18 @@ class vimconnector(vimconn.vimconnector): except KeyError: raise vimconn.vimconnException(message="Error admin username or admin password is empty.") + try: + self.nsx_manager = config['nsx_manager'] + self.nsx_user = config['nsx_user'] + self.nsx_password = config['nsx_password'] + except KeyError: + raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config") + + self.vcenter_ip = config.get("vcenter_ip", None) + self.vcenter_port = config.get("vcenter_port", None) + self.vcenter_user = config.get("vcenter_user", None) + self.vcenter_password = config.get("vcenter_password", None) + self.org_uuid = None self.vca = None @@ -538,11 +558,11 @@ class vimconnector(vimconn.vimconnector): if vdc is None: raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name)) - vdcid = vdc.get_id().split(":")[3] - networks = vca.get_networks(vdc.get_name()) - network_list = [] - try: + vdcid = vdc.get_id().split(":")[3] + networks = vca.get_networks(vdc.get_name()) + network_list = [] + for network in networks: filter_entry = {} net_uuid = network.get_id().split(":") @@ -590,13 +610,13 @@ class vimconnector(vimconn.vimconnector): if not vca: raise vimconn.vimconnConnectionException("self.connect() is failed") - vdc = vca.get_vdc(self.tenant_name) - vdc_id = vdc.get_id().split(":")[3] + try: + vdc = vca.get_vdc(self.tenant_name) + vdc_id = vdc.get_id().split(":")[3] - networks = vca.get_networks(vdc.get_name()) - filter_dict = {} + networks = vca.get_networks(vdc.get_name()) + filter_dict = {} - try: for network in networks: vdc_network_id = network.get_id().split(":") if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id: @@ 
-684,9 +704,9 @@ class vimconnector(vimconn.vimconnector): """Obtain flavor details from the VIM Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete """ - if flavor_id not in flavorlist: + if flavor_id not in vimconnector.flavorlist: raise vimconn.vimconnNotFoundException("Flavor not found.") - return flavorlist[flavor_id] + return vimconnector.flavorlist[flavor_id] def new_flavor(self, flavor_data): """Adds a tenant flavor to VIM @@ -734,7 +754,7 @@ class vimconnector(vimconn.vimconnector): new_flavor[FLAVOR_DISK_KEY] = disk # generate a new uuid put to internal dict and return it. flavor_id = uuid.uuid4() - flavorlist[str(flavor_id)] = new_flavor + vimconnector.flavorlist[str(flavor_id)] = new_flavor self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor)) return str(flavor_id) @@ -744,10 +764,10 @@ class vimconnector(vimconn.vimconnector): Returns the used id or raise an exception """ - if flavor_id not in flavorlist: + if flavor_id not in vimconnector.flavorlist: raise vimconn.vimconnNotFoundException("Flavor not found.") - flavorlist.pop(flavor_id, None) + vimconnector.flavorlist.pop(flavor_id, None) return flavor_id def new_image(self, image_dict): @@ -824,117 +844,124 @@ class vimconnector(vimconn.vimconnector): # create vApp Template and check the status if vCD able to read OVF it will respond with appropirate # status change. # if VCD can parse OVF we upload VMDK file - for catalog in vca.get_catalogs(): - if catalog_name != catalog.name: - continue - link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and - link.get_rel() == 'add', catalog.get_Link()) - assert len(link) == 1 - data = """ - %s vApp Template - """ % (escape(catalog_name), escape(description)) - headers = vca.vcloud_session.get_vcloud_headers() - headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml' - response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger) - if response.status_code == requests.codes.created: - catalogItem = XmlElementTree.fromstring(response.content) - entity = [child for child in catalogItem if - child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0] - href = entity.get('href') - template = href - response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(), - verify=vca.verify, logger=self.logger) - - if response.status_code == requests.codes.ok: - media = mediaType.parseString(response.content, True) - link = filter(lambda link: link.get_rel() == 'upload:default', - media.get_Files().get_File()[0].get_Link())[0] - headers = vca.vcloud_session.get_vcloud_headers() - headers['Content-Type'] = 'Content-Type text/xml' - response = Http.put(link.get_href(), - data=open(media_file_name, 'rb'), - headers=headers, + try: + for catalog in vca.get_catalogs(): + if catalog_name != catalog.name: + continue + link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and + link.get_rel() == 'add', catalog.get_Link()) + assert len(link) == 1 + data = """ + %s vApp Template + """ % (escape(catalog_name), escape(description)) + headers = vca.vcloud_session.get_vcloud_headers() + headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml' + response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger) + if response.status_code == requests.codes.created: + catalogItem = 
XmlElementTree.fromstring(response.content) + entity = [child for child in catalogItem if + child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0] + href = entity.get('href') + template = href + response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(), verify=vca.verify, logger=self.logger) - if response.status_code != requests.codes.ok: - self.logger.debug( - "Failed create vApp template for catalog name {} and image {}".format(catalog_name, - media_file_name)) - return False - - # TODO fix this with aync block - time.sleep(5) - self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name)) - - # uploading VMDK file - # check status of OVF upload and upload remaining files. - response = Http.get(template, - headers=vca.vcloud_session.get_vcloud_headers(), - verify=vca.verify, - logger=self.logger) + if response.status_code == requests.codes.ok: + media = mediaType.parseString(response.content, True) + link = filter(lambda link: link.get_rel() == 'upload:default', + media.get_Files().get_File()[0].get_Link())[0] + headers = vca.vcloud_session.get_vcloud_headers() + headers['Content-Type'] = 'Content-Type text/xml' + response = Http.put(link.get_href(), + data=open(media_file_name, 'rb'), + headers=headers, + verify=vca.verify, logger=self.logger) + if response.status_code != requests.codes.ok: + self.logger.debug( + "Failed create vApp template for catalog name {} and image {}".format(catalog_name, + media_file_name)) + return False + + # TODO fix this with aync block + time.sleep(5) + + self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name)) + + # uploading VMDK file + # check status of OVF upload and upload remaining files. + response = Http.get(template, + headers=vca.vcloud_session.get_vcloud_headers(), + verify=vca.verify, + logger=self.logger) - if response.status_code == requests.codes.ok: - media = mediaType.parseString(response.content, True) - number_of_files = len(media.get_Files().get_File()) - for index in xrange(0, number_of_files): - links_list = filter(lambda link: link.get_rel() == 'upload:default', - media.get_Files().get_File()[index].get_Link()) - for link in links_list: - # we skip ovf since it already uploaded. 
- if 'ovf' in link.get_href(): - continue - # The OVF file and VMDK must be in a same directory - head, tail = os.path.split(media_file_name) - file_vmdk = head + '/' + link.get_href().split("/")[-1] - if not os.path.isfile(file_vmdk): - return False - statinfo = os.stat(file_vmdk) - if statinfo.st_size == 0: - return False - hrefvmdk = link.get_href() - - if progress: - print("Uploading file: {}".format(file_vmdk)) - if progress: - widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ', - FileTransferSpeed()] - progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start() - - bytes_transferred = 0 - f = open(file_vmdk, 'rb') - while bytes_transferred < statinfo.st_size: - my_bytes = f.read(chunk_bytes) - if len(my_bytes) <= chunk_bytes: - headers = vca.vcloud_session.get_vcloud_headers() - headers['Content-Range'] = 'bytes %s-%s/%s' % ( - bytes_transferred, len(my_bytes) - 1, statinfo.st_size) - headers['Content-Length'] = str(len(my_bytes)) - response = Http.put(hrefvmdk, - headers=headers, - data=my_bytes, - verify=vca.verify, - logger=None) - - if response.status_code == requests.codes.ok: - bytes_transferred += len(my_bytes) - if progress: - progress_bar.update(bytes_transferred) - else: - self.logger.debug( - 'file upload failed with error: [%s] %s' % (response.status_code, - response.content)) - - f.close() - return False - f.close() - if progress: - progress_bar.finish() - time.sleep(10) - return True - else: - self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}". - format(catalog_name, media_file_name)) - return False + if response.status_code == requests.codes.ok: + media = mediaType.parseString(response.content, True) + number_of_files = len(media.get_Files().get_File()) + for index in xrange(0, number_of_files): + links_list = filter(lambda link: link.get_rel() == 'upload:default', + media.get_Files().get_File()[index].get_Link()) + for link in links_list: + # we skip ovf since it already uploaded. 
+ if 'ovf' in link.get_href(): + continue + # The OVF file and VMDK must be in a same directory + head, tail = os.path.split(media_file_name) + file_vmdk = head + '/' + link.get_href().split("/")[-1] + if not os.path.isfile(file_vmdk): + return False + statinfo = os.stat(file_vmdk) + if statinfo.st_size == 0: + return False + hrefvmdk = link.get_href() + + if progress: + print("Uploading file: {}".format(file_vmdk)) + if progress: + widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ', + FileTransferSpeed()] + progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start() + + bytes_transferred = 0 + f = open(file_vmdk, 'rb') + while bytes_transferred < statinfo.st_size: + my_bytes = f.read(chunk_bytes) + if len(my_bytes) <= chunk_bytes: + headers = vca.vcloud_session.get_vcloud_headers() + headers['Content-Range'] = 'bytes %s-%s/%s' % ( + bytes_transferred, len(my_bytes) - 1, statinfo.st_size) + headers['Content-Length'] = str(len(my_bytes)) + response = Http.put(hrefvmdk, + headers=headers, + data=my_bytes, + verify=vca.verify, + logger=None) + + if response.status_code == requests.codes.ok: + bytes_transferred += len(my_bytes) + if progress: + progress_bar.update(bytes_transferred) + else: + self.logger.debug( + 'file upload failed with error: [%s] %s' % (response.status_code, + response.content)) + + f.close() + return False + f.close() + if progress: + progress_bar.finish() + time.sleep(10) + return True + else: + self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}". + format(catalog_name, media_file_name)) + return False + except Exception as exp: + self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}" + .format(catalog_name,media_file_name, exp)) + raise vimconn.vimconnException( + "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}" + .format(catalog_name,media_file_name, exp)) self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name)) return False @@ -1038,7 +1065,12 @@ class vimconnector(vimconn.vimconnector): self.logger.debug("File name {} Catalog Name {} file path {} " "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name)) - catalogs = vca.get_catalogs() + try: + catalogs = vca.get_catalogs() + except Exception as exp: + self.logger.debug("Failed get catalogs() with Exception {} ".format(exp)) + raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp)) + if len(catalogs) == 0: self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name)) result = self.create_vimcatalog(vca, catalog_md5_name) @@ -1212,8 +1244,8 @@ class vimconnector(vimconn.vimconnector): """ self.logger.info("Creating new instance for entry {}".format(name)) - self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {}". 
- format(description, start, image_id, flavor_id, net_list, cloud_config)) + self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {}".format( + description, start, image_id, flavor_id, net_list, cloud_config)) vca = self.connect() if not vca: raise vimconn.vimconnConnectionException("self.connect() is failed.") @@ -1252,13 +1284,13 @@ class vimconnector(vimconn.vimconnector): vm_disk = None pci_devices_info = [] if flavor_id is not None: - if flavor_id not in flavorlist: + if flavor_id not in vimconnector.flavorlist: raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: " "Failed retrieve flavor information " "flavor id {}".format(name, flavor_id)) else: try: - flavor = flavorlist[flavor_id] + flavor = vimconnector.flavorlist[flavor_id] vm_cpus = flavor[FLAVOR_VCPUS_KEY] vm_memory = flavor[FLAVOR_RAM_KEY] vm_disk = flavor[FLAVOR_DISK_KEY] @@ -1270,8 +1302,8 @@ class vimconnector(vimconn.vimconnector): for interface in numa.get("interfaces",() ): if interface["dedicated"].strip()=="yes": pci_devices_info.append(interface) - except KeyError: - raise vimconn.vimconnException("Corrupted flavor. {}".format(flavor_id)) + except Exception as exp: + raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp)) # image upload creates template name as catalog name space Template. templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs) @@ -1305,25 +1337,37 @@ class vimconnector(vimconn.vimconnector): # use: 'data', 'bridge', 'mgmt' # create vApp. Set vcpu and ram based on flavor id. - vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName, - self.get_catalogbyid(image_id, catalogs), - network_name=None, # None while creating vapp - network_mode=network_mode, - vm_name=vmname_andid, - vm_cpus=vm_cpus, # can be None if flavor is None - vm_memory=vm_memory) # can be None if flavor is None - - if vapptask is None or vapptask is False: - raise vimconn.vimconnUnexpectedResponse("new_vminstance(): failed deploy vApp {}".format(vmname_andid)) - if type(vapptask) is VappTask: - vca.block_until_completed(vapptask) + try: + vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName, + self.get_catalogbyid(image_id, catalogs), + network_name=None, # None while creating vapp + network_mode=network_mode, + vm_name=vmname_andid, + vm_cpus=vm_cpus, # can be None if flavor is None + vm_memory=vm_memory) # can be None if flavor is None + + if vapptask is None or vapptask is False: + raise vimconn.vimconnUnexpectedResponse( + "new_vminstance(): failed to create vApp {}".format(vmname_andid)) + if type(vapptask) is VappTask: + vca.block_until_completed(vapptask) + + except Exception as exp: + raise vimconn.vimconnUnexpectedResponse( + "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp)) # we should have now vapp in undeployed state. 
- vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid) - vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid) + try: + vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid) + vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid) + except Exception as exp: + raise vimconn.vimconnUnexpectedResponse( + "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}" + .format(vmname_andid, exp)) + if vapp is None: raise vimconn.vimconnUnexpectedResponse( - "new_vminstance(): Failed failed retrieve vApp {} after we deployed".format( + "new_vminstance(): Failed to retrieve vApp {} after creation".format( vmname_andid)) #Add PCI passthrough configrations @@ -1387,49 +1431,67 @@ class vimconnector(vimconn.vimconnector): if type(task) is GenericTask: vca.block_until_completed(task) # connect network to VM - with all DHCP by default - self.logger.info("new_vminstance(): Connecting VM to a network {}".format(nets[0].name)) - task = vapp.connect_vms(nets[0].name, - connection_index=nicIndex, - connections_primary_index=primary_nic_index, - ip_allocation_mode='DHCP') - if type(task) is GenericTask: - vca.block_until_completed(task) + + type_list = ['PF','VF','VFnotShared'] + if 'type' in net and net['type'] not in type_list: + # fetching nic type from vnf + if 'model' in net: + nic_type = net['model'] + self.logger.info("new_vminstance(): adding network adapter "\ + "to a network {}".format(nets[0].name)) + self.add_network_adapter_to_vms(vapp, nets[0].name, + primary_nic_index, + nicIndex, + nic_type=nic_type) + else: + self.logger.info("new_vminstance(): adding network adapter "\ + "to a network {}".format(nets[0].name)) + self.add_network_adapter_to_vms(vapp, nets[0].name, + primary_nic_index, + nicIndex) nicIndex += 1 - except KeyError: - # it might be a case if specific mandatory entry in dict is empty - self.logger.debug("Key error {}".format(KeyError.message)) - raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name)) - # deploy and power on vm - self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name)) - deploytask = vapp.deploy(powerOn=False) - if type(deploytask) is GenericTask: - vca.block_until_completed(deploytask) - - # If VM has PCI devices reserve memory for VM - if PCI_devices_status and vm_obj and vcenter_conect: - memReserve = vm_obj.config.hardware.memoryMB - spec = vim.vm.ConfigSpec() - spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve) - task = vm_obj.ReconfigVM_Task(spec=spec) - if task: - result = self.wait_for_vcenter_task(task, vcenter_conect) - self.logger.info("Reserved memmoery {} MB for "\ - "VM VM status: {}".format(str(memReserve),result)) - else: - self.logger.info("Fail to reserved memmoery {} to VM {}".format( - str(memReserve),str(vm_obj))) + # deploy and power on vm + self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name)) + deploytask = vapp.deploy(powerOn=False) + if type(deploytask) is GenericTask: + vca.block_until_completed(deploytask) + + # If VM has PCI devices reserve memory for VM + if PCI_devices_status and vm_obj and vcenter_conect: + memReserve = vm_obj.config.hardware.memoryMB + spec = vim.vm.ConfigSpec() + spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve) + task = vm_obj.ReconfigVM_Task(spec=spec) + if task: + result = self.wait_for_vcenter_task(task, vcenter_conect) + self.logger.info("Reserved memmoery {} MB for "\ + "VM VM status: 
{}".format(str(memReserve),result)) + else: + self.logger.info("Fail to reserved memmoery {} to VM {}".format( + str(memReserve),str(vm_obj))) - self.logger.debug("new_vminstance(): power on vApp {} ".format(name)) - poweron_task = vapp.poweron() - if type(poweron_task) is GenericTask: - vca.block_until_completed(poweron_task) + self.logger.debug("new_vminstance(): power on vApp {} ".format(name)) + poweron_task = vapp.poweron() + if type(poweron_task) is GenericTask: + vca.block_until_completed(poweron_task) + + except Exception as exp : + # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception + self.logger.debug("new_vminstance(): Failed create new vm instance {}".format(name, exp)) + raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {}".format(name, exp)) # check if vApp deployed and if that the case return vApp UUID otherwise -1 wait_time = 0 vapp_uuid = None while wait_time <= MAX_WAIT_TIME: - vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid) + try: + vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid) + except Exception as exp: + raise vimconn.vimconnUnexpectedResponse( + "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}" + .format(vmname_andid, exp)) + if vapp and vapp.me.deployed: vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid) break @@ -1652,6 +1714,40 @@ class vimconnector(vimconn.vimconnector): """ self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list)) + + mac_ip_addr={} + rheaders = {'Content-Type': 'application/xml'} + iso_edges = ['edge-2','edge-3','edge-6','edge-7','edge-8','edge-9','edge-10'] + + try: + for edge in iso_edges: + nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo' + self.logger.debug("refresh_vms_status: NSX Manager url: {}".format(nsx_api_url)) + + resp = requests.get(self.nsx_manager + nsx_api_url, + auth = (self.nsx_user, self.nsx_password), + verify = False, headers = rheaders) + + if resp.status_code == requests.codes.ok: + dhcp_leases = XmlElementTree.fromstring(resp.text) + for child in dhcp_leases: + if child.tag == 'dhcpLeaseInfo': + dhcpLeaseInfo = child + for leaseInfo in dhcpLeaseInfo: + for elem in leaseInfo: + if (elem.tag)=='macAddress': + mac_addr = elem.text + if (elem.tag)=='ipAddress': + ip_addr = elem.text + if (mac_addr) is not None: + mac_ip_addr[mac_addr]= ip_addr + self.logger.debug("NSX Manager DHCP Lease info: mac_ip_addr : {}".format(mac_ip_addr)) + else: + self.logger.debug("Error occurred while getting DHCP lease info from NSX Manager: {}".format(resp.content)) + except KeyError: + self.logger.debug("Error in response from NSX Manager {}".format(KeyError.message)) + self.logger.debug(traceback.format_exc()) + vca = self.connect() if not vca: raise vimconn.vimconnConnectionException("self.connect() is failed.") @@ -1665,22 +1761,26 @@ class vimconnector(vimconn.vimconnector): vmname = self.get_namebyvappid(vca, vdc, vmuuid) if vmname is not None: - the_vapp = vca.get_vapp(vdc, vmname) - vm_info = the_vapp.get_vms_details() - vm_status = vm_info[0]['status'] - vm_pci_details = self.get_vm_pci_details(vmuuid) - vm_info[0].update(vm_pci_details) + try: + the_vapp = vca.get_vapp(vdc, vmname) + vm_info = the_vapp.get_vms_details() + vm_status = vm_info[0]['status'] + vm_pci_details = self.get_vm_pci_details(vmuuid) + vm_info[0].update(vm_pci_details) - vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()], - 'error_msg': 
vcdStatusCode2manoFormat[the_vapp.me.get_status()], - 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []} + vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()], + 'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()], + 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []} - # get networks - try: + # get networks vm_app_networks = the_vapp.get_vms_network_info() for vapp_network in vm_app_networks: for vm_network in vapp_network: if vm_network['name'] == vmname: + #Assign IP Address based on MAC Address in NSX DHCP lease info + for mac_adres,ip_adres in mac_ip_addr.iteritems(): + if mac_adres == vm_network['mac']: + vm_network['ip']=ip_adres interface = {"mac_address": vm_network['mac'], "vim_net_id": self.get_network_id_by_name(vm_network['network_name']), "vim_interface_id": self.get_network_id_by_name(vm_network['network_name']), @@ -1689,8 +1789,8 @@ class vimconnector(vimconn.vimconnector): vm_dict["interfaces"].append(interface) # add a vm to vm dict vms_dict.setdefault(vmuuid, vm_dict) - except KeyError: - self.logger.debug("Error in respond {}".format(KeyError.message)) + except Exception as exp: + self.logger.debug("Error in response {}".format(exp)) self.logger.debug(traceback.format_exc()) return vms_dict @@ -1722,33 +1822,55 @@ class vimconnector(vimconn.vimconnector): the_vapp = vca.get_vapp(vdc, vapp_name) # TODO fix all status if "start" in action_dict: - if action_dict["start"] == "rebuild": - the_vapp.deploy(powerOn=True) - else: - vm_info = the_vapp.get_vms_details() - vm_status = vm_info[0]['status'] - if vm_status == "Suspended": - the_vapp.poweron() - elif vm_status.status == "Powered off": - the_vapp.poweron() + vm_info = the_vapp.get_vms_details() + vm_status = vm_info[0]['status'] + self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name)) + if vm_status == "Suspended" or vm_status == "Powered off": + power_on_task = the_vapp.poweron() + result = vca.block_until_completed(power_on_task) + self.instance_actions_result("start", result, vapp_name) + elif "rebuild" in action_dict: + self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name)) + rebuild_task = the_vapp.deploy(powerOn=True) + result = vca.block_until_completed(rebuild_task) + self.instance_actions_result("rebuild", result, vapp_name) elif "pause" in action_dict: - pass - ## server.pause() + self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name)) + pause_task = the_vapp.undeploy(action='suspend') + result = vca.block_until_completed(pause_task) + self.instance_actions_result("pause", result, vapp_name) elif "resume" in action_dict: - pass - ## server.resume() + self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name)) + power_task = the_vapp.poweron() + result = vca.block_until_completed(power_task) + self.instance_actions_result("resume", result, vapp_name) elif "shutoff" in action_dict or "shutdown" in action_dict: - the_vapp.shutdown() + action_name , value = action_dict.items()[0] + self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name)) + power_off_task = the_vapp.undeploy(action='powerOff') + result = vca.block_until_completed(power_off_task) + if action_name == "shutdown": + self.instance_actions_result("shutdown", result, vapp_name) + else: + self.instance_actions_result("shutoff", result, vapp_name) elif "forceOff" in action_dict: - the_vapp.reset() - elif "terminate" in action_dict: - the_vapp.delete() - # elif "createImage" in action_dict: - # server.create_image() + result = 
the_vapp.undeploy(action='force') + self.instance_actions_result("forceOff", result, vapp_name) + elif "reboot" in action_dict: + self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name)) + reboot_task = the_vapp.reboot() else: - pass - except: - pass + raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict)) + return vm__vim_uuid + except Exception as exp : + self.logger.debug("action_vminstance: Failed with Exception {}".format(exp)) + raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp)) + + def instance_actions_result(self, action, result, vapp_name): + if result: + self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name)) + else: + self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name)) def get_vminstance_console(self, vm_id, console_type="vnc"): """ @@ -2222,8 +2344,8 @@ class vimconnector(vimconn.vimconnector): if network_uuid is None: return network_uuid - content = self.get_network_action(network_uuid=network_uuid) try: + content = self.get_network_action(network_uuid=network_uuid) vm_list_xmlroot = XmlElementTree.fromstring(content) network_configuration['status'] = vm_list_xmlroot.get("status") @@ -2239,8 +2361,9 @@ class vimconnector(vimconn.vimconnector): if tagKey != "": network_configuration[tagKey] = configuration.text.strip() return network_configuration - except: - pass + except Exception as exp : + self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp)) + raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp)) return network_configuration @@ -2306,7 +2429,7 @@ class vimconnector(vimconn.vimconnector): vm_list_xmlroot = XmlElementTree.fromstring(content) vcd_uuid = vm_list_xmlroot.get('id').split(":") if len(vcd_uuid) == 4: - self.logger.info("Create new network name: {} uuid: {}".format(network_name, vcd_uuid[3])) + self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3])) return vcd_uuid[3] except: self.logger.debug("Failed create network {}".format(network_name)) @@ -2392,26 +2515,45 @@ class vimconnector(vimconn.vimconnector): except: return None - #Configure IP profile of the network - ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE - - gateway_address=ip_profile['gateway_address'] - dhcp_count=int(ip_profile['dhcp_count']) - subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address']) - - if ip_profile['dhcp_enabled']==True: - dhcp_enabled='true' - else: - dhcp_enabled='false' - dhcp_start_address=ip_profile['dhcp_start_address'] + try: + #Configure IP profile of the network + ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE + + if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None: + ip_profile['gateway_address']=DEFAULT_IP_PROFILE['gateway_address'] + if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None: + ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count'] + if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None: + ip_profile['subnet_address']=DEFAULT_IP_PROFILE['subnet_address'] + if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None: + ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled'] + if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None: + 
ip_profile['dhcp_start_address']=DEFAULT_IP_PROFILE['dhcp_start_address'] + if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None: + ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version'] + if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None: + ip_profile['dns_address']=DEFAULT_IP_PROFILE['dns_address'] + + gateway_address=ip_profile['gateway_address'] + dhcp_count=int(ip_profile['dhcp_count']) + subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address']) + + if ip_profile['dhcp_enabled']==True: + dhcp_enabled='true' + else: + dhcp_enabled='false' + dhcp_start_address=ip_profile['dhcp_start_address'] - #derive dhcp_end_address from dhcp_start_address & dhcp_count - end_ip_int = int(netaddr.IPAddress(dhcp_start_address)) - end_ip_int += dhcp_count - 1 - dhcp_end_address = str(netaddr.IPAddress(end_ip_int)) + #derive dhcp_end_address from dhcp_start_address & dhcp_count + end_ip_int = int(netaddr.IPAddress(dhcp_start_address)) + end_ip_int += dhcp_count - 1 + dhcp_end_address = str(netaddr.IPAddress(end_ip_int)) - ip_version=ip_profile['ip_version'] - dns_address=ip_profile['dns_address'] + ip_version=ip_profile['ip_version'] + dns_address=ip_profile['dns_address'] + except KeyError as exp: + self.logger.debug("Create Network REST: Key error {}".format(exp)) + raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp)) # either use client provided UUID or search for a first available # if both are not defined we return none @@ -2488,8 +2630,8 @@ class vimconnector(vimconn.vimconnector): logger=vca.logger) if response.status_code != 201: - self.logger.debug("Create Network POST REST API call failed. Return status code {}" - .format(response.status_code)) + self.logger.debug("Create Network POST REST API call failed. 
Return status code {}, Response content: {}" + .format(response.status_code,response.content)) else: network = networkType.parseString(response.content, True) create_nw_task = network.get_Tasks().get_Task()[0] @@ -2497,7 +2639,7 @@ class vimconnector(vimconn.vimconnector): # if we all ok we respond with content after network creation completes # otherwise by default return None if create_nw_task is not None: - self.logger.debug("Create Network REST : Waiting for Nw creation complete") + self.logger.debug("Create Network REST : Waiting for Network creation complete") status = vca.block_until_completed(create_nw_task) if status: return response.content @@ -2835,7 +2977,6 @@ class vimconnector(vimconn.vimconnector): vmext = vim_info.find('vmext:VmVimObjectRef', namespaces) if vmext is not None: vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text - vm_vcenter_info["vim_server_href"] = vmext.find('vmext:VimServerRef', namespaces).attrib['href'] parsed_respond["vm_vcenter_info"]= vm_vcenter_info virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces) @@ -3003,27 +3144,31 @@ class vimconnector(vimconn.vimconnector): vm_obj = None vcenter_conect = None self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid)) - #Assuming password of vCenter user is same as password of vCloud user - vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vapp_uuid) - self.logger.info("vm_moref_id, {} vm_vcenter_host {} vm_vcenter_username{} "\ - "vm_vcenter_port{}".format( - vm_moref_id, vm_vcenter_host, - vm_vcenter_username, vm_vcenter_port)) - if vm_moref_id and vm_vcenter_host and vm_vcenter_username: + try: + vm_vcenter_info = self.get_vm_vcenter_info(vapp_uuid) + except Exception as exp: + self.logger.error("Error occurred while getting vCenter infromationn"\ + " for VM : {}".format(exp)) + raise vimconn.vimconnException(message=exp) + + if vm_vcenter_info["vm_moref_id"]: context = None if hasattr(ssl, '_create_unverified_context'): context = ssl._create_unverified_context() try: no_of_pci_devices = len(pci_devices) if no_of_pci_devices > 0: - vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username, - pwd=self.passwd, port=int(vm_vcenter_port) , - sslContext=context) + vcenter_conect = SmartConnect( + host=vm_vcenter_info["vm_vcenter_ip"], + user=vm_vcenter_info["vm_vcenter_user"], + pwd=vm_vcenter_info["vm_vcenter_password"], + port=int(vm_vcenter_info["vm_vcenter_port"]), + sslContext=context) atexit.register(Disconnect, vcenter_conect) content = vcenter_conect.RetrieveContent() #Get VM and its host - host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id) + host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"]) self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj)) if host_obj and vm_obj: #get PCI devies from host on which vapp is currently installed @@ -3061,7 +3206,7 @@ class vimconnector(vimconn.vimconnector): if status: self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj))) else: - self.logger.info("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj))) + self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj))) return True, vm_obj, vcenter_conect else: self.logger.error("Currently there is no host with"\ @@ -3292,10 +3437,9 @@ class vimconnector(vimconn.vimconnector): exp)) return task - def get_vcenter_info_rest(self , vapp_uuid): + def 
get_vm_vcenter_info(self , vapp_uuid): """ - https://192.169.241.105/api/admin/extension/vimServer/cc82baf9-9f80-4468-bfe9-ce42b3f9dde5 - Method to get details of vCenter + Method to get details of vCenter and vm Args: vapp_uuid - uuid of vApp or VM @@ -3303,46 +3447,39 @@ class vimconnector(vimconn.vimconnector): Returns: Moref Id of VM and deails of vCenter """ - vm_moref_id = None - vm_vcenter = None - vm_vcenter_username = None - vm_vcenter_port = None - - vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True) - if vm_details and "vm_vcenter_info" in vm_details: - vm_moref_id = vm_details["vm_vcenter_info"]["vm_moref_id"] - vim_server_href = vm_details["vm_vcenter_info"]["vim_server_href"] - - if vim_server_href: - vca = self.connect_as_admin() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed") - if vim_server_href is None: - self.logger.error("No url to get vcenter details") - - if vca.vcloud_session and vca.vcloud_session.organization: - response = Http.get(url=vim_server_href, - headers=vca.vcloud_session.get_vcloud_headers(), - verify=vca.verify, - logger=vca.logger) + vm_vcenter_info = {} - if response.status_code != requests.codes.ok: - self.logger.debug("GET REST API call {} failed. Return status code {}".format(vim_server_href, - response.status_code)) - try: - namespaces={"vmext":"http://www.vmware.com/vcloud/extension/v1.5", - "vcloud":"http://www.vmware.com/vcloud/v1.5" - } - xmlroot_respond = XmlElementTree.fromstring(response.content) - vm_vcenter_username = xmlroot_respond.find('vmext:Username', namespaces).text - vcenter_url = xmlroot_respond.find('vmext:Url', namespaces).text - vm_vcenter_port = vcenter_url.split(":")[2] - vm_vcenter = vcenter_url.split(":")[1].split("//")[1] + if self.vcenter_ip is not None: + vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip + else: + raise vimconn.vimconnException(message="vCenter IP is not provided."\ + " Please provide vCenter IP while attaching datacenter to tenant in --config") + if self.vcenter_port is not None: + vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port + else: + raise vimconn.vimconnException(message="vCenter port is not provided."\ + " Please provide vCenter port while attaching datacenter to tenant in --config") + if self.vcenter_user is not None: + vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user + else: + raise vimconn.vimconnException(message="vCenter user is not provided."\ + " Please provide vCenter user while attaching datacenter to tenant in --config") - except Exception as exp : - self.logger.info("Error occurred calling rest api for vcenter information {}".format(exp)) + if self.vcenter_password is not None: + vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password + else: + raise vimconn.vimconnException(message="vCenter user password is not provided."\ + " Please provide vCenter user password while attaching datacenter to tenant in --config") + try: + vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True) + if vm_details and "vm_vcenter_info" in vm_details: + vm_vcenter_info["vm_moref_id"] = vm_details["vm_vcenter_info"].get("vm_moref_id", None) - return vm_moref_id , vm_vcenter , vm_vcenter_username, vm_vcenter_port + return vm_vcenter_info + + except Exception as exp: + self.logger.error("Error occurred while getting vCenter infromationn"\ + " for VM : {}".format(exp)) def get_vm_pci_details(self, vmuuid): @@ -3358,28 +3495,172 @@ class vimconnector(vimconn.vimconnector): """ vm_pci_devices_info = {} try: - 
vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid) - if vm_moref_id and vm_vcenter_host and vm_vcenter_username: + vm_vcenter_info = self.get_vm_vcenter_info(vmuuid) + if vm_vcenter_info["vm_moref_id"]: context = None if hasattr(ssl, '_create_unverified_context'): context = ssl._create_unverified_context() - vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username, - pwd=self.passwd, port=int(vm_vcenter_port), - sslContext=context) + vcenter_conect = SmartConnect(host=vm_vcenter_info["vm_vcenter_ip"], + user=vm_vcenter_info["vm_vcenter_user"], + pwd=vm_vcenter_info["vm_vcenter_password"], + port=int(vm_vcenter_info["vm_vcenter_port"]), + sslContext=context + ) atexit.register(Disconnect, vcenter_conect) content = vcenter_conect.RetrieveContent() #Get VM and its host - host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id) - for device in vm_obj.config.hardware.device: - if type(device) == vim.vm.device.VirtualPCIPassthrough: - device_details={'devide_id':device.backing.id, - 'pciSlotNumber':device.slotInfo.pciSlotNumber - } - vm_pci_devices_info[device.deviceInfo.label] = device_details + if content: + host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"]) + if host_obj and vm_obj: + vm_pci_devices_info["host_name"]= host_obj.name + vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress + for device in vm_obj.config.hardware.device: + if type(device) == vim.vm.device.VirtualPCIPassthrough: + device_details={'devide_id':device.backing.id, + 'pciSlotNumber':device.slotInfo.pciSlotNumber, + } + vm_pci_devices_info[device.deviceInfo.label] = device_details + else: + self.logger.error("Can not connect to vCenter while getting "\ + "PCI devices infromationn") + return vm_pci_devices_info except Exception as exp: - self.logger.info("Error occurred while getting PCI devices infromationn"\ - " for VM {} : {}".format(vm_obj,exp)) - return vm_pci_devices_info + self.logger.error("Error occurred while getting VM infromationn"\ + " for VM : {}".format(exp)) + raise vimconn.vimconnException(message=exp) + + def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, nic_type=None): + """ + Method to add network adapter type to vm + Args : + network_name - name of network + primary_nic_index - int value for primary nic index + nicIndex - int value for nic index + nic_type - specify model name to which add to vm + Returns: + None + """ + vca = self.connect() + if not vca: + raise vimconn.vimconnConnectionException("Failed to connect vCloud director") + + try: + if not nic_type: + for vms in vapp._get_vms(): + vm_id = (vms.id).split(':')[-1] + + url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id) + + response = Http.get(url=url_rest_call, + headers=vca.vcloud_session.get_vcloud_headers(), + verify=vca.verify, + logger=vca.logger) + if response.status_code != 200: + self.logger.error("REST call {} failed reason : {}"\ + "status code : {}".format(url_rest_call, + response.content, + response.status_code)) + raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\ + "network connection section") + + data = response.content + if '' not in data: + item = """{} + + {} + true + DHCP + """.format(primary_nic_index, network_name, nicIndex) + data = data.replace('\n','\n{}\n'.format(item)) + else: + new_item = """ + {} + true + DHCP + """.format(network_name, nicIndex) + data = 
data.replace('\n','\n{}\n'.format(new_item)) + headers = vca.vcloud_session.get_vcloud_headers() + headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml' + response = Http.put(url=url_rest_call, headers=headers, data=data, + verify=vca.verify, + logger=vca.logger) + if response.status_code != 202: + self.logger.error("REST call {} failed reason : {}"\ + "status code : {} ".format(url_rest_call, + response.content, + response.status_code)) + raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\ + "network connection section") + else: + nic_task = taskType.parseString(response.content, True) + if isinstance(nic_task, GenericTask): + vca.block_until_completed(nic_task) + self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\ + "default NIC type".format(vm_id)) + else: + self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\ + "connect NIC type".format(vm_id)) + else: + for vms in vapp._get_vms(): + vm_id = (vms.id).split(':')[-1] + url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id) + + response = Http.get(url=url_rest_call, + headers=vca.vcloud_session.get_vcloud_headers(), + verify=vca.verify, + logger=vca.logger) + if response.status_code != 200: + self.logger.error("REST call {} failed reason : {}"\ + "status code : {}".format(url_rest_call, + response.content, + response.status_code)) + raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\ + "network connection section") + data = response.content + if '' not in data: + item = """{} + + {} + true + DHCP + {} + """.format(primary_nic_index, network_name, nicIndex, nic_type) + data = data.replace('\n','\n{}\n'.format(item)) + else: + new_item = """ + {} + true + DHCP + {} + """.format(network_name, nicIndex, nic_type) + data = data.replace('\n','\n{}\n'.format(new_item)) + + headers = vca.vcloud_session.get_vcloud_headers() + headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml' + response = Http.put(url=url_rest_call, headers=headers, data=data, + verify=vca.verify, + logger=vca.logger) + + if response.status_code != 202: + self.logger.error("REST call {} failed reason : {}"\ + "status code : {}".format(url_rest_call, + response.content, + response.status_code)) + raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\ + "network connection section") + else: + nic_task = taskType.parseString(response.content, True) + if isinstance(nic_task, GenericTask): + vca.block_until_completed(nic_task) + self.logger.info("add_network_adapter_to_vms(): VM {} "\ + "conneced to NIC type {}".format(vm_id, nic_type)) + else: + self.logger.error("add_network_adapter_to_vms(): VM {} "\ + "failed to connect NIC type {}".format(vm_id, nic_type)) + except Exception as exp: + self.logger.error("add_network_adapter_to_vms() : exception occurred "\ + "while adding Network adapter") + raise vimconn.vimconnException(message=exp)
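
The refresh_vms_status() change in this diff resolves guest IP addresses by querying the NSX Manager DHCP lease API for each edge and matching the returned leases against the vApp NIC MAC addresses. Below is a minimal standalone sketch of that lookup, assuming the same /api/4.0/edges/<edge-id>/dhcp/leaseInfo endpoint and XML layout that the diff parses; the NSX manager URL, credentials and edge id are placeholders, not values from the commit.

    # Illustrative sketch of the NSX DHCP lease lookup performed in refresh_vms_status().
    # nsx_manager, nsx_user, nsx_password and the edge id below are placeholder values.
    import requests
    import xml.etree.ElementTree as XmlElementTree

    nsx_manager = "https://nsx-manager.example.com"   # placeholder
    nsx_user = "admin"                                 # placeholder
    nsx_password = "password"                          # placeholder

    def get_dhcp_mac_ip_map(edge_id):
        """Return {mac_address: ip_address} taken from the NSX edge DHCP lease info."""
        url = "{}/api/4.0/edges/{}/dhcp/leaseInfo".format(nsx_manager, edge_id)
        resp = requests.get(url, auth=(nsx_user, nsx_password),
                            verify=False, headers={'Content-Type': 'application/xml'})
        mac_ip_addr = {}
        if resp.status_code == requests.codes.ok:
            dhcp_leases = XmlElementTree.fromstring(resp.text)
            for child in dhcp_leases:
                if child.tag == 'dhcpLeaseInfo':
                    for lease_info in child:
                        mac_addr = ip_addr = None
                        for elem in lease_info:
                            if elem.tag == 'macAddress':
                                mac_addr = elem.text
                            elif elem.tag == 'ipAddress':
                                ip_addr = elem.text
                        if mac_addr is not None:
                            mac_ip_addr[mac_addr] = ip_addr
        return mac_ip_addr

    # Usage: map a vApp NIC MAC to its DHCP-assigned IP, as the diff does per VM interface.
    # leases = get_dhcp_mac_ip_map('edge-2')
    # ip = leases.get('00:50:56:01:02:03')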