X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_ro%2Fvimconn_vmware.py;h=9faf8b40044d3d6579c237e51d66869a06247303;hb=5461675ac6705ee92916ed741da1914bd2162482;hp=a224255609527d7d94e03bf8aeadb5dfcf0f867a;hpb=2c290ca4088492a3c32bb6ab218d0004da68f6ea;p=osm%2FRO.git diff --git a/osm_ro/vimconn_vmware.py b/osm_ro/vimconn_vmware.py index a2242556..9faf8b40 100644 --- a/osm_ro/vimconn_vmware.py +++ b/osm_ro/vimconn_vmware.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- ## -# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. -# This file is part of openmano +# Copyright 2016-2017 VMware Inc. +# This file is part of ETSI OSM # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -18,7 +18,7 @@ # under the License. # # For those usages not covered by the Apache License, Version 2.0 please -# contact with: nfvlabs@tid.es +# contact: osslegalrouting@vmware.com ## """ @@ -63,6 +63,7 @@ import hashlib import socket import struct import netaddr +import random # global variable for vcd connector type STANDALONE = 'standalone' @@ -71,13 +72,9 @@ STANDALONE = 'standalone' FLAVOR_RAM_KEY = 'ram' FLAVOR_VCPUS_KEY = 'vcpus' FLAVOR_DISK_KEY = 'disk' -DEFAULT_IP_PROFILE = {'gateway_address':"192.168.1.1", - 'dhcp_count':50, - 'subnet_address':"192.168.1.0/24", +DEFAULT_IP_PROFILE = {'dhcp_count':50, 'dhcp_enabled':True, - 'dhcp_start_address':"192.168.1.3", - 'ip_version':"IPv4", - 'dns_address':"192.168.1.2" + 'ip_version':"IPv4" } # global variable for wait time INTERVAL_TIME = 5 @@ -181,10 +178,6 @@ class vimconnector(vimconn.vimconnector): self.nsx_manager = None self.nsx_user = None self.nsx_password = None - self.vcenter_ip = None - self.vcenter_port = None - self.vcenter_user = None - self.vcenter_password = None if tenant_name is not None: orgnameandtenant = tenant_name.split(":") @@ -217,6 +210,14 @@ class vimconnector(vimconn.vimconnector): self.vcenter_user = config.get("vcenter_user", None) self.vcenter_password = config.get("vcenter_password", None) +# ############# Stub code for SRIOV ################# +# try: +# self.dvs_name = config['dv_switch_name'] +# except KeyError: +# raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config") +# +# self.vlanID_range = config.get("vlanID_range", None) + self.org_uuid = None self.vca = None @@ -356,6 +357,11 @@ class vimconnector(vimconn.vimconnector): Returns: The return vca object that letter can be used to connect to vcloud direct as admin """ + vca = self.connect() + if not vca: + raise vimconn.vimconnConnectionException("self.connect() is failed.") + + self.vca = vca try: if self.org_uuid is None: org_dict = self.get_org_list() @@ -421,9 +427,61 @@ class vimconnector(vimconn.vimconnector): raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name)) def delete_tenant(self, tenant_id=None): - """Delete a tenant from VIM""" - 'Returns the tenant identifier' - raise vimconn.vimconnNotImplemented("Should have implemented this") + """ Delete a tenant from VIM + Args: + tenant_id is tenant_id to be deleted. + + Return: + returns the tenant identifier in UUID format. 
+            If the action fails, the method raises an exception.
+        """
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect_as_admin() failed.")
+
+        if tenant_id is not None:
+            if vca.vcloud_session and vca.vcloud_session.organization:
+                # Get OrgVDC
+                url_list = [self.vca.host, '/api/vdc/', tenant_id]
+                orgvdc_href = ''.join(url_list)
+                response = Http.get(url=orgvdc_href,
+                                    headers=vca.vcloud_session.get_vcloud_headers(),
+                                    verify=vca.verify,
+                                    logger=vca.logger)
+
+                if response.status_code != requests.codes.ok:
+                    self.logger.debug("delete_tenant(): GET REST API call {} failed. "\
+                                      "Return status code {}".format(orgvdc_href,
+                                                                     response.status_code))
+                    raise vimconn.vimconnNotFoundException("Failed to get tenant {}".format(tenant_id))
+
+                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+                namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+                namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
+                vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']", namespaces).attrib['href']
+                vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
+
+                # Delete OrgVDC
+                response = Http.delete(url=vdc_remove_href,
+                                       headers=vca.vcloud_session.get_vcloud_headers(),
+                                       verify=vca.verify,
+                                       logger=vca.logger)
+
+                if response.status_code == 202:
+                    delete_vdc_task = taskType.parseString(response.content, True)
+                    if type(delete_vdc_task) is GenericTask:
+                        self.vca.block_until_completed(delete_vdc_task)
+                    self.logger.info("Deleted tenant with ID {}".format(tenant_id))
+                    return tenant_id
+                else:
+                    self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
+                                      "Return status code {}".format(vdc_remove_href,
+                                                                     response.status_code))
+                    raise vimconn.vimconnException("Failed to delete tenant with ID {}".format(tenant_id))
+        else:
+            self.logger.debug("delete_tenant(): Incorrect tenant ID {}".format(tenant_id))
+            raise vimconn.vimconnNotFoundException("Failed to get tenant {}".format(tenant_id))
 
     def get_tenant_list(self, filter_dict={}):
         """Obtain tenants of VIM
@@ -473,6 +531,12 @@ class vimconnector(vimconn.vimconnector):
         if shared:
             isshared = 'true'
 
+# ############# Stub code for SRIOV #################
+#        if net_type == "data" or net_type == "ptp":
+#            if self.config.get('dv_switch_name') == None:
+#                 raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
+#            network_uuid = self.create_dvPort_group(net_name)
+
         network_uuid = self.create_network(network_name=net_name, net_type=net_type,
                                            ip_profile=ip_profile, isshared=isshared)
         if network_uuid is not None:
@@ -488,19 +552,16 @@ class vimconnector(vimconn.vimconnector):
         """
         self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
 
         if not self.tenant_name:
             raise vimconn.vimconnConnectionException("Tenant name is empty.")
 
-        vdc = vca.get_vdc(self.tenant_name)
+        vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
 
         vdc_uuid = vdc.get_id().split(":")[3]
-        networks = vca.get_networks(vdc.get_name())
+        networks = self.vca.get_networks(vdc.get_name())
         network_list = []
         try:
             for network in networks:
@@ -546,23 +607,20 @@ class vimconnector(vimconn.vimconnector):
             List can be empty
         """
-        self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
+        self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
 
         if not self.tenant_name:
             raise vimconn.vimconnConnectionException("Tenant name is empty.")
 
-        vdc = vca.get_vdc(self.tenant_name)
+        vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
 
-        vdcid = vdc.get_id().split(":")[3]
-        networks = vca.get_networks(vdc.get_name())
-        network_list = []
-        try:
+        vdcid = vdc.get_id().split(":")[3]
+        networks = self.vca.get_networks(vdc.get_name())
+        network_list = []
+
+        for network in networks:
             filter_entry = {}
             net_uuid = network.get_id().split(":")
@@ -606,17 +664,13 @@ class vimconnector(vimconn.vimconnector):
         """Method obtains network details of net_id VIM network
           Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific fields"""
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
-
-        vdc = vca.get_vdc(self.tenant_name)
-        vdc_id = vdc.get_id().split(":")[3]
+        try:
+            vdc = self.get_vdc_details()
+            vdc_id = vdc.get_id().split(":")[3]
 
-        networks = vca.get_networks(vdc.get_name())
-        filter_dict = {}
+            networks = self.vca.get_networks(vdc.get_name())
+            filter_dict = {}
 
-        try:
             for network in networks:
                 vdc_network_id = network.get_id().split(":")
                 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
@@ -645,9 +699,17 @@ class vimconnector(vimconn.vimconnector):
             Returns the network identifier or raise an exception
         """
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() for tenant {} is failed.".format(self.tenant_name))
+        # ############# Stub code for SRIOV #################
+#        dvport_group = self.get_dvport_group(net_id)
+#        if dvport_group:
+#            #delete portgroup
+#            status = self.destroy_dvport_group(net_id)
+#            if status:
+#                # Remove vlanID from persistent info
+#                if net_id in self.persistent_info["used_vlanIDs"]:
+#                    del self.persistent_info["used_vlanIDs"][net_id]
+#
+#                return net_id
 
         vcd_network = self.get_vcd_network(network_uuid=net_id)
         if vcd_network is not None and vcd_network:
@@ -674,10 +736,6 @@ class vimconnector(vimconn.vimconnector):
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
-
         dict_entry = {}
         try:
             for net in net_list:
@@ -735,6 +793,13 @@ class vimconnector(vimconn.vimconnector):
         cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
         disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
 
+        if not isinstance(ram, int):
+            raise vimconn.vimconnException("Non-integer value for ram")
+        elif not isinstance(cpu, int):
+            raise vimconn.vimconnException("Non-integer value for cpu")
+        elif not isinstance(disk, int):
+            raise vimconn.vimconnException("Non-integer value for disk")
+
         extended_flv = flavor_data.get("extended")
         if extended_flv:
             numas=extended_flv.get("numas")
@@ -782,12 +847,81 @@ class vimconnector(vimconn.vimconnector):
 
     def delete_image(self, image_id):
         """
-
-        :param image_id:
-        :return:
+        Deletes a tenant image from VIM
+        Args:
+            image_id is ID of Image to be deleted
+        Return:
+            returns the image identifier in UUID format or raises an exception on error
         """
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect_as_admin() failed.")
+        # Get Catalog details
+        url_list = [self.vca.host, '/api/catalog/', image_id]
+        catalog_href = ''.join(url_list)
+        response = Http.get(url=catalog_href,
+                            headers=vca.vcloud_session.get_vcloud_headers(),
+                            verify=vca.verify,
+                            logger=vca.logger)
+
+        if response.status_code != requests.codes.ok:
+            self.logger.debug("delete_image(): GET REST API call {} failed. "\
+                              "Return status code {}".format(catalog_href,
+                                                             response.status_code))
+            raise vimconn.vimconnNotFoundException("Failed to get image {}".format(image_id))
+
+        lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+        namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+        namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
+
+        catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems", namespaces)
+        catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem", namespaces)
+        for catalogItem in catalogItems:
+            catalogItem_href = catalogItem.attrib['href']
+
+            # GET details of catalogItem
+            response = Http.get(url=catalogItem_href,
+                                headers=vca.vcloud_session.get_vcloud_headers(),
+                                verify=vca.verify,
+                                logger=vca.logger)
+
+            if response.status_code != requests.codes.ok:
+                self.logger.debug("delete_image(): GET REST API call {} failed. "\
+                                  "Return status code {}".format(catalogItem_href,
+                                                                 response.status_code))
+                raise vimconn.vimconnNotFoundException("Failed to get catalogItem {} for catalog {}".format(
+                                                                                    catalogItem,
+                                                                                    image_id))
+
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+            namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
+            catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']", namespaces).attrib['href']
+
+            # Remove catalogItem
+            response = Http.delete(url=catalogitem_remove_href,
+                                   headers=vca.vcloud_session.get_vcloud_headers(),
+                                   verify=vca.verify,
+                                   logger=vca.logger)
+            if response.status_code == requests.codes.no_content:
+                self.logger.debug("Deleted Catalog item {}".format(catalogItem))
+            else:
+                raise vimconn.vimconnException("Failed to delete Catalog Item {}".format(catalogItem))
+
+        # Remove catalog
+        url_list = [self.vca.host, '/api/admin/catalog/', image_id]
+        catalog_remove_href = ''.join(url_list)
+        response = Http.delete(url=catalog_remove_href,
+                               headers=vca.vcloud_session.get_vcloud_headers(),
+                               verify=vca.verify,
+                               logger=vca.logger)
+
+        if response.status_code == requests.codes.no_content:
+            self.logger.debug("Deleted Catalog {}".format(image_id))
+            return image_id
+        else:
+            raise vimconn.vimconnException("Failed to delete Catalog {}".format(image_id))
 
-        raise vimconn.vimconnNotImplemented("Should have implemented this")
 
     def catalog_exists(self, catalog_name, catalogs):
         """
@@ -844,117 +978,124 @@ class vimconnector(vimconn.vimconnector):
         # create vApp Template and check the status if vCD able to read OVF it will respond with appropriate
         # status change.
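# --- Editor's aside (illustration, not part of the diff) -------------------
# The surrounding comments describe phase one of vCD's template upload: POST
# an UploadVAppTemplateParams document to the catalog's "add" link, then PUT
# the OVF descriptor to the "upload:default" link that comes back. A minimal
# standalone sketch of that first phase with plain `requests`; the URL, token
# header and XML shape here are illustrative assumptions (the real code goes
# through pyvcloud's Http wrapper and vca.vcloud_session headers):
import requests

def start_template_upload(catalog_add_href, token, name, description):
    # POST UploadVAppTemplateParams; on 201 vCD returns a vAppTemplate entity
    # whose Files section carries the upload:default links.
    body = ('<UploadVAppTemplateParams name="{}" '
            'xmlns="http://www.vmware.com/vcloud/v1.5">'
            '<Description>{} vApp Template</Description>'
            '</UploadVAppTemplateParams>').format(name, description)
    headers = {'x-vcloud-authorization': token,
               'Content-Type': 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'}
    resp = requests.post(catalog_add_href, data=body, headers=headers, verify=False)
    resp.raise_for_status()
    return resp.content  # parse Files/Link[@rel='upload:default'] from this XML
# ---------------------------------------------------------------------------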
# if VCD can parse OVF we upload VMDK file - for catalog in vca.get_catalogs(): - if catalog_name != catalog.name: - continue - link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and - link.get_rel() == 'add', catalog.get_Link()) - assert len(link) == 1 - data = """ - %s vApp Template - """ % (escape(catalog_name), escape(description)) - headers = vca.vcloud_session.get_vcloud_headers() - headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml' - response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger) - if response.status_code == requests.codes.created: - catalogItem = XmlElementTree.fromstring(response.content) - entity = [child for child in catalogItem if - child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0] - href = entity.get('href') - template = href - response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(), - verify=vca.verify, logger=self.logger) - - if response.status_code == requests.codes.ok: - media = mediaType.parseString(response.content, True) - link = filter(lambda link: link.get_rel() == 'upload:default', - media.get_Files().get_File()[0].get_Link())[0] - headers = vca.vcloud_session.get_vcloud_headers() - headers['Content-Type'] = 'Content-Type text/xml' - response = Http.put(link.get_href(), - data=open(media_file_name, 'rb'), - headers=headers, + try: + for catalog in vca.get_catalogs(): + if catalog_name != catalog.name: + continue + link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and + link.get_rel() == 'add', catalog.get_Link()) + assert len(link) == 1 + data = """ + %s vApp Template + """ % (escape(catalog_name), escape(description)) + headers = vca.vcloud_session.get_vcloud_headers() + headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml' + response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger) + if response.status_code == requests.codes.created: + catalogItem = XmlElementTree.fromstring(response.content) + entity = [child for child in catalogItem if + child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0] + href = entity.get('href') + template = href + response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(), verify=vca.verify, logger=self.logger) - if response.status_code != requests.codes.ok: - self.logger.debug( - "Failed create vApp template for catalog name {} and image {}".format(catalog_name, - media_file_name)) - return False - - # TODO fix this with aync block - time.sleep(5) - - self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name)) - # uploading VMDK file - # check status of OVF upload and upload remaining files. 
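# --- Editor's aside (illustration, not part of the diff) -------------------
# The VMDK payload below is streamed in chunked PUTs carrying a Content-Range
# header, the same 'bytes %s-%s/%s' pattern visible further down this hunk. A
# self-contained sketch of that loop (URL, token header and chunk size are
# illustrative assumptions):
import os
import requests

def put_file_in_ranges(upload_href, file_path, token, chunk_bytes=1024 * 1024):
    size = os.stat(file_path).st_size
    sent = 0
    with open(file_path, 'rb') as f:
        while sent < size:
            chunk = f.read(chunk_bytes)
            headers = {'x-vcloud-authorization': token,
                       'Content-Range': 'bytes {}-{}/{}'.format(sent, sent + len(chunk) - 1, size),
                       'Content-Length': str(len(chunk))}
            resp = requests.put(upload_href, data=chunk, headers=headers, verify=False)
            resp.raise_for_status()  # stop on the first failed range
            sent += len(chunk)
    return sent
# ---------------------------------------------------------------------------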
- response = Http.get(template, - headers=vca.vcloud_session.get_vcloud_headers(), - verify=vca.verify, - logger=self.logger) + if response.status_code == requests.codes.ok: + media = mediaType.parseString(response.content, True) + link = filter(lambda link: link.get_rel() == 'upload:default', + media.get_Files().get_File()[0].get_Link())[0] + headers = vca.vcloud_session.get_vcloud_headers() + headers['Content-Type'] = 'Content-Type text/xml' + response = Http.put(link.get_href(), + data=open(media_file_name, 'rb'), + headers=headers, + verify=vca.verify, logger=self.logger) + if response.status_code != requests.codes.ok: + self.logger.debug( + "Failed create vApp template for catalog name {} and image {}".format(catalog_name, + media_file_name)) + return False + + # TODO fix this with aync block + time.sleep(5) + + self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name)) + + # uploading VMDK file + # check status of OVF upload and upload remaining files. + response = Http.get(template, + headers=vca.vcloud_session.get_vcloud_headers(), + verify=vca.verify, + logger=self.logger) - if response.status_code == requests.codes.ok: - media = mediaType.parseString(response.content, True) - number_of_files = len(media.get_Files().get_File()) - for index in xrange(0, number_of_files): - links_list = filter(lambda link: link.get_rel() == 'upload:default', - media.get_Files().get_File()[index].get_Link()) - for link in links_list: - # we skip ovf since it already uploaded. - if 'ovf' in link.get_href(): - continue - # The OVF file and VMDK must be in a same directory - head, tail = os.path.split(media_file_name) - file_vmdk = head + '/' + link.get_href().split("/")[-1] - if not os.path.isfile(file_vmdk): - return False - statinfo = os.stat(file_vmdk) - if statinfo.st_size == 0: - return False - hrefvmdk = link.get_href() - - if progress: - print("Uploading file: {}".format(file_vmdk)) - if progress: - widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ', - FileTransferSpeed()] - progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start() - - bytes_transferred = 0 - f = open(file_vmdk, 'rb') - while bytes_transferred < statinfo.st_size: - my_bytes = f.read(chunk_bytes) - if len(my_bytes) <= chunk_bytes: - headers = vca.vcloud_session.get_vcloud_headers() - headers['Content-Range'] = 'bytes %s-%s/%s' % ( - bytes_transferred, len(my_bytes) - 1, statinfo.st_size) - headers['Content-Length'] = str(len(my_bytes)) - response = Http.put(hrefvmdk, - headers=headers, - data=my_bytes, - verify=vca.verify, - logger=None) - - if response.status_code == requests.codes.ok: - bytes_transferred += len(my_bytes) - if progress: - progress_bar.update(bytes_transferred) - else: - self.logger.debug( - 'file upload failed with error: [%s] %s' % (response.status_code, - response.content)) - - f.close() - return False - f.close() - if progress: - progress_bar.finish() - time.sleep(10) - return True - else: - self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}". - format(catalog_name, media_file_name)) - return False + if response.status_code == requests.codes.ok: + media = mediaType.parseString(response.content, True) + number_of_files = len(media.get_Files().get_File()) + for index in xrange(0, number_of_files): + links_list = filter(lambda link: link.get_rel() == 'upload:default', + media.get_Files().get_File()[index].get_Link()) + for link in links_list: + # we skip ovf since it already uploaded. 
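+                            # (each Files/File entry in the vAppTemplate exposes its own
+                            #  upload link; the OVF descriptor's entry is already complete
+                            #  after the descriptor PUT above, so only VMDKs remain)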
+ if 'ovf' in link.get_href(): + continue + # The OVF file and VMDK must be in a same directory + head, tail = os.path.split(media_file_name) + file_vmdk = head + '/' + link.get_href().split("/")[-1] + if not os.path.isfile(file_vmdk): + return False + statinfo = os.stat(file_vmdk) + if statinfo.st_size == 0: + return False + hrefvmdk = link.get_href() + + if progress: + print("Uploading file: {}".format(file_vmdk)) + if progress: + widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ', + FileTransferSpeed()] + progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start() + + bytes_transferred = 0 + f = open(file_vmdk, 'rb') + while bytes_transferred < statinfo.st_size: + my_bytes = f.read(chunk_bytes) + if len(my_bytes) <= chunk_bytes: + headers = vca.vcloud_session.get_vcloud_headers() + headers['Content-Range'] = 'bytes %s-%s/%s' % ( + bytes_transferred, len(my_bytes) - 1, statinfo.st_size) + headers['Content-Length'] = str(len(my_bytes)) + response = Http.put(hrefvmdk, + headers=headers, + data=my_bytes, + verify=vca.verify, + logger=None) + + if response.status_code == requests.codes.ok: + bytes_transferred += len(my_bytes) + if progress: + progress_bar.update(bytes_transferred) + else: + self.logger.debug( + 'file upload failed with error: [%s] %s' % (response.status_code, + response.content)) + + f.close() + return False + f.close() + if progress: + progress_bar.finish() + time.sleep(10) + return True + else: + self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}". + format(catalog_name, media_file_name)) + return False + except Exception as exp: + self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}" + .format(catalog_name,media_file_name, exp)) + raise vimconn.vimconnException( + "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}" + .format(catalog_name,media_file_name, exp)) self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name)) return False @@ -1012,6 +1153,25 @@ class vimconnector(vimconn.vimconnector): return catalog.name return None + def get_catalog_obj(self, catalog_uuid=None, catalogs=None): + """ Method check catalog and return catalog name lookup done by catalog UUID. + + Args + catalog_name: catalog name as string + catalogs: list of catalogs. + + Return: catalogs name or None + """ + + if not self.validate_uuid4(uuid_string=catalog_uuid): + return None + + for catalog in catalogs: + catalog_id = catalog.get_id().split(":")[3] + if catalog_id == catalog_uuid: + return catalog + return None + def get_image_id_from_path(self, path=None, progress=False): """ Method upload OVF image to vCloud director. @@ -1032,9 +1192,6 @@ class vimconnector(vimconn.vimconnector): Return: if image uploaded correct method will provide image catalog UUID. 
""" - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed.") if not path: raise vimconn.vimconnException("Image path can't be None.") @@ -1058,17 +1215,22 @@ class vimconnector(vimconn.vimconnector): self.logger.debug("File name {} Catalog Name {} file path {} " "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name)) - catalogs = vca.get_catalogs() + try: + catalogs = self.vca.get_catalogs() + except Exception as exp: + self.logger.debug("Failed get catalogs() with Exception {} ".format(exp)) + raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp)) + if len(catalogs) == 0: self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name)) - result = self.create_vimcatalog(vca, catalog_md5_name) + result = self.create_vimcatalog(self.vca, catalog_md5_name) if not result: raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name)) - result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name, + result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name, media_name=filename, medial_file_name=path, progress=progress) if not result: raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name)) - return self.get_catalogid(catalog_name, vca.get_catalogs()) + return self.get_catalogid(catalog_name, self.vca.get_catalogs()) else: for catalog in catalogs: # search for existing catalog if we find same name we return ID @@ -1077,20 +1239,20 @@ class vimconnector(vimconn.vimconnector): self.logger.debug("Found existing catalog entry for {} " "catalog id {}".format(catalog_name, self.get_catalogid(catalog_md5_name, catalogs))) - return self.get_catalogid(catalog_md5_name, vca.get_catalogs()) + return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs()) # if we didn't find existing catalog we create a new one and upload image. self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name)) - result = self.create_vimcatalog(vca, catalog_md5_name) + result = self.create_vimcatalog(self.vca, catalog_md5_name) if not result: raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name)) - result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name, + result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name, media_name=filename, medial_file_name=path, progress=progress) if not result: raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name)) - return self.get_catalogid(catalog_md5_name, vca.get_catalogs()) + return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs()) def get_image_list(self, filter_dict={}): '''Obtain tenant images from VIM @@ -1103,12 +1265,10 @@ class vimconnector(vimconn.vimconnector): [{}, ...] List can be empty ''' - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed.") + try: image_list = [] - catalogs = vca.get_catalogs() + catalogs = self.vca.get_catalogs() if len(catalogs) == 0: return image_list else: @@ -1179,7 +1339,7 @@ class vimconnector(vimconn.vimconnector): return False return False - def get_namebyvappid(self, vca=None, vdc=None, vapp_uuid=None): + def get_namebyvappid(self, vdc=None, vapp_uuid=None): """Method returns vApp name from vCD and lookup done by vapp_id. 
Args: @@ -1198,8 +1358,13 @@ class vimconnector(vimconn.vimconnector): # we care only about UUID the rest doesn't matter vappid = ref.href.split("vapp")[1][1:] if vappid == vapp_uuid: - response = Http.get(ref.href, headers=vca.vcloud_session.get_vcloud_headers(), verify=vca.verify, + response = Http.get(ref.href, headers=self.vca.vcloud_session.get_vcloud_headers(), verify=self.vca.verify, logger=self.logger) + + #Retry login if session expired & retry sending request + if response.status_code == 403: + response = self.retry_rest('GET', ref.href) + tree = XmlElementTree.fromstring(response.content) return tree.attrib['name'] except Exception as e: @@ -1208,7 +1373,7 @@ class vimconnector(vimconn.vimconnector): return None def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list={}, - cloud_config=None, disk_list=None): + cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None): """Adds a VM instance to VIM Params: start: indicates if VM must start or boot in pause mode. Ignored @@ -1232,11 +1397,8 @@ class vimconnector(vimconn.vimconnector): """ self.logger.info("Creating new instance for entry {}".format(name)) - self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {}".format( - description, start, image_id, flavor_id, net_list, cloud_config)) - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed.") + self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format( + description, start, image_id, flavor_id, net_list, cloud_config, disk_list)) #new vm name = vmname + tenant_id + uuid new_vm_name = [name, '-', str(uuid.uuid4())] @@ -1248,11 +1410,15 @@ class vimconnector(vimconn.vimconnector): # return vapp_uuid # we check for presence of VDC, Catalog entry and Flavor. - vdc = vca.get_vdc(self.tenant_name) + vdc = self.get_vdc_details() if vdc is None: raise vimconn.vimconnNotFoundException( "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name)) - catalogs = vca.get_catalogs() + catalogs = self.vca.get_catalogs() + if catalogs is None: + #Retry once, if failed by refreshing token + self.get_token() + catalogs = self.vca.get_catalogs() if catalogs is None: raise vimconn.vimconnNotFoundException( "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name)) @@ -1266,11 +1432,11 @@ class vimconnector(vimconn.vimconnector): # Set vCPU and Memory based on flavor. - # vm_cpus = None vm_memory = None vm_disk = None - pci_devices_info = [] + numas = None + if flavor_id is not None: if flavor_id not in vimconnector.flavorlist: raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: " @@ -1285,11 +1451,7 @@ class vimconnector(vimconn.vimconnector): extended = flavor.get("extended", None) if extended: numas=extended.get("numas", None) - if numas: - for numa in numas: - for interface in numa.get("interfaces",() ): - if interface["dedicated"].strip()=="yes": - pci_devices_info.append(interface) + except Exception as exp: raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp)) @@ -1325,31 +1487,59 @@ class vimconnector(vimconn.vimconnector): # use: 'data', 'bridge', 'mgmt' # create vApp. Set vcpu and ram based on flavor id. 
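# --- Editor's aside (illustration, not part of the diff) -------------------
# The replacement code below retries create_vapp once after refreshing the
# vCloud session token, since a stale token makes the call come back empty.
# The same pattern as a generic sketch (names are illustrative):
def call_with_token_refresh(operation, refresh_token, retries=1):
    # Run `operation`; on a falsy result, refresh the session token and
    # retry up to `retries` more times before giving up.
    result = operation()
    while not result and retries > 0:
        refresh_token()
        result = operation()
        retries -= 1
    return result
# ---------------------------------------------------------------------------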
- vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName, - self.get_catalogbyid(image_id, catalogs), - network_name=None, # None while creating vapp - network_mode=network_mode, - vm_name=vmname_andid, - vm_cpus=vm_cpus, # can be None if flavor is None - vm_memory=vm_memory) # can be None if flavor is None - - if vapptask is None or vapptask is False: - raise vimconn.vimconnUnexpectedResponse("new_vminstance(): failed deploy vApp {}".format(vmname_andid)) - if type(vapptask) is VappTask: - vca.block_until_completed(vapptask) + try: + for retry in (1,2): + vapptask = self.vca.create_vapp(self.tenant_name, vmname_andid, templateName, + self.get_catalogbyid(image_id, catalogs), + network_name=None, # None while creating vapp + network_mode=network_mode, + vm_name=vmname_andid, + vm_cpus=vm_cpus, # can be None if flavor is None + vm_memory=vm_memory) # can be None if flavor is None + + if not vapptask and retry==1: + self.get_token() # Retry getting token + continue + else: + break + + if vapptask is None or vapptask is False: + raise vimconn.vimconnUnexpectedResponse( + "new_vminstance(): failed to create vApp {}".format(vmname_andid)) + if type(vapptask) is VappTask: + self.vca.block_until_completed(vapptask) + + except Exception as exp: + raise vimconn.vimconnUnexpectedResponse( + "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp)) # we should have now vapp in undeployed state. - vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid) - vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid) - if vapp is None: + try: + vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid) + + except Exception as exp: + raise vimconn.vimconnUnexpectedResponse( + "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}" + .format(vmname_andid, exp)) + + if vapp_uuid is None: raise vimconn.vimconnUnexpectedResponse( - "new_vminstance(): Failed failed retrieve vApp {} after we deployed".format( + "new_vminstance(): Failed to retrieve vApp {} after creation".format( vmname_andid)) - #Add PCI passthrough configrations - PCI_devices_status = False + #Add PCI passthrough/SRIOV configrations vm_obj = None - si = None + pci_devices_info = [] + sriov_net_info = [] + reserve_memory = False + + for net in net_list: + if net["type"]=="PF": + pci_devices_info.append(net) + elif (net["type"]=="VF" or net["type"]=="VFnotShared") and 'net_id'in net: + sriov_net_info.append(net) + + #Add PCI if len(pci_devices_info) > 0: self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info, vmname_andid )) @@ -1361,18 +1551,54 @@ class vimconnector(vimconn.vimconnector): pci_devices_info, vmname_andid) ) + reserve_memory = True else: self.logger.info("Fail to add PCI devives {} to VM {}".format( pci_devices_info, vmname_andid) ) - # add vm disk + + vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid) + # Modify vm disk if vm_disk: #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled result = self.modify_vm_disk(vapp_uuid, vm_disk) if result : self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid)) + #Add new or existing disks to vApp + if disk_list: + added_existing_disk = False + for disk in disk_list: + if 'device_type' in disk and disk['device_type'] == 'cdrom': + image_id = disk['image_id'] + # Adding CD-ROM to VM + # will revisit code once specification ready to support this feature + self.insert_media_to_vm(vapp, image_id) + elif 
"image_id" in disk and disk["image_id"] is not None: + self.logger.debug("Adding existing disk from image {} to vm {} ".format( + disk["image_id"] , vapp_uuid)) + self.add_existing_disk(catalogs=catalogs, + image_id=disk["image_id"], + size = disk["size"], + template_name=templateName, + vapp_uuid=vapp_uuid + ) + added_existing_disk = True + else: + #Wait till added existing disk gets reflected into vCD database/API + if added_existing_disk: + time.sleep(5) + added_existing_disk = False + self.add_new_disk(vapp_uuid, disk['size']) + + if numas: + # Assigning numa affinity setting + for numa in numas: + if 'paired-threads-id' in numa: + paired_threads_id = numa['paired-threads-id'] + self.set_numa_affinity(vapp_uuid, paired_threads_id) + # add NICs & connect to networks in netlist try: self.logger.info("Request to connect VM to a network: {}".format(net_list)) @@ -1400,58 +1626,109 @@ class vimconnector(vimconn.vimconnector): - NONE (No IP addressing mode specified.)""" if primary_netname is not None: - nets = filter(lambda n: n.name == interface_net_name, vca.get_networks(self.tenant_name)) + nets = filter(lambda n: n.name == interface_net_name, self.vca.get_networks(self.tenant_name)) if len(nets) == 1: self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name)) + + vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid) task = vapp.connect_to_network(nets[0].name, nets[0].href) if type(task) is GenericTask: - vca.block_until_completed(task) + self.vca.block_until_completed(task) # connect network to VM - with all DHCP by default - self.logger.info("new_vminstance(): Connecting VM to a network {}".format(nets[0].name)) - task = vapp.connect_vms(nets[0].name, - connection_index=nicIndex, - connections_primary_index=primary_nic_index, - ip_allocation_mode='DHCP') - if type(task) is GenericTask: - vca.block_until_completed(task) + + type_list = ['PF','VF','VFnotShared'] + if 'type' in net and net['type'] not in type_list: + # fetching nic type from vnf + if 'model' in net: + nic_type = net['model'] + self.logger.info("new_vminstance(): adding network adapter "\ + "to a network {}".format(nets[0].name)) + self.add_network_adapter_to_vms(vapp, nets[0].name, + primary_nic_index, + nicIndex, + net, + nic_type=nic_type) + else: + self.logger.info("new_vminstance(): adding network adapter "\ + "to a network {}".format(nets[0].name)) + self.add_network_adapter_to_vms(vapp, nets[0].name, + primary_nic_index, + nicIndex, + net) nicIndex += 1 - except KeyError: - # it might be a case if specific mandatory entry in dict is empty - self.logger.debug("Key error {}".format(KeyError.message)) - raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name)) - # deploy and power on vm - self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name)) - deploytask = vapp.deploy(powerOn=False) - if type(deploytask) is GenericTask: - vca.block_until_completed(deploytask) - - # If VM has PCI devices reserve memory for VM - if PCI_devices_status and vm_obj and vcenter_conect: - memReserve = vm_obj.config.hardware.memoryMB - spec = vim.vm.ConfigSpec() - spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve) - task = vm_obj.ReconfigVM_Task(spec=spec) - if task: - result = self.wait_for_vcenter_task(task, vcenter_conect) - self.logger.info("Reserved memmoery {} MB for "\ - "VM VM status: {}".format(str(memReserve),result)) - else: - self.logger.info("Fail to reserved memmoery {} to VM {}".format( - 
+                    str(memReserve),str(vm_obj)))
+
+            self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
+
+            vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
+            poweron_task = vapp.poweron()
+            if type(poweron_task) is GenericTask:
+                self.vca.block_until_completed(poweron_task)
 
-        self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
-        poweron_task = vapp.poweron()
-        if type(poweron_task) is GenericTask:
-            vca.block_until_completed(poweron_task)
+        except Exception as exp:
+            # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
+            self.logger.debug("new_vminstance(): Failed to create new vm instance {} with exception {}"
+                              .format(name, exp))
+            raise vimconn.vimconnException("new_vminstance(): Failed to create new vm instance {} with exception {}"
+                                           .format(name, exp))
 
         # check if vApp deployed and if that the case return vApp UUID otherwise -1
         wait_time = 0
         vapp_uuid = None
         while wait_time <= MAX_WAIT_TIME:
-            vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+            try:
+                vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
+            except Exception as exp:
+                raise vimconn.vimconnUnexpectedResponse(
+                        "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+                        .format(vmname_andid, exp))
+
             if vapp and vapp.me.deployed:
-                vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
+                vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
                 break
             else:
                 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
@@ -1483,11 +1760,8 @@ class vimconnector(vimconn.vimconnector):
         """Returns the VM instance information from VIM"""
         self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
-
-        vdc = vca.get_vdc(self.tenant_name)
+        vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnConnectionException(
                 "Failed to get a reference of VDC for a tenant
{}".format(self.tenant_name)) @@ -1532,11 +1806,8 @@ class vimconnector(vimconn.vimconnector): """ self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid)) - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed.") - vdc = vca.get_vdc(self.tenant_name) + vdc = self.get_vdc_details() if vdc is None: self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format( self.tenant_name)) @@ -1544,7 +1815,7 @@ class vimconnector(vimconn.vimconnector): "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name)) try: - vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid) + vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid) if vapp_name is None: self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)) return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid) @@ -1552,7 +1823,7 @@ class vimconnector(vimconn.vimconnector): self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid)) # Delete vApp and wait for status change if task executed and vApp is None. - vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name) + vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name) if vapp: if vapp.me.deployed: @@ -1561,14 +1832,14 @@ class vimconnector(vimconn.vimconnector): powered_off = False wait_time = 0 while wait_time <= MAX_WAIT_TIME: - vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name) + vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name) if not vapp: self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)) return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid) power_off_task = vapp.poweroff() if type(power_off_task) is GenericTask: - result = vca.block_until_completed(power_off_task) + result = self.vca.block_until_completed(power_off_task) if result: powered_off = True break @@ -1587,14 +1858,14 @@ class vimconnector(vimconn.vimconnector): wait_time = 0 undeployed = False while wait_time <= MAX_WAIT_TIME: - vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name) + vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name) if not vapp: self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)) return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid) undeploy_task = vapp.undeploy(action='powerOff') if type(undeploy_task) is GenericTask: - result = vca.block_until_completed(undeploy_task) + result = self.vca.block_until_completed(undeploy_task) if result: undeployed = True break @@ -1609,14 +1880,14 @@ class vimconnector(vimconn.vimconnector): # delete vapp self.logger.info("Start deletion of vApp {} ".format(vapp_name)) - vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name) + vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name) if vapp is not None: wait_time = 0 result = False while wait_time <= MAX_WAIT_TIME: - vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name) + vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name) if not vapp: self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)) return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid) @@ -1624,8 +1895,8 @@ class vimconnector(vimconn.vimconnector): delete_task = vapp.delete() if type(delete_task) is 
GenericTask:
-                    vca.block_until_completed(delete_task)
-                    result = vca.block_until_completed(delete_task)
+                    self.vca.block_until_completed(delete_task)
+                    result = self.vca.block_until_completed(delete_task)
                     if result:
                         break
                     else:
@@ -1641,7 +1912,7 @@ class vimconnector(vimconn.vimconnector):
             self.logger.debug(traceback.format_exc())
             raise vimconn.vimconnException("delete_vminstance(): Failed to delete vm instance {}".format(vm__vim_uuid))
 
-        if vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name) is None:
+        if self.vca.get_vapp(self.get_vdc_details(), vapp_name) is None:
             self.logger.info("Deleted vm instance {} successfully".format(vm__vim_uuid))
             return vm__vim_uuid
         else:
@@ -1673,163 +1944,233 @@ class vimconnector(vimconn.vimconnector):
 
         self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
 
-        mac_ip_addr={}
-        rheaders = {'Content-Type': 'application/xml'}
-        iso_edges = ['edge-2','edge-3','edge-6','edge-7','edge-8','edge-9','edge-10']
-
-        try:
-            for edge in iso_edges:
-                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
-                self.logger.debug("refresh_vms_status: NSX Manager url: {}".format(nsx_api_url))
-
-                resp = requests.get(self.nsx_manager + nsx_api_url,
-                                    auth = (self.nsx_user, self.nsx_password),
-                                    verify = False, headers = rheaders)
-
-                if resp.status_code == requests.codes.ok:
-                    dhcp_leases = XmlElementTree.fromstring(resp.text)
-                    for child in dhcp_leases:
-                        if child.tag == 'dhcpLeaseInfo':
-                            dhcpLeaseInfo = child
-                            for leaseInfo in dhcpLeaseInfo:
-                                for elem in leaseInfo:
-                                    if (elem.tag)=='macAddress':
-                                        mac_addr = elem.text
-                                    if (elem.tag)=='ipAddress':
-                                        ip_addr = elem.text
-                                if (mac_addr) is not None:
-                                    mac_ip_addr[mac_addr]= ip_addr
-                    self.logger.debug("NSX Manager DHCP Lease info: mac_ip_addr : {}".format(mac_ip_addr))
-                else:
-                    self.logger.debug("Error occurred while getting DHCP lease info from NSX Manager: {}".format(resp.content))
-        except KeyError:
-            self.logger.debug("Error in response from NSX Manager {}".format(KeyError.message))
-            self.logger.debug(traceback.format_exc())
-
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
-
-        vdc = vca.get_vdc(self.tenant_name)
+        vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
 
         vms_dict = {}
+        nsx_edge_list = []
         for vmuuid in vm_list:
-            vmname = self.get_namebyvappid(vca, vdc, vmuuid)
+            vmname = self.get_namebyvappid(self.get_vdc_details(), vmuuid)
             if vmname is not None:
 
-                the_vapp = vca.get_vapp(vdc, vmname)
-                vm_info = the_vapp.get_vms_details()
-                vm_status = vm_info[0]['status']
-                vm_pci_details = self.get_vm_pci_details(vmuuid)
-                vm_info[0].update(vm_pci_details)
+                try:
+                    vm_pci_details = self.get_vm_pci_details(vmuuid)
+                    the_vapp = self.vca.get_vapp(self.get_vdc_details(), vmname)
+                    vm_info = the_vapp.get_vms_details()
+                    vm_status = vm_info[0]['status']
+                    vm_info[0].update(vm_pci_details)
 
-                vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
-                           'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
-                           'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
+                    vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
+                               'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
+                               'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
 
-                # get networks
-                try:
+                    # get networks
                     vm_app_networks = the_vapp.get_vms_network_info()
                    for vapp_network in vm_app_networks:
                         for vm_network in vapp_network:
                             if
vm_network['name'] == vmname: #Assign IP Address based on MAC Address in NSX DHCP lease info - for mac_adres,ip_adres in mac_ip_addr.iteritems(): - if mac_adres == vm_network['mac']: - vm_network['ip']=ip_adres + if vm_network['ip'] is None: + if not nsx_edge_list: + nsx_edge_list = self.get_edge_details() + if nsx_edge_list is None: + raise vimconn.vimconnException("refresh_vms_status:"\ + "Failed to get edge details from NSX Manager") + if vm_network['mac'] is not None: + vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac']) + + vm_net_id = self.get_network_id_by_name(vm_network['network_name']) interface = {"mac_address": vm_network['mac'], - "vim_net_id": self.get_network_id_by_name(vm_network['network_name']), - "vim_interface_id": self.get_network_id_by_name(vm_network['network_name']), + "vim_net_id": vm_net_id, + "vim_interface_id": vm_net_id, 'ip_address': vm_network['ip']} # interface['vim_info'] = yaml.safe_dump(vm_network) vm_dict["interfaces"].append(interface) # add a vm to vm dict vms_dict.setdefault(vmuuid, vm_dict) - except KeyError: - self.logger.debug("Error in respond {}".format(KeyError.message)) + except Exception as exp: + self.logger.debug("Error in response {}".format(exp)) self.logger.debug(traceback.format_exc()) return vms_dict - def action_vminstance(self, vm__vim_uuid=None, action_dict=None): - """Send and action over a VM instance from VIM - Returns the vm_id if the action was successfully sent to the VIM""" - - self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict)) - if vm__vim_uuid is None or action_dict is None: - raise vimconn.vimconnException("Invalid request. VM id or action is None.") - - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed.") - vdc = vca.get_vdc(self.tenant_name) - if vdc is None: - return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name) + def get_edge_details(self): + """Get the NSX edge list from NSX Manager + Returns list of NSX edges + """ + edge_list = [] + rheaders = {'Content-Type': 'application/xml'} + nsx_api_url = '/api/4.0/edges' - vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid) - if vapp_name is None: - self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)) - raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid)) - else: - self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid)) + self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url)) try: - the_vapp = vca.get_vapp(vdc, vapp_name) - # TODO fix all status - if "start" in action_dict: - vm_info = the_vapp.get_vms_details() - vm_status = vm_info[0]['status'] - self.logger.info("Power on vApp: vm_status:{} {}".format(type(vm_status),vm_status)) - if vm_status == "Suspended" or vm_status == "Powered off": - power_on_task = the_vapp.poweron() - if power_on_task is not None and type(power_on_task) is GenericTask: - result = vca.block_until_completed(power_on_task) - if result: - self.logger.info("action_vminstance: Powered on vApp: {}".format(vapp_name)) - else: - self.logger.info("action_vminstance: Failed to power on vApp: {}".format(vapp_name)) - else: - self.logger.info("action_vminstance: Wait for vApp {} to power on".format(vapp_name)) - elif "rebuild" in action_dict: - self.logger.info("action_vminstance: Rebuilding vApp: {}".format(vapp_name)) - 
power_on_task = the_vapp.deploy(powerOn=True) - if type(power_on_task) is GenericTask: - result = vca.block_until_completed(power_on_task) - if result: - self.logger.info("action_vminstance: Rebuilt vApp: {}".format(vapp_name)) - else: - self.logger.info("action_vminstance: Failed to rebuild vApp: {}".format(vapp_name)) - else: - self.logger.info("action_vminstance: Wait for vApp rebuild {} to power on".format(vapp_name)) - elif "pause" in action_dict: - pass - ## server.pause() - elif "resume" in action_dict: - pass - ## server.resume() - elif "shutoff" in action_dict or "shutdown" in action_dict: - power_off_task = the_vapp.undeploy(action='powerOff') - if type(power_off_task) is GenericTask: - result = vca.block_until_completed(power_off_task) - if result: - self.logger.info("action_vminstance: Powered off vApp: {}".format(vapp_name)) + resp = requests.get(self.nsx_manager + nsx_api_url, + auth = (self.nsx_user, self.nsx_password), + verify = False, headers = rheaders) + if resp.status_code == requests.codes.ok: + paged_Edge_List = XmlElementTree.fromstring(resp.text) + for edge_pages in paged_Edge_List: + if edge_pages.tag == 'edgePage': + for edge_summary in edge_pages: + if edge_summary.tag == 'pagingInfo': + for element in edge_summary: + if element.tag == 'totalCount' and element.text == '0': + raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}" + .format(self.nsx_manager)) + + if edge_summary.tag == 'edgeSummary': + for element in edge_summary: + if element.tag == 'id': + edge_list.append(element.text) else: - self.logger.info("action_vminstance: Failed to power off vApp: {}".format(vapp_name)) + raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}" + .format(self.nsx_manager)) + + if not edge_list: + raise vimconn.vimconnException("get_edge_details: "\ + "No NSX edge details found: {}" + .format(self.nsx_manager)) else: - self.logger.info("action_vminstance: Wait for vApp {} to power off".format(vapp_name)) - elif "forceOff" in action_dict: - the_vapp.reset() - elif "terminate" in action_dict: - the_vapp.delete() - # elif "createImage" in action_dict: - # server.create_image() + self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list)) + return edge_list else: - pass - except: - pass + self.logger.debug("get_edge_details: " + "Failed to get NSX edge details from NSX Manager: {}" + .format(resp.content)) + return None + + except Exception as exp: + self.logger.debug("get_edge_details: "\ + "Failed to get NSX edge details from NSX Manager: {}" + .format(exp)) + raise vimconn.vimconnException("get_edge_details: "\ + "Failed to get NSX edge details from NSX Manager: {}" + .format(exp)) + + + def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address): + """Get IP address details from NSX edges, using the MAC address + PARAMS: nsx_edges : List of NSX edges + mac_address : Find IP address corresponding to this MAC address + Returns: IP address corrresponding to the provided MAC address + """ + + ip_addr = None + rheaders = {'Content-Type': 'application/xml'} + + self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge") + + try: + for edge in nsx_edges: + nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo' + + resp = requests.get(self.nsx_manager + nsx_api_url, + auth = (self.nsx_user, self.nsx_password), + verify = False, headers = rheaders) + + if resp.status_code == requests.codes.ok: + dhcp_leases = XmlElementTree.fromstring(resp.text) + for child in dhcp_leases: + if child.tag == 
'dhcpLeaseInfo': + dhcpLeaseInfo = child + for leaseInfo in dhcpLeaseInfo: + for elem in leaseInfo: + if (elem.tag)=='macAddress': + edge_mac_addr = elem.text + if (elem.tag)=='ipAddress': + ip_addr = elem.text + if edge_mac_addr is not None: + if edge_mac_addr == mac_address: + self.logger.debug("Found ip addr {} for mac {} at NSX edge {}" + .format(ip_addr, mac_address,edge)) + return ip_addr + else: + self.logger.debug("get_ipaddr_from_NSXedge: "\ + "Error occurred while getting DHCP lease info from NSX Manager: {}" + .format(resp.content)) + + self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge") + return None + + except XmlElementTree.ParseError as Err: + self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True) + + + def action_vminstance(self, vm__vim_uuid=None, action_dict=None): + """Send and action over a VM instance from VIM + Returns the vm_id if the action was successfully sent to the VIM""" + + self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict)) + if vm__vim_uuid is None or action_dict is None: + raise vimconn.vimconnException("Invalid request. VM id or action is None.") + + vdc = self.get_vdc_details() + if vdc is None: + return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name) + + vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid) + if vapp_name is None: + self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)) + raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid)) + else: + self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid)) + + try: + the_vapp = self.vca.get_vapp(vdc, vapp_name) + # TODO fix all status + if "start" in action_dict: + vm_info = the_vapp.get_vms_details() + vm_status = vm_info[0]['status'] + self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name)) + if vm_status == "Suspended" or vm_status == "Powered off": + power_on_task = the_vapp.poweron() + result = self.vca.block_until_completed(power_on_task) + self.instance_actions_result("start", result, vapp_name) + elif "rebuild" in action_dict: + self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name)) + rebuild_task = the_vapp.deploy(powerOn=True) + result = self.vca.block_until_completed(rebuild_task) + self.instance_actions_result("rebuild", result, vapp_name) + elif "pause" in action_dict: + self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name)) + pause_task = the_vapp.undeploy(action='suspend') + result = self.vca.block_until_completed(pause_task) + self.instance_actions_result("pause", result, vapp_name) + elif "resume" in action_dict: + self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name)) + power_task = the_vapp.poweron() + result = self.vca.block_until_completed(power_task) + self.instance_actions_result("resume", result, vapp_name) + elif "shutoff" in action_dict or "shutdown" in action_dict: + action_name , value = action_dict.items()[0] + self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name)) + power_off_task = the_vapp.undeploy(action='powerOff') + result = self.vca.block_until_completed(power_off_task) + if action_name == "shutdown": + self.instance_actions_result("shutdown", result, vapp_name) + else: + self.instance_actions_result("shutoff", result, vapp_name) + elif "forceOff" in action_dict: + result = the_vapp.undeploy(action='force') + 
self.instance_actions_result("forceOff", result, vapp_name) + elif "reboot" in action_dict: + self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name)) + reboot_task = the_vapp.reboot() + else: + raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict)) + return vm__vim_uuid + except Exception as exp : + self.logger.debug("action_vminstance: Failed with Exception {}".format(exp)) + raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp)) + + def instance_actions_result(self, action, result, vapp_name): + if result: + self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name)) + else: + self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name)) def get_vminstance_console(self, vm_id, console_type="vnc"): """ @@ -1904,10 +2245,6 @@ class vimconnector(vimconn.vimconnector): The return network name. """ - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed.") - if not network_uuid: return None @@ -1934,10 +2271,6 @@ class vimconnector(vimconn.vimconnector): network_uuid: network_id """ - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed.") - if not network_name: self.logger.debug("get_network_id_by_name() : Network name is empty") return None @@ -1967,18 +2300,18 @@ class vimconnector(vimconn.vimconnector): The return XML respond """ - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed") - - url_list = [vca.host, '/api/org'] + url_list = [self.vca.host, '/api/org'] vm_list_rest_call = ''.join(url_list) - if not (not vca.vcloud_session or not vca.vcloud_session.organization): + if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization): response = Http.get(url=vm_list_rest_call, - headers=vca.vcloud_session.get_vcloud_headers(), - verify=vca.verify, - logger=vca.logger) + headers=self.vca.vcloud_session.get_vcloud_headers(), + verify=self.vca.verify, + logger=self.vca.logger) + + if response.status_code == 403: + response = self.retry_rest('GET', vm_list_rest_call) + if response.status_code == requests.codes.ok: return response.content @@ -1996,21 +2329,22 @@ class vimconnector(vimconn.vimconnector): The return XML respond """ - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed") - if org_uuid is None: return None - url_list = [vca.host, '/api/org/', org_uuid] + url_list = [self.vca.host, '/api/org/', org_uuid] vm_list_rest_call = ''.join(url_list) - if not (not vca.vcloud_session or not vca.vcloud_session.organization): + if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization): response = Http.get(url=vm_list_rest_call, - headers=vca.vcloud_session.get_vcloud_headers(), - verify=vca.verify, - logger=vca.logger) + headers=self.vca.vcloud_session.get_vcloud_headers(), + verify=self.vca.verify, + logger=self.vca.logger) + + #Retry login if session expired & retry sending request + if response.status_code == 403: + response = self.retry_rest('GET', vm_list_rest_call) + if response.status_code == requests.codes.ok: return response.content @@ -2031,9 +2365,6 @@ class vimconnector(vimconn.vimconnector): """ org_dict = {} - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed") if org_uuid is None: return org_dict @@ -2071,9 +2402,6 @@ 
class vimconnector(vimconn.vimconnector): """ org_dict = {} - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed") content = self.list_org_action() try: @@ -2245,21 +2573,22 @@ class vimconnector(vimconn.vimconnector): The return XML respond """ - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed") - if network_uuid is None: return None - url_list = [vca.host, '/api/network/', network_uuid] + url_list = [self.vca.host, '/api/network/', network_uuid] vm_list_rest_call = ''.join(url_list) - if not (not vca.vcloud_session or not vca.vcloud_session.organization): + if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization): response = Http.get(url=vm_list_rest_call, - headers=vca.vcloud_session.get_vcloud_headers(), - verify=vca.verify, - logger=vca.logger) + headers=self.vca.vcloud_session.get_vcloud_headers(), + verify=self.vca.verify, + logger=self.vca.logger) + + #Retry login if session expired & retry sending request + if response.status_code == 403: + response = self.retry_rest('GET', vm_list_rest_call) + if response.status_code == requests.codes.ok: return response.content @@ -2303,8 +2632,8 @@ class vimconnector(vimconn.vimconnector): if network_uuid is None: return network_uuid - content = self.get_network_action(network_uuid=network_uuid) try: + content = self.get_network_action(network_uuid=network_uuid) vm_list_xmlroot = XmlElementTree.fromstring(content) network_configuration['status'] = vm_list_xmlroot.get("status") @@ -2320,8 +2649,9 @@ class vimconnector(vimconn.vimconnector): if tagKey != "": network_configuration[tagKey] = configuration.text.strip() return network_configuration - except: - pass + except Exception as exp : + self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp)) + raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp)) return network_configuration @@ -2387,7 +2717,7 @@ class vimconnector(vimconn.vimconnector): vm_list_xmlroot = XmlElementTree.fromstring(content) vcd_uuid = vm_list_xmlroot.get('id').split(":") if len(vcd_uuid) == 4: - self.logger.info("Create new network name: {} uuid: {}".format(network_name, vcd_uuid[3])) + self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3])) return vcd_uuid[3] except: self.logger.debug("Failed create network {}".format(network_name)) @@ -2473,26 +2803,50 @@ class vimconnector(vimconn.vimconnector): except: return None - #Configure IP profile of the network - ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE - - gateway_address=ip_profile['gateway_address'] - dhcp_count=int(ip_profile['dhcp_count']) - subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address']) + try: + #Configure IP profile of the network + ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE - if ip_profile['dhcp_enabled']==True: - dhcp_enabled='true' - else: - dhcp_enabled='false' - dhcp_start_address=ip_profile['dhcp_start_address'] + if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None: + subnet_rand = random.randint(0, 255) + ip_base = "192.168.{}.".format(subnet_rand) + ip_profile['subnet_address'] = ip_base + "0/24" + else: + ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.' 
+ + if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None: + ip_profile['gateway_address']=ip_base + "1" + if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None: + ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count'] + if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None: + ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled'] + if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None: + ip_profile['dhcp_start_address']=ip_base + "3" + if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None: + ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version'] + if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None: + ip_profile['dns_address']=ip_base + "2" + + gateway_address=ip_profile['gateway_address'] + dhcp_count=int(ip_profile['dhcp_count']) + subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address']) + + if ip_profile['dhcp_enabled']==True: + dhcp_enabled='true' + else: + dhcp_enabled='false' + dhcp_start_address=ip_profile['dhcp_start_address'] - #derive dhcp_end_address from dhcp_start_address & dhcp_count - end_ip_int = int(netaddr.IPAddress(dhcp_start_address)) - end_ip_int += dhcp_count - 1 - dhcp_end_address = str(netaddr.IPAddress(end_ip_int)) + #derive dhcp_end_address from dhcp_start_address & dhcp_count + end_ip_int = int(netaddr.IPAddress(dhcp_start_address)) + end_ip_int += dhcp_count - 1 + dhcp_end_address = str(netaddr.IPAddress(end_ip_int)) - ip_version=ip_profile['ip_version'] - dns_address=ip_profile['dns_address'] + ip_version=ip_profile['ip_version'] + dns_address=ip_profile['dns_address'] + except KeyError as exp: + self.logger.debug("Create Network REST: Key error {}".format(exp)) + raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp)) # either use client provided UUID or search for a first available # if both are not defined we return none @@ -2500,64 +2854,41 @@ class vimconnector(vimconn.vimconnector): url_list = [vca.host, '/api/admin/network/', parent_network_uuid] add_vdc_rest_url = ''.join(url_list) - if net_type=='ptp': - fence_mode="isolated" - isshared='false' - is_inherited='false' - data = """ - Openmano created - - - - {1:s} - {2:s} - {3:s} - {4:s} - {5:s} - - - {6:s} - {7:s} - - - - - {8:s} - - {9:s} - """.format(escape(network_name), is_inherited, gateway_address, - subnet_address, dns_address, dhcp_enabled, - dhcp_start_address, dhcp_end_address, fence_mode, isshared) - - else: - fence_mode="bridged" - is_inherited='false' - data = """ - Openmano created - - - - {1:s} - {2:s} - {3:s} - {4:s} - {5:s} - - - {6:s} - {7:s} - - - - - - {9:s} - - {10:s} - """.format(escape(network_name), is_inherited, gateway_address, - subnet_address, dns_address, dhcp_enabled, - dhcp_start_address, dhcp_end_address, available_networks, - fence_mode, isshared) + #Creating all networks as Direct Org VDC type networks. + #Unused in case of Underlay (data/ptp) network interface. 
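The defaulting block above fills every missing ip_profile field from the first three octets of the chosen /24, then derives the DHCP end address from the start address and the lease count. A self-contained sketch of the same derivation, assuming the function name is illustrative; netaddr is the library this module already imports, and unlike the original this sketch does not treat explicitly-None values as missing:

# Sketch of the ip_profile defaulting and DHCP-range derivation above.
import random
import netaddr

def fill_ip_profile(ip_profile=None):
    ip_profile = dict(ip_profile or {})
    if not ip_profile.get('subnet_address'):
        # no subnet given: pick a random 192.168.x.0/24, as create_network_rest does
        ip_profile['subnet_address'] = "192.168.{}.0/24".format(random.randint(0, 255))
    ip_base = ip_profile['subnet_address'].rsplit('.', 1)[0] + '.'
    ip_profile.setdefault('gateway_address', ip_base + "1")
    ip_profile.setdefault('dns_address', ip_base + "2")
    ip_profile.setdefault('dhcp_start_address', ip_base + "3")
    ip_profile.setdefault('dhcp_count', 50)
    # end address = start + count - 1, computed on the integer form of the IP
    end_int = int(netaddr.IPAddress(ip_profile['dhcp_start_address'])) + int(ip_profile['dhcp_count']) - 1
    ip_profile['dhcp_end_address'] = str(netaddr.IPAddress(end_int))
    return ip_profile

profile = fill_ip_profile({'subnet_address': "10.0.0.0/24"})
assert profile['gateway_address'] == "10.0.0.1"
assert profile['dhcp_end_address'] == "10.0.0.52"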
+ fence_mode="bridged" + is_inherited='false' + dns_list = dns_address.split(";") + dns1 = dns_list[0] + dns2_text = "" + if len(dns_list) >= 2: + dns2_text = "\n {}\n".format(dns_list[1]) + data = """ + Openmano created + + + + {1:s} + {2:s} + {3:s} + {4:s}{5:s} + {6:s} + + + {7:s} + {8:s} + + + + + + {10:s} + + {11:s} + """.format(escape(network_name), is_inherited, gateway_address, + subnet_address, dns1, dns2_text, dhcp_enabled, + dhcp_start_address, dhcp_end_address, available_networks, + fence_mode, isshared) headers = vca.vcloud_session.get_vcloud_headers() headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' @@ -2569,8 +2900,8 @@ class vimconnector(vimconn.vimconnector): logger=vca.logger) if response.status_code != 201: - self.logger.debug("Create Network POST REST API call failed. Return status code {}" - .format(response.status_code)) + self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}" + .format(response.status_code,response.content)) else: network = networkType.parseString(response.content, True) create_nw_task = network.get_Tasks().get_Task()[0] @@ -2578,7 +2909,7 @@ class vimconnector(vimconn.vimconnector): # if we all ok we respond with content after network creation completes # otherwise by default return None if create_nw_task is not None: - self.logger.debug("Create Network REST : Waiting for Nw creation complete") + self.logger.debug("Create Network REST : Waiting for Network creation complete") status = vca.block_until_completed(create_nw_task) if status: return response.content @@ -2682,8 +3013,7 @@ class vimconnector(vimconn.vimconnector): # application/vnd.vmware.admin.providervdc+xml # we need find a template from witch we instantiate VDC if child.tag.split("}")[1] == 'VdcTemplate': - if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml' and child.attrib.get( - 'name') == 'openmano': + if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml': vdc_template_ref = child.attrib.get('href') except: self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call)) @@ -2706,6 +3036,11 @@ class vimconnector(vimconn.vimconnector): headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml' response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify, logger=vca.logger) + + vdc_task = taskType.parseString(response.content, True) + if type(vdc_task) is GenericTask: + self.vca.block_until_completed(vdc_task) + # if we all ok we respond with content otherwise by default None if response.status_code >= 200 and response.status_code < 300: return response.content @@ -2820,7 +3155,7 @@ class vimconnector(vimconn.vimconnector): if need_admin_access: vca = self.connect_as_admin() else: - vca = self.connect() + vca = self.vca if not vca: raise vimconn.vimconnConnectionException("self.connect() is failed") @@ -2836,6 +3171,10 @@ class vimconnector(vimconn.vimconnector): verify=vca.verify, logger=vca.logger) + if response.status_code == 403: + if need_admin_access == False: + response = self.retry_rest('GET', get_vapp_restcall) + if response.status_code != requests.codes.ok: self.logger.debug("REST API call {} failed. 
Return status code {}".format(get_vapp_restcall, response.status_code)) @@ -2942,21 +3281,20 @@ class vimconnector(vimconn.vimconnector): def acuire_console(self, vm_uuid=None): - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed") if vm_uuid is None: return None - if not (not vca.vcloud_session or not vca.vcloud_session.organization): + if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization): vm_dict = self.get_vapp_details_rest(self, vapp_uuid=vm_uuid) console_dict = vm_dict['acquireTicket'] console_rest_call = console_dict['href'] response = Http.post(url=console_rest_call, - headers=vca.vcloud_session.get_vcloud_headers(), - verify=vca.verify, - logger=vca.logger) + headers=self.vca.vcloud_session.get_vcloud_headers(), + verify=self.vca.verify, + logger=self.vca.logger) + if response.status_code == 403: + response = self.retry_rest('POST', console_rest_call) if response.status_code == requests.codes.ok: return response.content @@ -3013,17 +3351,17 @@ class vimconnector(vimconn.vimconnector): Returns: The return network uuid or return None """ - vca = self.connect() - if not vca: - raise vimconn.vimconnConnectionException("self.connect() is failed") if disk_href is None or disk_size is None: return None - if vca.vcloud_session and vca.vcloud_session.organization: + if self.vca.vcloud_session and self.vca.vcloud_session.organization: response = Http.get(url=disk_href, - headers=vca.vcloud_session.get_vcloud_headers(), - verify=vca.verify, - logger=vca.logger) + headers=self.vca.vcloud_session.get_vcloud_headers(), + verify=self.vca.verify, + logger=self.vca.logger) + + if response.status_code == 403: + response = self.retry_rest('GET', disk_href) if response.status_code != requests.codes.ok: self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href, @@ -3045,13 +3383,17 @@ class vimconnector(vimconn.vimconnector): xml_declaration=True) #Send PUT request to modify disk size - headers = vca.vcloud_session.get_vcloud_headers() + headers = self.vca.vcloud_session.get_vcloud_headers() headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1' response = Http.put(url=disk_href, data=data, headers=headers, - verify=vca.verify, logger=self.logger) + verify=self.vca.verify, logger=self.logger) + + if response.status_code == 403: + add_headers = {'Content-Type': headers['Content-Type']} + response = self.retry_rest('PUT', disk_href, add_headers, data) if response.status_code != 202: self.logger.debug("PUT REST API call {} failed. 
Return status code {}".format(disk_href, @@ -3059,7 +3401,7 @@ class vimconnector(vimconn.vimconnector): else: modify_disk_task = taskType.parseString(response.content, True) if type(modify_disk_task) is GenericTask: - status = vca.block_until_completed(modify_disk_task) + status = self.vca.block_until_completed(modify_disk_task) return status return None @@ -3081,33 +3423,16 @@ class vimconnector(vimconn.vimconnector): vcenter_conect object """ vm_obj = None - vcenter_conect = None self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid)) - try: - vm_vcenter_info = self.get_vm_vcenter_info(vapp_uuid) - except Exception as exp: - self.logger.error("Error occurred while getting vCenter infromationn"\ - " for VM : {}".format(exp)) - raise vimconn.vimconnException(message=exp) + vcenter_conect, content = self.get_vcenter_content() + vm_moref_id = self.get_vm_moref_id(vapp_uuid) - if vm_vcenter_info["vm_moref_id"]: - context = None - if hasattr(ssl, '_create_unverified_context'): - context = ssl._create_unverified_context() + if vm_moref_id: try: no_of_pci_devices = len(pci_devices) if no_of_pci_devices > 0: - vcenter_conect = SmartConnect( - host=vm_vcenter_info["vm_vcenter_ip"], - user=vm_vcenter_info["vm_vcenter_user"], - pwd=vm_vcenter_info["vm_vcenter_password"], - port=int(vm_vcenter_info["vm_vcenter_port"]), - sslContext=context) - atexit.register(Disconnect, vcenter_conect) - content = vcenter_conect.RetrieveContent() - #Get VM and its host - host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"]) + host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id) self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj)) if host_obj and vm_obj: #get PCI devies from host on which vapp is currently installed @@ -3376,7 +3701,7 @@ class vimconnector(vimconn.vimconnector): exp)) return task - def get_vm_vcenter_info(self , vapp_uuid): + def get_vm_vcenter_info(self): """ Method to get details of vCenter and vm @@ -3409,16 +3734,8 @@ class vimconnector(vimconn.vimconnector): else: raise vimconn.vimconnException(message="vCenter user password is not provided."\ " Please provide vCenter user password while attaching datacenter to tenant in --config") - try: - vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True) - if vm_details and "vm_vcenter_info" in vm_details: - vm_vcenter_info["vm_moref_id"] = vm_details["vm_vcenter_info"].get("vm_moref_id", None) - - return vm_vcenter_info - except Exception as exp: - self.logger.error("Error occurred while getting vCenter infromationn"\ - " for VM : {}".format(exp)) + return vm_vcenter_info def get_vm_pci_details(self, vmuuid): @@ -3434,23 +3751,12 @@ class vimconnector(vimconn.vimconnector): """ vm_pci_devices_info = {} try: - vm_vcenter_info = self.get_vm_vcenter_info(vmuuid) - if vm_vcenter_info["vm_moref_id"]: - context = None - if hasattr(ssl, '_create_unverified_context'): - context = ssl._create_unverified_context() - vcenter_conect = SmartConnect(host=vm_vcenter_info["vm_vcenter_ip"], - user=vm_vcenter_info["vm_vcenter_user"], - pwd=vm_vcenter_info["vm_vcenter_password"], - port=int(vm_vcenter_info["vm_vcenter_port"]), - sslContext=context - ) - atexit.register(Disconnect, vcenter_conect) - content = vcenter_conect.RetrieveContent() - + vcenter_conect, content = self.get_vcenter_content() + vm_moref_id = self.get_vm_moref_id(vmuuid) + if vm_moref_id: #Get VM and its host if content: - host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"]) + host_obj, 
vm_obj = self.get_vm_obj(content, vm_moref_id) if host_obj and vm_obj: vm_pci_devices_info["host_name"]= host_obj.name vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress @@ -3469,3 +3775,1372 @@ class vimconnector(vimconn.vimconnector): " for VM : {}".format(exp)) raise vimconn.vimconnException(message=exp) + def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None): + """ + Method to add network adapter type to vm + Args : + network_name - name of network + primary_nic_index - int value for primary nic index + nicIndex - int value for nic index + nic_type - specify model name to which add to vm + Returns: + None + """ + + try: + ip_address = None + floating_ip = False + if 'floating_ip' in net: floating_ip = net['floating_ip'] + + # Stub for ip_address feature + if 'ip_address' in net: ip_address = net['ip_address'] + + if floating_ip: + allocation_mode = "POOL" + elif ip_address: + allocation_mode = "MANUAL" + else: + allocation_mode = "DHCP" + + if not nic_type: + for vms in vapp._get_vms(): + vm_id = (vms.id).split(':')[-1] + + url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id) + + response = Http.get(url=url_rest_call, + headers=self.vca.vcloud_session.get_vcloud_headers(), + verify=self.vca.verify, + logger=self.vca.logger) + + if response.status_code == 403: + response = self.retry_rest('GET', url_rest_call) + + if response.status_code != 200: + self.logger.error("REST call {} failed reason : {}"\ + "status code : {}".format(url_rest_call, + response.content, + response.status_code)) + raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\ + "network connection section") + + data = response.content + if '' not in data: + item = """{} + + {} + true + {} + """.format(primary_nic_index, network_name, nicIndex, + allocation_mode) + # Stub for ip_address feature + if ip_address: + ip_tag = '{}'.format(ip_address) + item = item.replace('\n','\n{}\n'.format(ip_tag)) + + data = data.replace('\n','\n{}\n'.format(item)) + else: + new_item = """ + {} + true + {} + """.format(network_name, nicIndex, + allocation_mode) + # Stub for ip_address feature + if ip_address: + ip_tag = '{}'.format(ip_address) + new_item = new_item.replace('\n','\n{}\n'.format(ip_tag)) + + data = data.replace('\n','\n{}\n'.format(new_item)) + + headers = self.vca.vcloud_session.get_vcloud_headers() + headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml' + response = Http.put(url=url_rest_call, headers=headers, data=data, + verify=self.vca.verify, + logger=self.vca.logger) + + if response.status_code == 403: + add_headers = {'Content-Type': headers['Content-Type']} + response = self.retry_rest('PUT', url_rest_call, add_headers, data) + + if response.status_code != 202: + self.logger.error("REST call {} failed reason : {}"\ + "status code : {} ".format(url_rest_call, + response.content, + response.status_code)) + raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\ + "network connection section") + else: + nic_task = taskType.parseString(response.content, True) + if isinstance(nic_task, GenericTask): + self.vca.block_until_completed(nic_task) + self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\ + "default NIC type".format(vm_id)) + else: + self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\ + "connect NIC type".format(vm_id)) + else: + for vms in vapp._get_vms(): + vm_id = 
(vms.id).split(':')[-1] + + url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id) + + response = Http.get(url=url_rest_call, + headers=self.vca.vcloud_session.get_vcloud_headers(), + verify=self.vca.verify, + logger=self.vca.logger) + + if response.status_code == 403: + response = self.retry_rest('GET', url_rest_call) + + if response.status_code != 200: + self.logger.error("REST call {} failed reason : {}"\ + "status code : {}".format(url_rest_call, + response.content, + response.status_code)) + raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\ + "network connection section") + data = response.content + if '' not in data: + item = """{} + + {} + true + {} + {} + """.format(primary_nic_index, network_name, nicIndex, + allocation_mode, nic_type) + # Stub for ip_address feature + if ip_address: + ip_tag = '{}'.format(ip_address) + item = item.replace('\n','\n{}\n'.format(ip_tag)) + + data = data.replace('\n','\n{}\n'.format(item)) + else: + new_item = """ + {} + true + {} + {} + """.format(network_name, nicIndex, + allocation_mode, nic_type) + # Stub for ip_address feature + if ip_address: + ip_tag = '{}'.format(ip_address) + new_item = new_item.replace('\n','\n{}\n'.format(ip_tag)) + + data = data.replace('\n','\n{}\n'.format(new_item)) + + headers = self.vca.vcloud_session.get_vcloud_headers() + headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml' + response = Http.put(url=url_rest_call, headers=headers, data=data, + verify=self.vca.verify, + logger=self.vca.logger) + + if response.status_code == 403: + add_headers = {'Content-Type': headers['Content-Type']} + response = self.retry_rest('PUT', url_rest_call, add_headers, data) + + if response.status_code != 202: + self.logger.error("REST call {} failed reason : {}"\ + "status code : {}".format(url_rest_call, + response.content, + response.status_code)) + raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\ + "network connection section") + else: + nic_task = taskType.parseString(response.content, True) + if isinstance(nic_task, GenericTask): + self.vca.block_until_completed(nic_task) + self.logger.info("add_network_adapter_to_vms(): VM {} "\ + "conneced to NIC type {}".format(vm_id, nic_type)) + else: + self.logger.error("add_network_adapter_to_vms(): VM {} "\ + "failed to connect NIC type {}".format(vm_id, nic_type)) + except Exception as exp: + self.logger.error("add_network_adapter_to_vms() : exception occurred "\ + "while adding Network adapter") + raise vimconn.vimconnException(message=exp) + + + def set_numa_affinity(self, vmuuid, paired_threads_id): + """ + Method to assign numa affinity in vm configuration parammeters + Args : + vmuuid - vm uuid + paired_threads_id - one or more virtual processor + numbers + Returns: + return if True + """ + try: + vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid) + if vm_moref_id and vm_vcenter_host and vm_vcenter_username: + context = None + if hasattr(ssl, '_create_unverified_context'): + context = ssl._create_unverified_context() + vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username, + pwd=self.passwd, port=int(vm_vcenter_port), + sslContext=context) + atexit.register(Disconnect, vcenter_conect) + content = vcenter_conect.RetrieveContent() + + host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id) + if vm_obj: + config_spec = vim.vm.ConfigSpec() + config_spec.extraConfig = [] + opt = 
vim.option.OptionValue() + opt.key = 'numa.nodeAffinity' + opt.value = str(paired_threads_id) + config_spec.extraConfig.append(opt) + task = vm_obj.ReconfigVM_Task(config_spec) + if task: + result = self.wait_for_vcenter_task(task, vcenter_conect) + extra_config = vm_obj.config.extraConfig + flag = False + for opts in extra_config: + if 'numa.nodeAffinity' in opts.key: + flag = True + self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\ + "value {} for vm {}".format(opt.value, vm_obj)) + if flag: + return + else: + self.logger.error("set_numa_affinity: Failed to assign numa affinity") + except Exception as exp: + self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\ + "for VM {} : {}".format(vm_obj, vm_moref_id)) + raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\ + "affinity".format(exp)) + + + + def cloud_init(self, vapp, cloud_config): + """ + Method to inject ssh-key + vapp - vapp object + cloud_config a dictionary with: + 'key-pairs': (optional) list of strings with the public key to be inserted to the default user + 'users': (optional) list of users to be inserted, each item is a dict with: + 'name': (mandatory) user name, + 'key-pairs': (optional) list of strings with the public key to be inserted to the user + 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init, + or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file + 'config-files': (optional). List of files to be transferred. Each item is a dict with: + 'dest': (mandatory) string with the destination absolute path + 'encoding': (optional, by default text). Can be one of: + 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64' + 'content' (mandatory): string with the content of the file + 'permissions': (optional) string with file permissions, typically octal notation '0644' + 'owner': (optional) file owner, string with the format 'owner:group' + 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk + """ + + try: + if isinstance(cloud_config, dict): + key_pairs = [] + userdata = [] + if "key-pairs" in cloud_config: + key_pairs = cloud_config["key-pairs"] + + if "users" in cloud_config: + userdata = cloud_config["users"] + + for key in key_pairs: + for user in userdata: + if 'name' in user: user_name = user['name'] + if 'key-pairs' in user and len(user['key-pairs']) > 0: + for user_key in user['key-pairs']: + customize_script = """ + #!/bin/bash + echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log + if [ "$1" = "precustomization" ];then + echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log + if [ ! -d /root/.ssh ];then + mkdir /root/.ssh + chown root:root /root/.ssh + chmod 700 /root/.ssh + touch /root/.ssh/authorized_keys + chown root:root /root/.ssh/authorized_keys + chmod 600 /root/.ssh/authorized_keys + # make centos with selinux happy + which restorecon && restorecon -Rv /root/.ssh + echo '{key}' >> /root/.ssh/authorized_keys + else + touch /root/.ssh/authorized_keys + chown root:root /root/.ssh/authorized_keys + chmod 600 /root/.ssh/authorized_keys + echo '{key}' >> /root/.ssh/authorized_keys + fi + if [ -d /home/{user_name} ];then + if [ ! 
-d /home/{user_name}/.ssh ];then + mkdir /home/{user_name}/.ssh + chown {user_name}:{user_name} /home/{user_name}/.ssh + chmod 700 /home/{user_name}/.ssh + touch /home/{user_name}/.ssh/authorized_keys + chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys + chmod 600 /home/{user_name}/.ssh/authorized_keys + # make centos with selinux happy + which restorecon && restorecon -Rv /home/{user_name}/.ssh + echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys + else + touch /home/{user_name}/.ssh/authorized_keys + chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys + chmod 600 /home/{user_name}/.ssh/authorized_keys + echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys + fi + fi + fi""".format(key=key, user_name=user_name, user_key=user_key) + + for vm in vapp._get_vms(): + vm_name = vm.name + task = vapp.customize_guest_os(vm_name, customization_script=customize_script) + if isinstance(task, GenericTask): + self.vca.block_until_completed(task) + self.logger.info("cloud_init : customized guest os task "\ + "completed for VM {}".format(vm_name)) + else: + self.logger.error("cloud_init : task for customized guest os"\ + "failed for VM {}".format(vm_name)) + except Exception as exp: + self.logger.error("cloud_init : exception occurred while injecting "\ + "ssh-key") + raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\ + "ssh-key".format(exp)) + + + def add_new_disk(self, vapp_uuid, disk_size): + """ + Method to create an empty vm disk + + Args: + vapp_uuid - is vapp identifier. + disk_size - size of disk to be created in GB + + Returns: + None + """ + status = False + vm_details = None + try: + #Disk size in GB, convert it into MB + if disk_size is not None: + disk_size_mb = int(disk_size) * 1024 + vm_details = self.get_vapp_details_rest(vapp_uuid) + + if vm_details and "vm_virtual_hardware" in vm_details: + self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size)) + disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"] + status = self.add_new_disk_rest(disk_href, disk_size_mb) + + except Exception as exp: + msg = "Error occurred while creating new disk {}.".format(exp) + self.rollback_newvm(vapp_uuid, msg) + + if status: + self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size)) + else: + #If failed to add disk, delete VM + msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"]) + self.rollback_newvm(vapp_uuid, msg) + + + def add_new_disk_rest(self, disk_href, disk_size_mb): + """ + Retrives vApp Disks section & add new empty disk + + Args: + disk_href: Disk section href to addd disk + disk_size_mb: Disk size in MB + + Returns: Status of add new disk task + """ + status = False + if self.vca.vcloud_session and self.vca.vcloud_session.organization: + response = Http.get(url=disk_href, + headers=self.vca.vcloud_session.get_vcloud_headers(), + verify=self.vca.verify, + logger=self.vca.logger) + + if response.status_code == 403: + response = self.retry_rest('GET', disk_href) + + if response.status_code != requests.codes.ok: + self.logger.error("add_new_disk_rest: GET REST API call {} failed. 
Return status code {}" + .format(disk_href, response.status_code)) + return status + try: + #Find but type & max of instance IDs assigned to disks + lxmlroot_respond = lxmlElementTree.fromstring(response.content) + namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix} + namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5" + instance_id = 0 + for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces): + if item.find("rasd:Description",namespaces).text == "Hard disk": + inst_id = int(item.find("rasd:InstanceID" ,namespaces).text) + if inst_id > instance_id: + instance_id = inst_id + disk_item = item.find("rasd:HostResource" ,namespaces) + bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"] + bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"] + + instance_id = instance_id + 1 + new_item = """ + Hard disk + New disk + + {} + 17 + """.format(disk_size_mb, bus_subtype, bus_type, instance_id) + + new_data = response.content + #Add new item at the bottom + new_data = new_data.replace('\n', '\n{}\n'.format(new_item)) + + # Send PUT request to modify virtual hardware section with new disk + headers = self.vca.vcloud_session.get_vcloud_headers() + headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1' + + response = Http.put(url=disk_href, + data=new_data, + headers=headers, + verify=self.vca.verify, logger=self.logger) + + if response.status_code == 403: + add_headers = {'Content-Type': headers['Content-Type']} + response = self.retry_rest('PUT', disk_href, add_headers, new_data) + + if response.status_code != 202: + self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}" + .format(disk_href, response.status_code, response.content)) + else: + add_disk_task = taskType.parseString(response.content, True) + if type(add_disk_task) is GenericTask: + status = self.vca.block_until_completed(add_disk_task) + if not status: + self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb)) + + except Exception as exp: + self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp)) + + return status + + + def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None): + """ + Method to add existing disk to vm + Args : + catalogs - List of VDC catalogs + image_id - Catalog ID + template_name - Name of template in catalog + vapp_uuid - UUID of vApp + Returns: + None + """ + disk_info = None + vcenter_conect, content = self.get_vcenter_content() + #find moref-id of vm in image + catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs, + image_id=image_id, + ) + + if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info: + if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]: + catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None) + if catalog_vm_moref_id: + self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id)) + host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id) + if catalog_vm_obj: + #find existing disk + disk_info = self.find_disk(catalog_vm_obj) + else: + exp_msg = "No VM with image id {} found".format(image_id) + self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound") + else: + exp_msg = "No Image found with image ID {} ".format(image_id) + self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound") + + if disk_info: + self.logger.info("Existing disk_info : 
{}".format(disk_info)) + #get VM + vm_moref_id = self.get_vm_moref_id(vapp_uuid) + host, vm_obj = self.get_vm_obj(content, vm_moref_id) + if vm_obj: + status = self.add_disk(vcenter_conect=vcenter_conect, + vm=vm_obj, + disk_info=disk_info, + size=size, + vapp_uuid=vapp_uuid + ) + if status: + self.logger.info("Disk from image id {} added to {}".format(image_id, + vm_obj.config.name) + ) + else: + msg = "No disk found with image id {} to add in VM {}".format( + image_id, + vm_obj.config.name) + self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound") + + + def find_disk(self, vm_obj): + """ + Method to find details of existing disk in VM + Args : + vm_obj - vCenter object of VM + image_id - Catalog ID + Returns: + disk_info : dict of disk details + """ + disk_info = {} + if vm_obj: + try: + devices = vm_obj.config.hardware.device + for device in devices: + if type(device) is vim.vm.device.VirtualDisk: + if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'): + disk_info["full_path"] = device.backing.fileName + disk_info["datastore"] = device.backing.datastore + disk_info["capacityKB"] = device.capacityInKB + break + except Exception as exp: + self.logger.error("find_disk() : exception occurred while "\ + "getting existing disk details :{}".format(exp)) + return disk_info + + + def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}): + """ + Method to add existing disk in VM + Args : + vcenter_conect - vCenter content object + vm - vCenter vm object + disk_info : dict of disk details + Returns: + status : status of add disk task + """ + datastore = disk_info["datastore"] if "datastore" in disk_info else None + fullpath = disk_info["full_path"] if "full_path" in disk_info else None + capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None + if size is not None: + #Convert size from GB to KB + sizeKB = int(size) * 1024 * 1024 + #compare size of existing disk and user given size.Assign whicherver is greater + self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format( + sizeKB, capacityKB)) + if sizeKB > capacityKB: + capacityKB = sizeKB + + if datastore and fullpath and capacityKB: + try: + spec = vim.vm.ConfigSpec() + # get all disks on a VM, set unit_number to the next available + unit_number = 0 + for dev in vm.config.hardware.device: + if hasattr(dev.backing, 'fileName'): + unit_number = int(dev.unitNumber) + 1 + # unit_number 7 reserved for scsi controller + if unit_number == 7: + unit_number += 1 + if isinstance(dev, vim.vm.device.VirtualDisk): + #vim.vm.device.VirtualSCSIController + controller_key = dev.controllerKey + + self.logger.info("Add Existing disk : unit number {} , controller key {}".format( + unit_number, controller_key)) + # add disk here + dev_changes = [] + disk_spec = vim.vm.device.VirtualDeviceSpec() + disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add + disk_spec.device = vim.vm.device.VirtualDisk() + disk_spec.device.backing = \ + vim.vm.device.VirtualDisk.FlatVer2BackingInfo() + disk_spec.device.backing.thinProvisioned = True + disk_spec.device.backing.diskMode = 'persistent' + disk_spec.device.backing.datastore = datastore + disk_spec.device.backing.fileName = fullpath + + disk_spec.device.unitNumber = unit_number + disk_spec.device.capacityInKB = capacityKB + disk_spec.device.controllerKey = controller_key + dev_changes.append(disk_spec) + spec.deviceChange = dev_changes + task = vm.ReconfigVM_Task(spec=spec) + status = 
self.wait_for_vcenter_task(task, vcenter_conect) + return status + except Exception as exp: + exp_msg = "add_disk() : exception {} occurred while adding disk "\ + "{} to vm {}".format(exp, + fullpath, + vm.config.name) + self.rollback_newvm(vapp_uuid, exp_msg) + else: + msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info) + self.rollback_newvm(vapp_uuid, msg) + + + def get_vcenter_content(self): + """ + Get the vsphere content object + """ + try: + vm_vcenter_info = self.get_vm_vcenter_info() + except Exception as exp: + self.logger.error("Error occurred while getting vCenter infromationn"\ + " for VM : {}".format(exp)) + raise vimconn.vimconnException(message=exp) + + context = None + if hasattr(ssl, '_create_unverified_context'): + context = ssl._create_unverified_context() + + vcenter_conect = SmartConnect( + host=vm_vcenter_info["vm_vcenter_ip"], + user=vm_vcenter_info["vm_vcenter_user"], + pwd=vm_vcenter_info["vm_vcenter_password"], + port=int(vm_vcenter_info["vm_vcenter_port"]), + sslContext=context + ) + atexit.register(Disconnect, vcenter_conect) + content = vcenter_conect.RetrieveContent() + return vcenter_conect, content + + + def get_vm_moref_id(self, vapp_uuid): + """ + Get the moref_id of given VM + """ + try: + if vapp_uuid: + vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True) + if vm_details and "vm_vcenter_info" in vm_details: + vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None) + + return vm_moref_id + + except Exception as exp: + self.logger.error("Error occurred while getting VM moref ID "\ + " for VM : {}".format(exp)) + return None + + + def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None): + """ + Method to get vApp template details + Args : + catalogs - list of VDC catalogs + image_id - Catalog ID to find + template_name : template name in catalog + Returns: + parsed_respond : dict of vApp tempalte details + """ + parsed_response = {} + + vca = self.connect_as_admin() + if not vca: + raise vimconn.vimconnConnectionException("self.connect() is failed") + + try: + catalog = self.get_catalog_obj(image_id, catalogs) + if catalog: + template_name = self.get_catalogbyid(image_id, catalogs) + catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem()) + if len(catalog_items) == 1: + response = Http.get(catalog_items[0].get_href(), + headers=vca.vcloud_session.get_vcloud_headers(), + verify=vca.verify, + logger=vca.logger) + catalogItem = XmlElementTree.fromstring(response.content) + entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0] + vapp_tempalte_href = entity.get("href") + #get vapp details and parse moref id + + namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" , + 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1', + 'vmw': 'http://www.vmware.com/schema/ovf', + 'vm': 'http://www.vmware.com/vcloud/v1.5', + 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData", + 'vmext':"http://www.vmware.com/vcloud/extension/v1.5", + 'xmlns':"http://www.vmware.com/vcloud/v1.5" + } + + if vca.vcloud_session and vca.vcloud_session.organization: + response = Http.get(url=vapp_tempalte_href, + headers=vca.vcloud_session.get_vcloud_headers(), + verify=vca.verify, + logger=vca.logger + ) + + if response.status_code != requests.codes.ok: + self.logger.debug("REST 
API call {} failed. Return status code {}".format( + vapp_tempalte_href, response.status_code)) + + else: + xmlroot_respond = XmlElementTree.fromstring(response.content) + children_section = xmlroot_respond.find('vm:Children/', namespaces) + if children_section is not None: + vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces) + if vCloud_extension_section is not None: + vm_vcenter_info = {} + vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces) + vmext = vim_info.find('vmext:VmVimObjectRef', namespaces) + if vmext is not None: + vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text + parsed_response["vm_vcenter_info"]= vm_vcenter_info + + except Exception as exp : + self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp)) + + return parsed_response + + + def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"): + """ + Method to delete vApp + Args : + vapp_uuid - vApp UUID + msg - Error message to be logged + exp_type : Exception type + Returns: + None + """ + if vapp_uuid: + status = self.delete_vminstance(vapp_uuid) + else: + msg = "No vApp ID" + self.logger.error(msg) + if exp_type == "Genric": + raise vimconn.vimconnException(msg) + elif exp_type == "NotFound": + raise vimconn.vimconnNotFoundException(message=msg) + + def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid): + """ + Method to attach SRIOV adapters to VM + + Args: + vapp_uuid - uuid of vApp/VM + sriov_nets - SRIOV devices infromation as specified in VNFD (flavor) + vmname_andid - vmname + + Returns: + The status of add SRIOV adapter task , vm object and + vcenter_conect object + """ + vm_obj = None + vcenter_conect, content = self.get_vcenter_content() + vm_moref_id = self.get_vm_moref_id(vapp_uuid) + + if vm_moref_id: + try: + no_of_sriov_devices = len(sriov_nets) + if no_of_sriov_devices > 0: + #Get VM and its host + host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id) + self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj)) + if host_obj and vm_obj: + #get SRIOV devies from host on which vapp is currently installed + avilable_sriov_devices = self.get_sriov_devices(host_obj, + no_of_sriov_devices, + ) + + if len(avilable_sriov_devices) == 0: + #find other hosts with active pci devices + new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices( + content, + no_of_sriov_devices, + ) + + if new_host_obj is not None and len(avilable_sriov_devices)> 0: + #Migrate vm to the host where SRIOV devices are available + self.logger.info("Relocate VM {} on new host {}".format(vm_obj, + new_host_obj)) + task = self.relocate_vm(new_host_obj, vm_obj) + if task is not None: + result = self.wait_for_vcenter_task(task, vcenter_conect) + self.logger.info("Migrate VM status: {}".format(result)) + host_obj = new_host_obj + else: + self.logger.info("Fail to migrate VM : {}".format(result)) + raise vimconn.vimconnNotFoundException( + "Fail to migrate VM : {} to host {}".format( + vmname_andid, + new_host_obj) + ) + + if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0: + #Add SRIOV devices one by one + for sriov_net in sriov_nets: + network_name = sriov_net.get('net_id') + dvs_portgr_name = self.create_dvPort_group(network_name) + if sriov_net.get('type') == "VF": + #add vlan ID ,Modify portgroup for vlan ID + self.configure_vlanID(content, vcenter_conect, network_name) + + task = self.add_sriov_to_vm(content, + vm_obj, + host_obj, + 
network_name, + avilable_sriov_devices[0] + ) + if task: + status= self.wait_for_vcenter_task(task, vcenter_conect) + if status: + self.logger.info("Added SRIOV {} to VM {}".format( + no_of_sriov_devices, + str(vm_obj))) + else: + self.logger.error("Fail to add SRIOV {} to VM {}".format( + no_of_sriov_devices, + str(vm_obj))) + raise vimconn.vimconnUnexpectedResponse( + "Fail to add SRIOV adapter in VM ".format(str(vm_obj)) + ) + return True, vm_obj, vcenter_conect + else: + self.logger.error("Currently there is no host with"\ + " {} number of avaialble SRIOV "\ + "VFs required for VM {}".format( + no_of_sriov_devices, + vmname_andid) + ) + raise vimconn.vimconnNotFoundException( + "Currently there is no host with {} "\ + "number of avaialble SRIOV devices required for VM {}".format( + no_of_sriov_devices, + vmname_andid)) + else: + self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets) + + except vmodl.MethodFault as error: + self.logger.error("Error occurred while adding SRIOV {} ",error) + return None, vm_obj, vcenter_conect + + + def get_sriov_devices(self,host, no_of_vfs): + """ + Method to get the details of SRIOV devices on given host + Args: + host - vSphere host object + no_of_vfs - number of VFs needed on host + + Returns: + array of SRIOV devices + """ + sriovInfo=[] + if host: + for device in host.config.pciPassthruInfo: + if isinstance(device,vim.host.SriovInfo) and device.sriovActive: + if device.numVirtualFunction >= no_of_vfs: + sriovInfo.append(device) + break + return sriovInfo + + + def get_host_and_sriov_devices(self, content, no_of_vfs): + """ + Method to get the details of SRIOV devices infromation on all hosts + + Args: + content - vSphere host object + no_of_vfs - number of pci VFs needed on host + + Returns: + array of SRIOV devices and host object + """ + host_obj = None + sriov_device_objs = None + try: + if content: + container = content.viewManager.CreateContainerView(content.rootFolder, + [vim.HostSystem], True) + for host in container.view: + devices = self.get_sriov_devices(host, no_of_vfs) + if devices: + host_obj = host + sriov_device_objs = devices + break + except Exception as exp: + self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj)) + + return host_obj,sriov_device_objs + + + def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device): + """ + Method to add SRIOV adapter to vm + + Args: + host_obj - vSphere host object + vm_obj - vSphere vm object + content - vCenter content object + network_name - name of distributed virtaul portgroup + sriov_device - SRIOV device info + + Returns: + task object + """ + devices = [] + vnic_label = "sriov nic" + try: + dvs_portgr = self.get_dvport_group(network_name) + network_name = dvs_portgr.name + nic = vim.vm.device.VirtualDeviceSpec() + # VM device + nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add + nic.device = vim.vm.device.VirtualSriovEthernetCard() + nic.device.addressType = 'assigned' + #nic.device.key = 13016 + nic.device.deviceInfo = vim.Description() + nic.device.deviceInfo.label = vnic_label + nic.device.deviceInfo.summary = network_name + nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() + + nic.device.backing.network = self.get_obj(content, [vim.Network], network_name) + nic.device.backing.deviceName = network_name + nic.device.backing.useAutoDetect = False + nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo() + nic.device.connectable.startConnected = True + 
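+            # connectable marks the vNIC as wired up at power-on; the SR-IOV
+            # backing that follows binds this vNIC to the physical function
+            # id selected from the host earlier.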
nic.device.connectable.allowGuestControl = True + + nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo() + nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo() + nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id + + devices.append(nic) + vmconf = vim.vm.ConfigSpec(deviceChange=devices) + task = vm_obj.ReconfigVM_Task(vmconf) + return task + except Exception as exp: + self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj)) + return None + + + def create_dvPort_group(self, network_name): + """ + Method to create disributed virtual portgroup + + Args: + network_name - name of network/portgroup + + Returns: + portgroup key + """ + try: + new_network_name = [network_name, '-', str(uuid.uuid4())] + network_name=''.join(new_network_name) + vcenter_conect, content = self.get_vcenter_content() + + dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name) + if dv_switch: + dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() + dv_pg_spec.name = network_name + + dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding + dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy() + dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy() + dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False) + dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False) + dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False) + + task = dv_switch.AddDVPortgroup_Task([dv_pg_spec]) + self.wait_for_vcenter_task(task, vcenter_conect) + + dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name) + if dvPort_group: + self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group)) + return dvPort_group.key + else: + self.logger.debug("No disributed virtual switch found with name {}".format(network_name)) + + except Exception as exp: + self.logger.error("Error occurred while creating disributed virtaul port group {}"\ + " : {}".format(network_name, exp)) + return None + + def reconfig_portgroup(self, content, dvPort_group_name , config_info={}): + """ + Method to reconfigure disributed virtual portgroup + + Args: + dvPort_group_name - name of disributed virtual portgroup + content - vCenter content object + config_info - disributed virtual portgroup configuration + + Returns: + task object + """ + try: + dvPort_group = self.get_dvport_group(dvPort_group_name) + if dvPort_group: + dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() + dv_pg_spec.configVersion = dvPort_group.config.configVersion + dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy() + if "vlanID" in config_info: + dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec() + dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID') + + task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec) + return task + else: + return None + except Exception as exp: + self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\ + " : {}".format(dvPort_group_name, exp)) + return None + + + def destroy_dvport_group(self , dvPort_group_name): + """ + Method to destroy disributed virtual portgroup + + Args: + network_name - name of 
network/portgroup + + Returns: + True if portgroup successfully got deleted else false + """ + vcenter_conect, content = self.get_vcenter_content() + try: + status = None + dvPort_group = self.get_dvport_group(dvPort_group_name) + if dvPort_group: + task = dvPort_group.Destroy_Task() + status = self.wait_for_vcenter_task(task, vcenter_conect) + return status + except vmodl.MethodFault as exp: + self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format( + exp, dvPort_group_name)) + return None + + + def get_dvport_group(self, dvPort_group_name): + """ + Method to get disributed virtual portgroup + + Args: + network_name - name of network/portgroup + + Returns: + portgroup object + """ + vcenter_conect, content = self.get_vcenter_content() + dvPort_group = None + try: + container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True) + for item in container.view: + if item.key == dvPort_group_name: + dvPort_group = item + break + return dvPort_group + except vmodl.MethodFault as exp: + self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format( + exp, dvPort_group_name)) + return None + + def get_vlanID_from_dvs_portgr(self, dvPort_group_name): + """ + Method to get disributed virtual portgroup vlanID + + Args: + network_name - name of network/portgroup + + Returns: + vlan ID + """ + vlanId = None + try: + dvPort_group = self.get_dvport_group(dvPort_group_name) + if dvPort_group: + vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId + except vmodl.MethodFault as exp: + self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format( + exp, dvPort_group_name)) + return vlanId + + + def configure_vlanID(self, content, vcenter_conect, dvPort_group_name): + """ + Method to configure vlanID in disributed virtual portgroup vlanID + + Args: + network_name - name of network/portgroup + + Returns: + None + """ + vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name) + if vlanID == 0: + #configure vlanID + vlanID = self.genrate_vlanID(dvPort_group_name) + config = {"vlanID":vlanID} + task = self.reconfig_portgroup(content, dvPort_group_name, + config_info=config) + if task: + status= self.wait_for_vcenter_task(task, vcenter_conect) + if status: + self.logger.info("Reconfigured Port group {} for vlan ID {}".format( + dvPort_group_name,vlanID)) + else: + self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format( + dvPort_group_name, vlanID)) + + + def genrate_vlanID(self, network_name): + """ + Method to get unused vlanID + Args: + network_name - name of network/portgroup + Returns: + vlanID + """ + vlan_id = None + used_ids = [] + if self.config.get('vlanID_range') == None: + raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\ + "at config value before creating sriov network with vlan tag") + if "used_vlanIDs" not in self.persistent_info: + self.persistent_info["used_vlanIDs"] = {} + else: + used_ids = self.persistent_info["used_vlanIDs"].values() + + for vlanID_range in self.config.get('vlanID_range'): + start_vlanid , end_vlanid = vlanID_range.split("-") + if start_vlanid > end_vlanid: + raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format( + vlanID_range)) + + for id in xrange(int(start_vlanid), int(end_vlanid) + 1): + if id not in used_ids: + vlan_id = id + self.persistent_info["used_vlanIDs"][network_name] = vlan_id + return vlan_id + if vlan_id is None: + raise 
+
+
+    def get_obj(self, content, vimtype, name):
+        """
+        Get the vSphere object associated with a given text name
+        """
+        obj = None
+        container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
+        for item in container.view:
+            if item.name == name:
+                obj = item
+                break
+        return obj
+
+
+    def insert_media_to_vm(self, vapp, image_id):
+        """
+        Method to insert media CD-ROM (ISO image) from catalog to vm.
+        vapp - vapp object to get vm id
+        image_id - image id of the CD-ROM to be inserted into the vm
+        """
+        # create connection object
+        vca = self.connect()
+        try:
+            # fetch catalog details
+            rest_url = "{}/api/catalog/{}".format(vca.host, image_id)
+            response = Http.get(url=rest_url,
+                                headers=vca.vcloud_session.get_vcloud_headers(),
+                                verify=vca.verify,
+                                logger=vca.logger)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {} "\
+                                  "status code : {}".format(rest_url,
+                                                            response.content,
+                                                            response.status_code))
+                raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
+                                               "catalog details")
+            # search for the iso name and id
+            iso_name, media_id = self.get_media_details(vca, response.content)
+
+            if iso_name and media_id:
+                # MediaInsertOrEjectParams request body (vCloud API v1.5 schema)
+                data = """<ns6:MediaInsertOrEjectParams xmlns:ns6="http://www.vmware.com/vcloud/v1.5">
+                          <ns6:Media type="application/vnd.vmware.vcloud.media+xml"
+                                     name="{}" id="urn:vcloud:media:{}"
+                                     href="https://{}/api/media/{}"/>
+                          </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
+                                                                    vca.host, media_id)
+
+                for vms in vapp._get_vms():
+                    vm_id = (vms.id).split(':')[-1]
+
+                    headers = vca.vcloud_session.get_vcloud_headers()
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
+                    rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(vca.host, vm_id)
+
+                    response = Http.post(url=rest_url,
+                                         headers=headers,
+                                         data=data,
+                                         verify=vca.verify,
+                                         logger=vca.logger)
+
+                    if response.status_code != 202:
+                        self.logger.error("Failed to insert CD-ROM to vm")
+                        raise vimconn.vimconnException("insert_media_to_vm(): Failed to insert "\
+                                                       "ISO image to vm")
+                    else:
+                        task = taskType.parseString(response.content, True)
+                        if isinstance(task, GenericTask):
+                            vca.block_until_completed(task)
+                        self.logger.info("insert_media_to_vm(): Successfully inserted media ISO"\
+                                         " image to vm {}".format(vm_id))
+        except Exception as exp:
+            self.logger.error("insert_media_to_vm(): exception occurred "\
+                              "while inserting media CD-ROM")
+            raise vimconn.vimconnException(message=exp)
+
+
+    def get_media_details(self, vca, content):
+        """
+        Method to get catalog item details
+        vca - connection object
+        content - Catalog details
+        Return - Media name, media id
+        """
+        cataloghref_list = []
+        try:
+            if content:
+                vm_list_xmlroot = XmlElementTree.fromstring(content)
+                for child in vm_list_xmlroot.iter():
+                    if 'CatalogItem' in child.tag:
+                        cataloghref_list.append(child.attrib.get('href'))
+                if cataloghref_list is not None:
+                    for href in cataloghref_list:
+                        if href:
+                            response = Http.get(url=href,
+                                                headers=vca.vcloud_session.get_vcloud_headers(),
+                                                verify=vca.verify,
+                                                logger=vca.logger)
+                            if response.status_code != 200:
+                                self.logger.error("REST call {} failed reason : {} "\
+                                                  "status code : {}".format(href,
+                                                                            response.content,
+                                                                            response.status_code))
+                                raise vimconn.vimconnException("get_media_details(): Failed to get "\
+                                                               "catalogitem details")
+                            list_xmlroot = XmlElementTree.fromstring(response.content)
+                            for child in list_xmlroot.iter():
+                                if 'Entity' in child.tag:
+                                    if 'media' in child.attrib.get('href'):
+                                        name = child.attrib.get('name')
+                                        media_id = child.attrib.get('href').split('/').pop()
+                                        return name, media_id
+            else:
+                self.logger.debug("Media name and id not found")
+                return False, False
+        except Exception as exp:
+            self.logger.error("get_media_details(): exception occurred "\
+                              "while getting media details")
+            raise vimconn.vimconnException(message=exp)
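+
+    # --- Editor's illustrative sketch (not part of this change) ---
+    # Typical use of insert_media_to_vm(); `vapp` is assumed to come from
+    # vca.get_vapp() and `image_id` to be the catalog UUID of a previously
+    # uploaded ISO (both names are placeholders):
+    #
+    #   vdc = vca.get_vdc(self.tenant_name)
+    #   vapp = vca.get_vapp(vdc, "my-vapp")
+    #   self.insert_media_to_vm(vapp, image_id)
+    #   # POSTs MediaInsertOrEjectParams to /api/vApp/vm-<id>/media/action/insertMedia
+    #   # for every VM in the vApp and blocks until the vCloud task completes.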
+
+
+    def retry_rest(self, method, url, add_headers=None, data=None):
+        """ Method to refresh the token and retry the respective REST request
+
+        Args:
+            method - REST method, one of 'GET', 'PUT', 'POST' or 'DELETE'
+            url - request url to be used
+            add_headers - Additional headers (optional)
+            data - Request payload data to be passed in request
+
+        Returns:
+            response - Response of request
+        """
+        response = None
+
+        # Get a fresh token
+        self.get_token()
+
+        headers = self.vca.vcloud_session.get_vcloud_headers()
+
+        if add_headers:
+            headers.update(add_headers)
+
+        if method == 'GET':
+            response = Http.get(url=url,
+                                headers=headers,
+                                verify=self.vca.verify,
+                                logger=self.vca.logger)
+        elif method == 'PUT':
+            response = Http.put(url=url,
+                                data=data,
+                                headers=headers,
+                                verify=self.vca.verify,
+                                logger=self.vca.logger)
+        elif method == 'POST':
+            response = Http.post(url=url,
+                                 headers=headers,
+                                 data=data,
+                                 verify=self.vca.verify,
+                                 logger=self.vca.logger)
+        elif method == 'DELETE':
+            response = Http.delete(url=url,
+                                   headers=headers,
+                                   verify=self.vca.verify,
+                                   logger=self.vca.logger)
+        return response
+
+
+    def get_token(self):
+        """ Generate a new token if expired
+
+        Returns:
+            The return vca object that can later be used to connect to vCloud director as admin for VDC
+        """
+        vca = None
+
+        try:
+            self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
+                                                                                         self.user,
+                                                                                         self.org_name))
+            vca = VCA(host=self.url,
+                      username=self.user,
+                      service_type=STANDALONE,
+                      version=VCAVERSION,
+                      verify=False,
+                      log=False)
+
+            result = vca.login(password=self.passwd, org=self.org_name)
+            if result is True:
+                result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
+                if result is True:
+                    self.logger.info(
+                        "Successfully generated token for vcloud direct org: {} as user: {}".format(self.org_name,
+                                                                                                    self.user))
+                    # Update the cached connection object
+                    self.vca = vca
+                    return
+
+        except Exception:
+            raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
+                                                     "{} as user: {}".format(self.org_name, self.user))
+
+        if not vca or not result:
+            raise vimconn.vimconnConnectionException("Failed to reconnect to vCloud director while renewing token")
+
+
+    def get_vdc_details(self):
+        """ Get VDC details using pyVcloud Lib
+
+        Returns vdc object
+        """
+        vdc = self.vca.get_vdc(self.tenant_name)
+
+        # Retry once, refreshing the token, if the first attempt failed
+        if vdc is None:
+            self.get_token()
+            vdc = self.vca.get_vdc(self.tenant_name)
+
+        return vdc
+
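+
+    # --- Editor's illustrative sketch (not part of this change) ---
+    # How retry_rest() might be used by the REST helpers above; `tenant_id`
+    # is a placeholder, and `requests` / `lxmlElementTree` are already
+    # imported by this module:
+    #
+    #   url = "{}/api/vdc/{}".format(self.vca.host, tenant_id)
+    #   response = self.retry_rest('GET', url)
+    #   if response.status_code == requests.codes.ok:
+    #       vdc_xml = lxmlElementTree.fromstring(response.content)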