+ raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+ def get_network_name_by_id(self, network_uuid=None):
+ """Method gets vcloud director network named based on supplied uuid.
+
+ Args:
+ network_uuid: network_id
+
+ Returns:
+ The return network name.
+ """
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed.")
+
+ if not network_uuid:
+ return None
+
+ try:
+ org_dict = self.get_org(self.org_uuid)
+ if 'networks' in org_dict:
+ org_network_dict = org_dict['networks']
+ for net_uuid in org_network_dict:
+ if net_uuid == network_uuid:
+ return org_network_dict[net_uuid]
+ except:
+ self.logger.debug("Exception in get_network_name_by_id")
+ self.logger.debug(traceback.format_exc())
+
+ return None
+
+ def get_network_id_by_name(self, network_name=None):
+ """Method gets vcloud director network uuid based on supplied name.
+
+ Args:
+ network_name: network_name
+ Returns:
+ The return network uuid.
+ network_uuid: network_id
+ """
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed.")
+
+ if not network_name:
+ self.logger.debug("get_network_id_by_name() : Network name is empty")
+ return None
+
+ try:
+ org_dict = self.get_org(self.org_uuid)
+ if org_dict and 'networks' in org_dict:
+ org_network_dict = org_dict['networks']
+ for net_uuid,net_name in org_network_dict.iteritems():
+ if net_name == network_name:
+ return net_uuid
+
+ except KeyError as exp:
+ self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
+
+ return None
+
+ def list_org_action(self):
+ """
+ Method leverages vCloud director and query for available organization for particular user
+
+ Args:
+ vca - is active VCA connection.
+ vdc_name - is a vdc name that will be used to query vms action
+
+ Returns:
+ The return XML respond
+ """
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+ url_list = [vca.host, '/api/org']
+ vm_list_rest_call = ''.join(url_list)
+
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ response = Http.get(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code == requests.codes.ok:
+ return response.content
+
+ return None
+
+ def get_org_action(self, org_uuid=None):
+ """
+ Method leverages vCloud director and retrieve available object fdr organization.
+
+ Args:
+ vca - is active VCA connection.
+ vdc_name - is a vdc name that will be used to query vms action
+
+ Returns:
+ The return XML respond
+ """
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+ if org_uuid is None:
+ return None
+
+ url_list = [vca.host, '/api/org/', org_uuid]
+ vm_list_rest_call = ''.join(url_list)
+
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ response = Http.get(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code == requests.codes.ok:
+ return response.content
+
+ return None
+
+ def get_org(self, org_uuid=None):
+ """
+ Method retrieves available organization in vCloud Director
+
+ Args:
+ org_uuid - is a organization uuid.
+
+ Returns:
+ The return dictionary with following key
+ "network" - for network list under the org
+ "catalogs" - for network list under the org
+ "vdcs" - for vdc list under org
+ """
+
+ org_dict = {}
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+ if org_uuid is None:
+ return org_dict
+
+ content = self.get_org_action(org_uuid=org_uuid)
+ try:
+ vdc_list = {}
+ network_list = {}
+ catalog_list = {}
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ for child in vm_list_xmlroot:
+ if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
+ vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+ org_dict['vdcs'] = vdc_list
+ if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
+ network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+ org_dict['networks'] = network_list
+ if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
+ catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+ org_dict['catalogs'] = catalog_list
+ except:
+ pass
+
+ return org_dict
+
+ def get_org_list(self):
+ """
+ Method retrieves available organization in vCloud Director
+
+ Args:
+ vca - is active VCA connection.
+
+ Returns:
+ The return dictionary and key for each entry VDC UUID
+ """
+
+ org_dict = {}
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+ content = self.list_org_action()
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ for vm_xml in vm_list_xmlroot:
+ if vm_xml.tag.split("}")[1] == 'Org':
+ org_uuid = vm_xml.attrib['href'].split('/')[-1:]
+ org_dict[org_uuid[0]] = vm_xml.attrib['name']
+ except:
+ pass
+
+ return org_dict
+
+ def vms_view_action(self, vdc_name=None):
+ """ Method leverages vCloud director vms query call
+
+ Args:
+ vca - is active VCA connection.
+ vdc_name - is a vdc name that will be used to query vms action
+
+ Returns:
+ The return XML respond
+ """
+ vca = self.connect()
+ if vdc_name is None:
+ return None
+
+ url_list = [vca.host, '/api/vms/query']
+ vm_list_rest_call = ''.join(url_list)
+
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
+ vca.vcloud_session.organization.Link)
+ if len(refs) == 1:
+ response = Http.get(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code == requests.codes.ok:
+ return response.content
+
+ return None
+
+ def get_vapp_list(self, vdc_name=None):
+ """
+ Method retrieves vApp list deployed vCloud director and returns a dictionary
+ contains a list of all vapp deployed for queried VDC.
+ The key for a dictionary is vApp UUID
+
+
+ Args:
+ vca - is active VCA connection.
+ vdc_name - is a vdc name that will be used to query vms action
+
+ Returns:
+ The return dictionary and key for each entry vapp UUID
+ """
+
+ vapp_dict = {}
+ if vdc_name is None:
+ return vapp_dict
+
+ content = self.vms_view_action(vdc_name=vdc_name)
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ for vm_xml in vm_list_xmlroot:
+ if vm_xml.tag.split("}")[1] == 'VMRecord':
+ if vm_xml.attrib['isVAppTemplate'] == 'true':
+ rawuuid = vm_xml.attrib['container'].split('/')[-1:]
+ if 'vappTemplate-' in rawuuid[0]:
+ # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
+ # vm and use raw UUID as key
+ vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
+ except:
+ pass
+
+ return vapp_dict
+
+ def get_vm_list(self, vdc_name=None):
+ """
+ Method retrieves VM's list deployed vCloud director. It returns a dictionary
+ contains a list of all VM's deployed for queried VDC.
+ The key for a dictionary is VM UUID
+
+
+ Args:
+ vca - is active VCA connection.
+ vdc_name - is a vdc name that will be used to query vms action
+
+ Returns:
+ The return dictionary and key for each entry vapp UUID
+ """
+ vm_dict = {}
+
+ if vdc_name is None:
+ return vm_dict
+
+ content = self.vms_view_action(vdc_name=vdc_name)
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ for vm_xml in vm_list_xmlroot:
+ if vm_xml.tag.split("}")[1] == 'VMRecord':
+ if vm_xml.attrib['isVAppTemplate'] == 'false':
+ rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+ if 'vm-' in rawuuid[0]:
+ # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
+ # vm and use raw UUID as key
+ vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+ except:
+ pass
+
+ return vm_dict
+
+ def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
+ """
+ Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
+ contains a list of all VM's deployed for queried VDC.
+ The key for a dictionary is VM UUID
+
+
+ Args:
+ vca - is active VCA connection.
+ vdc_name - is a vdc name that will be used to query vms action
+
+ Returns:
+ The return dictionary and key for each entry vapp UUID
+ """
+ vm_dict = {}
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+ if vdc_name is None:
+ return vm_dict
+
+ content = self.vms_view_action(vdc_name=vdc_name)
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ for vm_xml in vm_list_xmlroot:
+ if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
+ # lookup done by UUID
+ if isuuid:
+ if vapp_name in vm_xml.attrib['container']:
+ rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+ if 'vm-' in rawuuid[0]:
+ vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+ break
+ # lookup done by Name
+ else:
+ if vapp_name in vm_xml.attrib['name']:
+ rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+ if 'vm-' in rawuuid[0]:
+ vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+ break
+ except:
+ pass
+
+ return vm_dict
+
+ def get_network_action(self, network_uuid=None):
+ """
+ Method leverages vCloud director and query network based on network uuid
+
+ Args:
+ vca - is active VCA connection.
+ network_uuid - is a network uuid
+
+ Returns:
+ The return XML respond
+ """
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+ if network_uuid is None:
+ return None
+
+ url_list = [vca.host, '/api/network/', network_uuid]
+ vm_list_rest_call = ''.join(url_list)
+
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ response = Http.get(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code == requests.codes.ok:
+ return response.content
+
+ return None
+
+ def get_vcd_network(self, network_uuid=None):
+ """
+ Method retrieves available network from vCloud Director
+
+ Args:
+ network_uuid - is VCD network UUID
+
+ Each element serialized as key : value pair
+
+ Following keys available for access. network_configuration['Gateway'}
+ <Configuration>
+ <IpScopes>
+ <IpScope>
+ <IsInherited>true</IsInherited>
+ <Gateway>172.16.252.100</Gateway>
+ <Netmask>255.255.255.0</Netmask>
+ <Dns1>172.16.254.201</Dns1>
+ <Dns2>172.16.254.202</Dns2>
+ <DnsSuffix>vmwarelab.edu</DnsSuffix>
+ <IsEnabled>true</IsEnabled>
+ <IpRanges>
+ <IpRange>
+ <StartAddress>172.16.252.1</StartAddress>
+ <EndAddress>172.16.252.99</EndAddress>
+ </IpRange>
+ </IpRanges>
+ </IpScope>
+ </IpScopes>
+ <FenceMode>bridged</FenceMode>
+
+ Returns:
+ The return dictionary and key for each entry vapp UUID
+ """
+
+ network_configuration = {}
+ if network_uuid is None:
+ return network_uuid
+
+ content = self.get_network_action(network_uuid=network_uuid)
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+
+ network_configuration['status'] = vm_list_xmlroot.get("status")
+ network_configuration['name'] = vm_list_xmlroot.get("name")
+ network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
+
+ for child in vm_list_xmlroot:
+ if child.tag.split("}")[1] == 'IsShared':
+ network_configuration['isShared'] = child.text.strip()
+ if child.tag.split("}")[1] == 'Configuration':
+ for configuration in child.iter():
+ tagKey = configuration.tag.split("}")[1].strip()
+ if tagKey != "":
+ network_configuration[tagKey] = configuration.text.strip()
+ return network_configuration
+ except:
+ pass
+
+ return network_configuration
+
+ def delete_network_action(self, network_uuid=None):
+ """
+ Method delete given network from vCloud director
+
+ Args:
+ network_uuid - is a network uuid that client wish to delete
+
+ Returns:
+ The return None or XML respond or false
+ """
+
+ vca = self.connect_as_admin()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+ if network_uuid is None:
+ return False
+
+ url_list = [vca.host, '/api/admin/network/', network_uuid]
+ vm_list_rest_call = ''.join(url_list)
+
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ response = Http.delete(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+
+ if response.status_code == 202:
+ return True
+
+ return False
+
+ def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
+ ip_profile=None, isshared='true'):
+ """
+ Method create network in vCloud director
+
+ Args:
+ network_name - is network name to be created.
+ net_type - can be 'bridge','data','ptp','mgmt'.
+ ip_profile is a dict containing the IP parameters of the network
+ isshared - is a boolean
+ parent_network_uuid - is parent provider vdc network that will be used for mapping.
+ It optional attribute. by default if no parent network indicate the first available will be used.
+
+ Returns:
+ The return network uuid or return None
+ """
+
+ new_network_name = [network_name, '-', str(uuid.uuid4())]
+ content = self.create_network_rest(network_name=''.join(new_network_name),
+ ip_profile=ip_profile,
+ net_type=net_type,
+ parent_network_uuid=parent_network_uuid,
+ isshared=isshared)
+ if content is None:
+ self.logger.debug("Failed create network {}.".format(network_name))
+ return None
+
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ vcd_uuid = vm_list_xmlroot.get('id').split(":")
+ if len(vcd_uuid) == 4:
+ self.logger.info("Create new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
+ return vcd_uuid[3]
+ except:
+ self.logger.debug("Failed create network {}".format(network_name))
+ return None
+
+ def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
+ ip_profile=None, isshared='true'):
+ """
+ Method create network in vCloud director
+
+ Args:
+ network_name - is network name to be created.
+ net_type - can be 'bridge','data','ptp','mgmt'.
+ ip_profile is a dict containing the IP parameters of the network
+ isshared - is a boolean
+ parent_network_uuid - is parent provider vdc network that will be used for mapping.
+ It optional attribute. by default if no parent network indicate the first available will be used.
+
+ Returns:
+ The return network uuid or return None
+ """
+
+ vca = self.connect_as_admin()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed.")
+ if network_name is None:
+ return None
+
+ url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
+ vm_list_rest_call = ''.join(url_list)
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ response = Http.get(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+
+ provider_network = None
+ available_networks = None
+ add_vdc_rest_url = None
+
+ if response.status_code != requests.codes.ok:
+ self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
+ response.status_code))
+ return None
+ else:
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+ for child in vm_list_xmlroot:
+ if child.tag.split("}")[1] == 'ProviderVdcReference':
+ provider_network = child.attrib.get('href')
+ # application/vnd.vmware.admin.providervdc+xml
+ if child.tag.split("}")[1] == 'Link':
+ if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
+ and child.attrib.get('rel') == 'add':
+ add_vdc_rest_url = child.attrib.get('href')
+ except:
+ self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+ self.logger.debug("Respond body {}".format(response.content))
+ return None
+
+ # find pvdc provided available network
+ response = Http.get(url=provider_network,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code != requests.codes.ok:
+ self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
+ response.status_code))
+ return None
+
+ # available_networks.split("/")[-1]
+
+ if parent_network_uuid is None:
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+ for child in vm_list_xmlroot.iter():
+ if child.tag.split("}")[1] == 'AvailableNetworks':
+ for networks in child.iter():
+ # application/vnd.vmware.admin.network+xml
+ if networks.attrib.get('href') is not None:
+ available_networks = networks.attrib.get('href')
+ break
+ except:
+ return None
+
+ #Configure IP profile of the network
+ ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
+
+ gateway_address=ip_profile['gateway_address']
+ dhcp_count=int(ip_profile['dhcp_count'])
+ subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
+
+ if ip_profile['dhcp_enabled']==True:
+ dhcp_enabled='true'
+ else:
+ dhcp_enabled='false'
+ dhcp_start_address=ip_profile['dhcp_start_address']
+
+ #derive dhcp_end_address from dhcp_start_address & dhcp_count
+ end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
+ end_ip_int += dhcp_count - 1
+ dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
+
+ ip_version=ip_profile['ip_version']
+ dns_address=ip_profile['dns_address']
+
+ # either use client provided UUID or search for a first available
+ # if both are not defined we return none
+ if parent_network_uuid is not None:
+ url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
+ add_vdc_rest_url = ''.join(url_list)
+
+ if net_type=='ptp':
+ fence_mode="isolated"
+ isshared='false'
+ is_inherited='false'
+ data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
+ <Description>Openmano created</Description>
+ <Configuration>
+ <IpScopes>
+ <IpScope>
+ <IsInherited>{1:s}</IsInherited>
+ <Gateway>{2:s}</Gateway>
+ <Netmask>{3:s}</Netmask>
+ <Dns1>{4:s}</Dns1>
+ <IsEnabled>{5:s}</IsEnabled>
+ <IpRanges>
+ <IpRange>
+ <StartAddress>{6:s}</StartAddress>
+ <EndAddress>{7:s}</EndAddress>
+ </IpRange>
+ </IpRanges>
+ </IpScope>
+ </IpScopes>
+ <FenceMode>{8:s}</FenceMode>
+ </Configuration>
+ <IsShared>{9:s}</IsShared>
+ </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
+ subnet_address, dns_address, dhcp_enabled,
+ dhcp_start_address, dhcp_end_address, fence_mode, isshared)
+
+ else:
+ fence_mode="bridged"
+ is_inherited='false'
+ data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
+ <Description>Openmano created</Description>
+ <Configuration>
+ <IpScopes>
+ <IpScope>
+ <IsInherited>{1:s}</IsInherited>
+ <Gateway>{2:s}</Gateway>
+ <Netmask>{3:s}</Netmask>
+ <Dns1>{4:s}</Dns1>
+ <IsEnabled>{5:s}</IsEnabled>
+ <IpRanges>
+ <IpRange>
+ <StartAddress>{6:s}</StartAddress>
+ <EndAddress>{7:s}</EndAddress>
+ </IpRange>
+ </IpRanges>
+ </IpScope>
+ </IpScopes>
+ <ParentNetwork href="{8:s}"/>
+ <FenceMode>{9:s}</FenceMode>
+ </Configuration>
+ <IsShared>{10:s}</IsShared>
+ </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
+ subnet_address, dns_address, dhcp_enabled,
+ dhcp_start_address, dhcp_end_address, available_networks,
+ fence_mode, isshared)
+
+ headers = vca.vcloud_session.get_vcloud_headers()
+ headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
+ try:
+ response = Http.post(url=add_vdc_rest_url,
+ headers=headers,
+ data=data,
+ verify=vca.verify,
+ logger=vca.logger)
+
+ if response.status_code != 201:
+ self.logger.debug("Create Network POST REST API call failed. Return status code {}"
+ .format(response.status_code))
+ else:
+ network = networkType.parseString(response.content, True)
+ create_nw_task = network.get_Tasks().get_Task()[0]
+
+ # if we all ok we respond with content after network creation completes
+ # otherwise by default return None
+ if create_nw_task is not None:
+ self.logger.debug("Create Network REST : Waiting for Nw creation complete")
+ status = vca.block_until_completed(create_nw_task)
+ if status:
+ return response.content
+ else:
+ self.logger.debug("create_network_rest task failed. Network Create response : {}"
+ .format(response.content))
+ except Exception as exp:
+ self.logger.debug("create_network_rest : Exception : {} ".format(exp))
+
+ return None
+
+ def convert_cidr_to_netmask(self, cidr_ip=None):
+ """
+ Method sets convert CIDR netmask address to normal IP format
+ Args:
+ cidr_ip : CIDR IP address
+ Returns:
+ netmask : Converted netmask
+ """
+ if cidr_ip is not None:
+ if '/' in cidr_ip:
+ network, net_bits = cidr_ip.split('/')
+ netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
+ else:
+ netmask = cidr_ip
+ return netmask
+ return None
+
+ def get_provider_rest(self, vca=None):
+ """
+ Method gets provider vdc view from vcloud director
+
+ Args:
+ network_name - is network name to be created.
+ parent_network_uuid - is parent provider vdc network that will be used for mapping.
+ It optional attribute. by default if no parent network indicate the first available will be used.
+
+ Returns:
+ The return xml content of respond or None
+ """
+
+ url_list = [vca.host, '/api/admin']
+ response = Http.get(url=''.join(url_list),
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+
+ if response.status_code == requests.codes.ok:
+ return response.content
+ return None
+
+ def create_vdc(self, vdc_name=None):
+
+ vdc_dict = {}
+
+ xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
+ if xml_content is not None:
+ try:
+ task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
+ for child in task_resp_xmlroot:
+ if child.tag.split("}")[1] == 'Owner':
+ vdc_id = child.attrib.get('href').split("/")[-1]
+ vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
+ return vdc_dict
+ except:
+ self.logger.debug("Respond body {}".format(xml_content))
+
+ return None
+
+ def create_vdc_from_tmpl_rest(self, vdc_name=None):
+ """
+ Method create vdc in vCloud director based on VDC template.
+ it uses pre-defined template that must be named openmano
+
+ Args:
+ vdc_name - name of a new vdc.
+
+ Returns:
+ The return xml content of respond or None
+ """
+
+ self.logger.info("Creating new vdc {}".format(vdc_name))
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+ if vdc_name is None:
+ return None
+
+ url_list = [vca.host, '/api/vdcTemplates']
+ vm_list_rest_call = ''.join(url_list)
+ response = Http.get(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+
+ # container url to a template
+ vdc_template_ref = None
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+ for child in vm_list_xmlroot:
+ # application/vnd.vmware.admin.providervdc+xml
+ # we need find a template from witch we instantiate VDC
+ if child.tag.split("}")[1] == 'VdcTemplate':
+ if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml' and child.attrib.get(
+ 'name') == 'openmano':
+ vdc_template_ref = child.attrib.get('href')
+ except:
+ self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+ self.logger.debug("Respond body {}".format(response.content))
+ return None
+
+ # if we didn't found required pre defined template we return None
+ if vdc_template_ref is None:
+ return None
+
+ try:
+ # instantiate vdc
+ url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
+ vm_list_rest_call = ''.join(url_list)
+ data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
+ <Source href="{1:s}"></Source>
+ <Description>opnemano</Description>
+ </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
+ headers = vca.vcloud_session.get_vcloud_headers()
+ headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
+ response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
+ logger=vca.logger)
+ # if we all ok we respond with content otherwise by default None
+ if response.status_code >= 200 and response.status_code < 300:
+ return response.content
+ return None
+ except:
+ self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+ self.logger.debug("Respond body {}".format(response.content))
+
+ return None
+
+ def create_vdc_rest(self, vdc_name=None):
+ """
+ Method create network in vCloud director
+
+ Args:
+ network_name - is network name to be created.
+ parent_network_uuid - is parent provider vdc network that will be used for mapping.
+ It optional attribute. by default if no parent network indicate the first available will be used.
+
+ Returns:
+ The return network uuid or return None
+ """
+
+ self.logger.info("Creating new vdc {}".format(vdc_name))
+
+ vca = self.connect_as_admin()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+ if vdc_name is None:
+ return None
+
+ url_list = [vca.host, '/api/admin/org/', self.org_uuid]
+ vm_list_rest_call = ''.join(url_list)
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ response = Http.get(url=vm_list_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+
+ provider_vdc_ref = None
+ add_vdc_rest_url = None
+ available_networks = None
+
+ if response.status_code != requests.codes.ok:
+ self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
+ response.status_code))
+ return None
+ else:
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+ for child in vm_list_xmlroot:
+ # application/vnd.vmware.admin.providervdc+xml
+ if child.tag.split("}")[1] == 'Link':
+ if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
+ and child.attrib.get('rel') == 'add':
+ add_vdc_rest_url = child.attrib.get('href')
+ except:
+ self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+ self.logger.debug("Respond body {}".format(response.content))
+ return None
+
+ response = self.get_provider_rest(vca=vca)
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(response)
+ for child in vm_list_xmlroot:
+ if child.tag.split("}")[1] == 'ProviderVdcReferences':
+ for sub_child in child:
+ provider_vdc_ref = sub_child.attrib.get('href')
+ except:
+ self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+ self.logger.debug("Respond body {}".format(response))
+ return None
+
+ if add_vdc_rest_url is not None and provider_vdc_ref is not None:
+ data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
+ <AllocationModel>ReservationPool</AllocationModel>
+ <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
+ <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
+ </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
+ <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
+ <ProviderVdcReference
+ name="Main Provider"
+ href="{2:s}" />
+ <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
+ escape(vdc_name),
+ provider_vdc_ref)
+
+ headers = vca.vcloud_session.get_vcloud_headers()
+ headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
+ response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
+ logger=vca.logger)
+
+ # if we all ok we respond with content otherwise by default None
+ if response.status_code == 201:
+ return response.content
+ return None
+
    def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
        """
        Retrieve and parse vApp details from vCloud director.

        Args:
            vapp_uuid - vApp identifier (the uuid without the 'vapp-' prefix).
            need_admin_access - when True the query runs over the admin
                connection (needed e.g. for the vmext vCenter extension data).

        Returns:
            Dict of parsed vApp attributes (empty when the REST call fails or
            there is no valid session), or None when vapp_uuid is not given.
        """

        parsed_respond = {}
        vca = None

        if need_admin_access:
            vca = self.connect_as_admin()
        else:
            vca = self.connect()

        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if vapp_uuid is None:
            return None

        url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
        get_vapp_restcall = ''.join(url_list)

        if vca.vcloud_session and vca.vcloud_session.organization:
            response = Http.get(url=get_vapp_restcall,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
                                                                                          response.status_code))
                return parsed_respond

            try:
                xmlroot_respond = XmlElementTree.fromstring(response.content)
                parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']

                # XML namespaces used by the vCloud 1.5 / OVF payload
                namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
                              'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
                              'vmw': 'http://www.vmware.com/schema/ovf',
                              'vm': 'http://www.vmware.com/vcloud/v1.5',
                              'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
                              "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
                              "xmlns":"http://www.vmware.com/vcloud/v1.5"
                              }

                created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
                if created_section is not None:
                    parsed_respond['created'] = created_section.text

                network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
                if network_section is not None and 'networkName' in network_section.attrib:
                    parsed_respond['networkname'] = network_section.attrib['networkName']

                # flatten the IpScope values (Gateway, Netmask, StartAddress, ...)
                # straight into parsed_respond
                ipscopes_section = \
                    xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
                                         namespaces)
                if ipscopes_section is not None:
                    for ipscope in ipscopes_section:
                        for scope in ipscope:
                            tag_key = scope.tag.split("}")[1]
                            if tag_key == 'IpRanges':
                                # NOTE(review): getchildren() is deprecated and removed
                                # in Python 3.9; list(scope) is the modern equivalent
                                ip_ranges = scope.getchildren()
                                for ipblock in ip_ranges:
                                    for block in ipblock:
                                        parsed_respond[block.tag.split("}")[1]] = block.text
                            else:
                                parsed_respond[tag_key] = scope.text

                # parse children section for other attrib
                children_section = xmlroot_respond.find('vm:Children/', namespaces)
                if children_section is not None:
                    parsed_respond['name'] = children_section.attrib['name']
                    parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
                        if "nestedHypervisorEnabled" in children_section.attrib else None
                    parsed_respond['deployed'] = children_section.attrib['deployed']
                    parsed_respond['status'] = children_section.attrib['status']
                    # id is 'urn:vcloud:vm:<uuid>'
                    parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
                    network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
                    nic_list = []
                    for adapters in network_adapter:
                        adapter_key = adapters.tag.split("}")[1]
                        if adapter_key == 'PrimaryNetworkConnectionIndex':
                            parsed_respond['primarynetwork'] = adapters.text
                        if adapter_key == 'NetworkConnection':
                            vnic = {}
                            if 'network' in adapters.attrib:
                                vnic['network'] = adapters.attrib['network']
                            for adapter in adapters:
                                setting_key = adapter.tag.split("}")[1]
                                vnic[setting_key] = adapter.text
                            nic_list.append(vnic)

                    # console ticket links, when the vApp exposes them
                    for link in children_section:
                        if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                            if link.attrib['rel'] == 'screen:acquireTicket':
                                parsed_respond['acquireTicket'] = link.attrib
                            if link.attrib['rel'] == 'screen:acquireMksTicket':
                                parsed_respond['acquireMksTicket'] = link.attrib

                    parsed_respond['interfaces'] = nic_list
                    # vCenter object references (admin/extension data)
                    vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
                    if vCloud_extension_section is not None:
                        vm_vcenter_info = {}
                        vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
                        vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
                        if vmext is not None:
                            vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
                            vm_vcenter_info["vim_server_href"] = vmext.find('vmext:VimServerRef', namespaces).attrib['href']
                        parsed_respond["vm_vcenter_info"]= vm_vcenter_info

                    virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
                    vm_virtual_hardware_info = {}
                    if virtual_hardware_section is not None:
                        # disk size comes from the 'Hard disk' RASD item's
                        # HostResource capacity attribute
                        for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
                            if item.find("rasd:Description",namespaces).text == "Hard disk":
                                disk_size = item.find("rasd:HostResource" ,namespaces
                                                      ).attrib["{"+namespaces['vm']+"}capacity"]

                                vm_virtual_hardware_info["disk_size"]= disk_size
                                break

                        # href later used to edit/resize the vApp disks
                        for link in virtual_hardware_section:
                            if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
                                if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
                                    vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
                                    break

                    parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
            except Exception as exp :
                self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
        return parsed_respond
+
+ def acuire_console(self, vm_uuid=None):
+
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+ if vm_uuid is None:
+ return None
+
+ if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+ vm_dict = self.get_vapp_details_rest(self, vapp_uuid=vm_uuid)
+ console_dict = vm_dict['acquireTicket']
+ console_rest_call = console_dict['href']
+
+ response = Http.post(url=console_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+
+ if response.status_code == requests.codes.ok:
+ return response.content
+
+ return None
+
+ def modify_vm_disk(self, vapp_uuid, flavor_disk):
+ """
+ Method retrieve vm disk details
+
+ Args:
+ vapp_uuid - is vapp identifier.
+ flavor_disk - disk size as specified in VNFD (flavor)
+
+ Returns:
+ The return network uuid or return None
+ """
+ status = None
+ try:
+ #Flavor disk is in GB convert it into MB
+ flavor_disk = int(flavor_disk) * 1024
+ vm_details = self.get_vapp_details_rest(vapp_uuid)
+ if vm_details:
+ vm_name = vm_details["name"]
+ self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
+
+ if vm_details and "vm_virtual_hardware" in vm_details:
+ vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
+ disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+
+ self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
+
+ if flavor_disk > vm_disk:
+ status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
+ self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
+ vm_disk, flavor_disk ))
+ else:
+ status = True
+ self.logger.info("No need to modify disk of VM {}".format(vm_name))
+
+ return status
+ except Exception as exp:
+ self.logger.info("Error occurred while modifing disk size {}".format(exp))
+
+
    def modify_vm_disk_rest(self, disk_href , disk_size):
        """
        Modify the disk size of a VM through the vCloud director REST API.

        GETs the RasdItemsList document from disk_href, rewrites the capacity
        attribute of the "Hard disk" item and PUTs the document back.

        Args:
            disk_href - vCD API URL to GET and PUT disk data
            disk_size - new disk size in MB as specified in VNFD (flavor)

        Returns:
            Task completion status (True/False) on success, otherwise None.

        Raises:
            vimconnConnectionException: when the vCloud connection fails.
        """
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("self.connect() is failed")
        if disk_href is None or disk_size is None:
            return None

        if vca.vcloud_session and vca.vcloud_session.organization:
            response = Http.get(url=disk_href,
                                headers=vca.vcloud_session.get_vcloud_headers(),
                                verify=vca.verify,
                                logger=vca.logger)

            if response.status_code != requests.codes.ok:
                self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
                                                                                              response.status_code))
                return None
            try:
                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
                # map each namespace prefix of the document to its uri,
                # skipping the default (prefix-less) namespace (Python 2 iteritems)
                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"

                # locate the "Hard disk" item and overwrite its capacity attribute in place
                for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
                    if item.find("rasd:Description",namespaces).text == "Hard disk":
                        disk_item = item.find("rasd:HostResource" ,namespaces )
                        if disk_item is not None:
                            disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
                            break

                data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
                                                xml_declaration=True)

                #Send PUT request to modify disk size
                headers = vca.vcloud_session.get_vcloud_headers()
                headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'

                response = Http.put(url=disk_href,
                                    data=data,
                                    headers=headers,
                                    verify=vca.verify, logger=self.logger)

                # vCD answers 202 Accepted with a task document for async operations
                if response.status_code != 202:
                    self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
                                                                                                  response.status_code))
                else:
                    modify_disk_task = taskType.parseString(response.content, True)
                    if type(modify_disk_task) is GenericTask:
                        # block until the vCD resize task completes and return its status
                        status = vca.block_until_completed(modify_disk_task)
                        return status

                return None

            except Exception as exp :
                self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
                return None
+
+ def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
+ """
+ Method to attach pci devices to VM
+
+ Args:
+ vapp_uuid - uuid of vApp/VM
+ pci_devices - pci devices infromation as specified in VNFD (flavor)
+
+ Returns:
+ The status of add pci device task , vm object and
+ vcenter_conect object
+ """
+ vm_obj = None
+ vcenter_conect = None
+ self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
+ #Assuming password of vCenter user is same as password of vCloud user
+ vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vapp_uuid)
+ self.logger.info("vm_moref_id, {} vm_vcenter_host {} vm_vcenter_username{} "\
+ "vm_vcenter_port{}".format(
+ vm_moref_id, vm_vcenter_host,
+ vm_vcenter_username, vm_vcenter_port))
+ if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
+ context = None
+ if hasattr(ssl, '_create_unverified_context'):
+ context = ssl._create_unverified_context()
+ try:
+ no_of_pci_devices = len(pci_devices)
+ if no_of_pci_devices > 0:
+ vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
+ pwd=self.passwd, port=int(vm_vcenter_port) ,
+ sslContext=context)
+ atexit.register(Disconnect, vcenter_conect)
+ content = vcenter_conect.RetrieveContent()
+
+ #Get VM and its host
+ host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
+ self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+ if host_obj and vm_obj:
+ #get PCI devies from host on which vapp is currently installed
+ avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
+
+ if avilable_pci_devices is None:
+ #find other hosts with active pci devices
+ new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
+ content,
+ no_of_pci_devices
+ )
+
+ if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
+ #Migrate vm to the host where PCI devices are availble
+ self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
+ task = self.relocate_vm(new_host_obj, vm_obj)
+ if task is not None:
+ result = self.wait_for_vcenter_task(task, vcenter_conect)
+ self.logger.info("Migrate VM status: {}".format(result))
+ host_obj = new_host_obj
+ else:
+ self.logger.info("Fail to migrate VM : {}".format(result))
+ raise vimconn.vimconnNotFoundException(
+ "Fail to migrate VM : {} to host {}".format(
+ vmname_andid,
+ new_host_obj)
+ )
+
+ if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
+ #Add PCI devices one by one
+ for pci_device in avilable_pci_devices:
+ task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
+ if task:
+ status= self.wait_for_vcenter_task(task, vcenter_conect)
+ if status:
+ self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
+ else:
+ self.logger.info("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
+ return True, vm_obj, vcenter_conect
+ else:
+ self.logger.error("Currently there is no host with"\
+ " {} number of avaialble PCI devices required for VM {}".format(
+ no_of_pci_devices,
+ vmname_andid)
+ )
+ raise vimconn.vimconnNotFoundException(
+ "Currently there is no host with {} "\
+ "number of avaialble PCI devices required for VM {}".format(
+ no_of_pci_devices,
+ vmname_andid))
+ else:
+ self.logger.debug("No infromation about PCI devices {} ",pci_devices)
+
+ except vmodl.MethodFault as error:
+ self.logger.error("Error occurred while adding PCI devices {} ",error)
+ return None, vm_obj, vcenter_conect
+
+ def get_vm_obj(self, content, mob_id):
+ """
+ Method to get the vsphere VM object associated with a given morf ID
+ Args:
+ vapp_uuid - uuid of vApp/VM
+ content - vCenter content object
+ mob_id - mob_id of VM
+
+ Returns:
+ VM and host object
+ """
+ vm_obj = None
+ host_obj = None
+ try :
+ container = content.viewManager.CreateContainerView(content.rootFolder,
+ [vim.VirtualMachine], True
+ )
+ for vm in container.view:
+ mobID = vm._GetMoId()
+ if mobID == mob_id:
+ vm_obj = vm
+ host_obj = vm_obj.runtime.host
+ break
+ except Exception as exp:
+ self.logger.error("Error occurred while finding VM object : {}".format(exp))
+ return host_obj, vm_obj
+
+ def get_pci_devices(self, host, need_devices):
+ """
+ Method to get the details of pci devices on given host
+ Args:
+ host - vSphere host object
+ need_devices - number of pci devices needed on host
+
+ Returns:
+ array of pci devices
+ """
+ all_devices = []
+ all_device_ids = []
+ used_devices_ids = []
+
+ try:
+ if host:
+ pciPassthruInfo = host.config.pciPassthruInfo
+ pciDevies = host.hardware.pciDevice
+
+ for pci_status in pciPassthruInfo:
+ if pci_status.passthruActive:
+ for device in pciDevies:
+ if device.id == pci_status.id:
+ all_device_ids.append(device.id)
+ all_devices.append(device)
+
+ #check if devices are in use
+ avalible_devices = all_devices
+ for vm in host.vm:
+ if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+ vm_devices = vm.config.hardware.device
+ for device in vm_devices:
+ if type(device) is vim.vm.device.VirtualPCIPassthrough:
+ if device.backing.id in all_device_ids:
+ for use_device in avalible_devices:
+ if use_device.id == device.backing.id:
+ avalible_devices.remove(use_device)
+ used_devices_ids.append(device.backing.id)
+ self.logger.debug("Device {} from devices {}"\
+ "is in use".format(device.backing.id,
+ device)
+ )
+ if len(avalible_devices) < need_devices:
+ self.logger.debug("Host {} don't have {} number of active devices".format(host,
+ need_devices))
+ self.logger.debug("found only {} devives {}".format(len(avalible_devices),
+ avalible_devices))
+ return None
+ else:
+ required_devices = avalible_devices[:need_devices]
+ self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
+ len(avalible_devices),
+ host,
+ need_devices))
+ self.logger.info("Retruning {} devices as {}".format(need_devices,
+ required_devices ))
+ return required_devices
+
+ except Exception as exp:
+ self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
+
+ return None
+
+ def get_host_and_PCIdevices(self, content, need_devices):
+ """
+ Method to get the details of pci devices infromation on all hosts
+
+ Args:
+ content - vSphere host object
+ need_devices - number of pci devices needed on host
+
+ Returns:
+ array of pci devices and host object
+ """
+ host_obj = None
+ pci_device_objs = None
+ try:
+ if content:
+ container = content.viewManager.CreateContainerView(content.rootFolder,
+ [vim.HostSystem], True)
+ for host in container.view:
+ devices = self.get_pci_devices(host, need_devices)
+ if devices:
+ host_obj = host
+ pci_device_objs = devices
+ break
+ except Exception as exp:
+ self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
+
+ return host_obj,pci_device_objs
+
+ def relocate_vm(self, dest_host, vm) :
+ """
+ Method to get the relocate VM to new host
+
+ Args:
+ dest_host - vSphere host object
+ vm - vSphere VM object
+
+ Returns:
+ task object
+ """
+ task = None
+ try:
+ relocate_spec = vim.vm.RelocateSpec(host=dest_host)
+ task = vm.Relocate(relocate_spec)
+ self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
+ except Exception as exp:
+ self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
+ dest_host, vm, exp))
+ return task
+
+ def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
+ """
+ Waits and provides updates on a vSphere task
+ """
+ while task.info.state == vim.TaskInfo.State.running:
+ time.sleep(2)
+
+ if task.info.state == vim.TaskInfo.State.success:
+ if task.info.result is not None and not hideResult:
+ self.logger.info('{} completed successfully, result: {}'.format(
+ actionName,
+ task.info.result))
+ else:
+ self.logger.info('Task {} completed successfully.'.format(actionName))
+ else:
+ self.logger.error('{} did not complete successfully: {} '.format(
+ actionName,
+ task.info.error)
+ )
+
+ return task.info.result
+
+ def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
+ """
+ Method to add pci device in given VM
+
+ Args:
+ host_object - vSphere host object
+ vm_object - vSphere VM object
+ host_pci_dev - host_pci_dev must be one of the devices from the
+ host_object.hardware.pciDevice list
+ which is configured as a PCI passthrough device
+
+ Returns:
+ task object
+ """
+ task = None
+ if vm_object and host_object and host_pci_dev:
+ try :
+ #Add PCI device to VM
+ pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
+ systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
+
+ if host_pci_dev.id not in systemid_by_pciid:
+ self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
+ return None
+
+ deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
+ backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
+ id=host_pci_dev.id,
+ systemId=systemid_by_pciid[host_pci_dev.id],
+ vendorId=host_pci_dev.vendorId,
+ deviceName=host_pci_dev.deviceName)
+
+ hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
+
+ new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
+ new_device_config.operation = "add"
+ vmConfigSpec = vim.vm.ConfigSpec()
+ vmConfigSpec.deviceChange = [new_device_config]
+
+ task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
+ self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
+ host_pci_dev, vm_object, host_object)
+ )
+ except Exception as exp:
+ self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
+ host_pci_dev,
+ vm_object,
+ exp))
+ return task
+
+ def get_vcenter_info_rest(self , vapp_uuid):
+ """
+ https://192.169.241.105/api/admin/extension/vimServer/cc82baf9-9f80-4468-bfe9-ce42b3f9dde5
+ Method to get details of vCenter
+
+ Args:
+ vapp_uuid - uuid of vApp or VM
+
+ Returns:
+ Moref Id of VM and deails of vCenter
+ """
+ vm_moref_id = None
+ vm_vcenter = None
+ vm_vcenter_username = None
+ vm_vcenter_port = None
+
+ vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
+ if vm_details and "vm_vcenter_info" in vm_details:
+ vm_moref_id = vm_details["vm_vcenter_info"]["vm_moref_id"]
+ vim_server_href = vm_details["vm_vcenter_info"]["vim_server_href"]
+
+ if vim_server_href:
+ vca = self.connect_as_admin()
+ if not vca:
+ raise vimconn.vimconnConnectionException("self.connect() is failed")
+ if vim_server_href is None:
+ self.logger.error("No url to get vcenter details")
+
+ if vca.vcloud_session and vca.vcloud_session.organization:
+ response = Http.get(url=vim_server_href,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+
+ if response.status_code != requests.codes.ok:
+ self.logger.debug("GET REST API call {} failed. Return status code {}".format(vim_server_href,
+ response.status_code))
+ try:
+ namespaces={"vmext":"http://www.vmware.com/vcloud/extension/v1.5",
+ "vcloud":"http://www.vmware.com/vcloud/v1.5"
+ }
+ xmlroot_respond = XmlElementTree.fromstring(response.content)
+ vm_vcenter_username = xmlroot_respond.find('vmext:Username', namespaces).text
+ vcenter_url = xmlroot_respond.find('vmext:Url', namespaces).text
+ vm_vcenter_port = vcenter_url.split(":")[2]
+ vm_vcenter = vcenter_url.split(":")[1].split("//")[1]
+
+ except Exception as exp :
+ self.logger.info("Error occurred calling rest api for vcenter information {}".format(exp))
+
+ return vm_moref_id , vm_vcenter , vm_vcenter_username, vm_vcenter_port
+
+
+ def get_vm_pci_details(self, vmuuid):
+ """
+ Method to get VM PCI device details from vCenter
+
+ Args:
+ vm_obj - vSphere VM object
+
+ Returns:
+ dict of PCI devives attached to VM
+
+ """
+ vm_pci_devices_info = {}
+ try:
+ vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
+ if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
+ context = None
+ if hasattr(ssl, '_create_unverified_context'):
+ context = ssl._create_unverified_context()
+ vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
+ pwd=self.passwd, port=int(vm_vcenter_port),
+ sslContext=context)
+ atexit.register(Disconnect, vcenter_conect)
+ content = vcenter_conect.RetrieveContent()
+
+ #Get VM and its host
+ host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
+ for device in vm_obj.config.hardware.device:
+ if type(device) == vim.vm.device.VirtualPCIPassthrough:
+ device_details={'devide_id':device.backing.id,
+ 'pciSlotNumber':device.slotInfo.pciSlotNumber
+ }
+ vm_pci_devices_info[device.deviceInfo.label] = device_details
+ except Exception as exp:
+ self.logger.info("Error occurred while getting PCI devices infromationn"\
+ " for VM {} : {}".format(vm_obj,exp))
+ return vm_pci_devices_info
+