import socket
import struct
import netaddr
+import random
# global variable for vcd connector type
STANDALONE = 'standalone'
FLAVOR_RAM_KEY = 'ram'
FLAVOR_VCPUS_KEY = 'vcpus'
FLAVOR_DISK_KEY = 'disk'
-DEFAULT_IP_PROFILE = {'gateway_address':"192.168.1.1",
- 'dhcp_count':50,
- 'subnet_address':"192.168.1.0/24",
+DEFAULT_IP_PROFILE = {'dhcp_count':50,
'dhcp_enabled':True,
- 'dhcp_start_address':"192.168.1.3",
- 'ip_version':"IPv4",
- 'dns_address':"192.168.1.2"
+ 'ip_version':"IPv4"
}
# global variable for wait time
INTERVAL_TIME = 5
'ERROR': 'ERROR', 'DELETED': 'DELETED'
}
-# dict used to store flavor in memory
-flavorlist = {}
-
-
class vimconnector(vimconn.vimconnector):
+ # dict used to store flavor in memory
+ flavorlist = {}
+
def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
"""
dict['admin_username']
dict['admin_password']
+ config - Provide NSX and vCenter information
Returns:
Nothing.
self.nsx_manager = None
self.nsx_user = None
self.nsx_password = None
+ self.vcenter_ip = None
+ self.vcenter_port = None
+ self.vcenter_user = None
+ self.vcenter_password = None
if tenant_name is not None:
orgnameandtenant = tenant_name.split(":")
except KeyError:
raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
+ self.vcenter_ip = config.get("vcenter_ip", None)
+ self.vcenter_port = config.get("vcenter_port", None)
+ self.vcenter_user = config.get("vcenter_user", None)
+ self.vcenter_password = config.get("vcenter_password", None)
+
self.org_uuid = None
self.vca = None
if vdc is None:
raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
- vdcid = vdc.get_id().split(":")[3]
- networks = vca.get_networks(vdc.get_name())
- network_list = []
-
try:
+ vdcid = vdc.get_id().split(":")[3]
+ networks = vca.get_networks(vdc.get_name())
+ network_list = []
+
for network in networks:
filter_entry = {}
net_uuid = network.get_id().split(":")
if not vca:
raise vimconn.vimconnConnectionException("self.connect() is failed")
- vdc = vca.get_vdc(self.tenant_name)
- vdc_id = vdc.get_id().split(":")[3]
+ try:
+ vdc = vca.get_vdc(self.tenant_name)
+ vdc_id = vdc.get_id().split(":")[3]
- networks = vca.get_networks(vdc.get_name())
- filter_dict = {}
+ networks = vca.get_networks(vdc.get_name())
+ filter_dict = {}
- try:
for network in networks:
vdc_network_id = network.get_id().split(":")
if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
"""Obtain flavor details from the VIM
Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
"""
- if flavor_id not in flavorlist:
+ if flavor_id not in vimconnector.flavorlist:
raise vimconn.vimconnNotFoundException("Flavor not found.")
- return flavorlist[flavor_id]
+ return vimconnector.flavorlist[flavor_id]
def new_flavor(self, flavor_data):
"""Adds a tenant flavor to VIM
new_flavor[FLAVOR_DISK_KEY] = disk
# generate a new uuid put to internal dict and return it.
flavor_id = uuid.uuid4()
- flavorlist[str(flavor_id)] = new_flavor
+ vimconnector.flavorlist[str(flavor_id)] = new_flavor
self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
return str(flavor_id)
Returns the used id or raise an exception
"""
- if flavor_id not in flavorlist:
+ if flavor_id not in vimconnector.flavorlist:
raise vimconn.vimconnNotFoundException("Flavor not found.")
- flavorlist.pop(flavor_id, None)
+ vimconnector.flavorlist.pop(flavor_id, None)
return flavor_id
def new_image(self, image_dict):
# create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
# status change.
# if VCD can parse OVF we upload VMDK file
- for catalog in vca.get_catalogs():
- if catalog_name != catalog.name:
- continue
- link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
- link.get_rel() == 'add', catalog.get_Link())
- assert len(link) == 1
- data = """
- <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
- """ % (escape(catalog_name), escape(description))
- headers = vca.vcloud_session.get_vcloud_headers()
- headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
- response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
- if response.status_code == requests.codes.created:
- catalogItem = XmlElementTree.fromstring(response.content)
- entity = [child for child in catalogItem if
- child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
- href = entity.get('href')
- template = href
- response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
- verify=vca.verify, logger=self.logger)
-
- if response.status_code == requests.codes.ok:
- media = mediaType.parseString(response.content, True)
- link = filter(lambda link: link.get_rel() == 'upload:default',
- media.get_Files().get_File()[0].get_Link())[0]
- headers = vca.vcloud_session.get_vcloud_headers()
- headers['Content-Type'] = 'Content-Type text/xml'
- response = Http.put(link.get_href(),
- data=open(media_file_name, 'rb'),
- headers=headers,
+ try:
+ for catalog in vca.get_catalogs():
+ if catalog_name != catalog.name:
+ continue
+ link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
+ link.get_rel() == 'add', catalog.get_Link())
+ assert len(link) == 1
+ data = """
+ <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
+ """ % (escape(catalog_name), escape(description))
+ headers = vca.vcloud_session.get_vcloud_headers()
+ headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
+ response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
+ if response.status_code == requests.codes.created:
+ catalogItem = XmlElementTree.fromstring(response.content)
+ entity = [child for child in catalogItem if
+ child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+ href = entity.get('href')
+ template = href
+ response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
verify=vca.verify, logger=self.logger)
- if response.status_code != requests.codes.ok:
- self.logger.debug(
- "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
- media_file_name))
- return False
- # TODO fix this with aync block
- time.sleep(5)
-
- self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
-
- # uploading VMDK file
- # check status of OVF upload and upload remaining files.
- response = Http.get(template,
- headers=vca.vcloud_session.get_vcloud_headers(),
- verify=vca.verify,
- logger=self.logger)
+ if response.status_code == requests.codes.ok:
+ media = mediaType.parseString(response.content, True)
+ link = filter(lambda link: link.get_rel() == 'upload:default',
+ media.get_Files().get_File()[0].get_Link())[0]
+ headers = vca.vcloud_session.get_vcloud_headers()
+ headers['Content-Type'] = 'Content-Type text/xml'
+ response = Http.put(link.get_href(),
+ data=open(media_file_name, 'rb'),
+ headers=headers,
+ verify=vca.verify, logger=self.logger)
+ if response.status_code != requests.codes.ok:
+ self.logger.debug(
+ "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
+ media_file_name))
+ return False
+
+ # TODO fix this with aync block
+ time.sleep(5)
+
+ self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
+
+ # uploading VMDK file
+ # check status of OVF upload and upload remaining files.
+ response = Http.get(template,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=self.logger)
- if response.status_code == requests.codes.ok:
- media = mediaType.parseString(response.content, True)
- number_of_files = len(media.get_Files().get_File())
- for index in xrange(0, number_of_files):
- links_list = filter(lambda link: link.get_rel() == 'upload:default',
- media.get_Files().get_File()[index].get_Link())
- for link in links_list:
- # we skip ovf since it already uploaded.
- if 'ovf' in link.get_href():
- continue
- # The OVF file and VMDK must be in a same directory
- head, tail = os.path.split(media_file_name)
- file_vmdk = head + '/' + link.get_href().split("/")[-1]
- if not os.path.isfile(file_vmdk):
- return False
- statinfo = os.stat(file_vmdk)
- if statinfo.st_size == 0:
- return False
- hrefvmdk = link.get_href()
-
- if progress:
- print("Uploading file: {}".format(file_vmdk))
- if progress:
- widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
- FileTransferSpeed()]
- progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
-
- bytes_transferred = 0
- f = open(file_vmdk, 'rb')
- while bytes_transferred < statinfo.st_size:
- my_bytes = f.read(chunk_bytes)
- if len(my_bytes) <= chunk_bytes:
- headers = vca.vcloud_session.get_vcloud_headers()
- headers['Content-Range'] = 'bytes %s-%s/%s' % (
- bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
- headers['Content-Length'] = str(len(my_bytes))
- response = Http.put(hrefvmdk,
- headers=headers,
- data=my_bytes,
- verify=vca.verify,
- logger=None)
-
- if response.status_code == requests.codes.ok:
- bytes_transferred += len(my_bytes)
- if progress:
- progress_bar.update(bytes_transferred)
- else:
- self.logger.debug(
- 'file upload failed with error: [%s] %s' % (response.status_code,
- response.content))
-
- f.close()
- return False
- f.close()
- if progress:
- progress_bar.finish()
- time.sleep(10)
- return True
- else:
- self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
- format(catalog_name, media_file_name))
- return False
+ if response.status_code == requests.codes.ok:
+ media = mediaType.parseString(response.content, True)
+ number_of_files = len(media.get_Files().get_File())
+ for index in xrange(0, number_of_files):
+ links_list = filter(lambda link: link.get_rel() == 'upload:default',
+ media.get_Files().get_File()[index].get_Link())
+ for link in links_list:
+ # we skip ovf since it already uploaded.
+ if 'ovf' in link.get_href():
+ continue
+ # The OVF file and VMDK must be in a same directory
+ head, tail = os.path.split(media_file_name)
+ file_vmdk = head + '/' + link.get_href().split("/")[-1]
+ if not os.path.isfile(file_vmdk):
+ return False
+ statinfo = os.stat(file_vmdk)
+ if statinfo.st_size == 0:
+ return False
+ hrefvmdk = link.get_href()
+
+ if progress:
+ print("Uploading file: {}".format(file_vmdk))
+ if progress:
+ widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
+ FileTransferSpeed()]
+ progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
+
+ bytes_transferred = 0
+ f = open(file_vmdk, 'rb')
+ while bytes_transferred < statinfo.st_size:
+ my_bytes = f.read(chunk_bytes)
+ if len(my_bytes) <= chunk_bytes:
+ headers = vca.vcloud_session.get_vcloud_headers()
+ headers['Content-Range'] = 'bytes %s-%s/%s' % (
+ bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
+ headers['Content-Length'] = str(len(my_bytes))
+ response = Http.put(hrefvmdk,
+ headers=headers,
+ data=my_bytes,
+ verify=vca.verify,
+ logger=None)
+
+ if response.status_code == requests.codes.ok:
+ bytes_transferred += len(my_bytes)
+ if progress:
+ progress_bar.update(bytes_transferred)
+ else:
+ self.logger.debug(
+ 'file upload failed with error: [%s] %s' % (response.status_code,
+ response.content))
+
+ f.close()
+ return False
+ f.close()
+ if progress:
+ progress_bar.finish()
+ time.sleep(10)
+ return True
+ else:
+ self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
+ format(catalog_name, media_file_name))
+ return False
+ except Exception as exp:
+ self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
+ .format(catalog_name,media_file_name, exp))
+ raise vimconn.vimconnException(
+ "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
+ .format(catalog_name,media_file_name, exp))
self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
return False
self.logger.debug("File name {} Catalog Name {} file path {} "
"vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
- catalogs = vca.get_catalogs()
+ try:
+ catalogs = vca.get_catalogs()
+ except Exception as exp:
+ self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
+ raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
+
if len(catalogs) == 0:
self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
result = self.create_vimcatalog(vca, catalog_md5_name)
"""
self.logger.info("Creating new instance for entry {}".format(name))
- self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {}".
- format(description, start, image_id, flavor_id, net_list, cloud_config))
+ self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {}".format(
+ description, start, image_id, flavor_id, net_list, cloud_config))
vca = self.connect()
if not vca:
raise vimconn.vimconnConnectionException("self.connect() is failed.")
vm_disk = None
pci_devices_info = []
if flavor_id is not None:
- if flavor_id not in flavorlist:
+ if flavor_id not in vimconnector.flavorlist:
raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
"Failed retrieve flavor information "
"flavor id {}".format(name, flavor_id))
else:
try:
- flavor = flavorlist[flavor_id]
+ flavor = vimconnector.flavorlist[flavor_id]
vm_cpus = flavor[FLAVOR_VCPUS_KEY]
vm_memory = flavor[FLAVOR_RAM_KEY]
vm_disk = flavor[FLAVOR_DISK_KEY]
for interface in numa.get("interfaces",() ):
if interface["dedicated"].strip()=="yes":
pci_devices_info.append(interface)
- except KeyError:
- raise vimconn.vimconnException("Corrupted flavor. {}".format(flavor_id))
+ except Exception as exp:
+ raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
# image upload creates template name as catalog name space Template.
templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
# use: 'data', 'bridge', 'mgmt'
# create vApp. Set vcpu and ram based on flavor id.
- vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName,
- self.get_catalogbyid(image_id, catalogs),
- network_name=None, # None while creating vapp
- network_mode=network_mode,
- vm_name=vmname_andid,
- vm_cpus=vm_cpus, # can be None if flavor is None
- vm_memory=vm_memory) # can be None if flavor is None
-
- if vapptask is None or vapptask is False:
- raise vimconn.vimconnUnexpectedResponse("new_vminstance(): failed deploy vApp {}".format(vmname_andid))
- if type(vapptask) is VappTask:
- vca.block_until_completed(vapptask)
+ try:
+ vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName,
+ self.get_catalogbyid(image_id, catalogs),
+ network_name=None, # None while creating vapp
+ network_mode=network_mode,
+ vm_name=vmname_andid,
+ vm_cpus=vm_cpus, # can be None if flavor is None
+ vm_memory=vm_memory) # can be None if flavor is None
+
+ if vapptask is None or vapptask is False:
+ raise vimconn.vimconnUnexpectedResponse(
+ "new_vminstance(): failed to create vApp {}".format(vmname_andid))
+ if type(vapptask) is VappTask:
+ vca.block_until_completed(vapptask)
+
+ except Exception as exp:
+ raise vimconn.vimconnUnexpectedResponse(
+ "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
# we should have now vapp in undeployed state.
- vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
- vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
+ try:
+ vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+ vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
+ except Exception as exp:
+ raise vimconn.vimconnUnexpectedResponse(
+ "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+ .format(vmname_andid, exp))
+
if vapp is None:
raise vimconn.vimconnUnexpectedResponse(
- "new_vminstance(): Failed failed retrieve vApp {} after we deployed".format(
+ "new_vminstance(): Failed to retrieve vApp {} after creation".format(
vmname_andid))
#Add PCI passthrough configrations
if result :
self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
+ if numas:
+ # Assigning numa affinity setting
+ for numa in numas:
+ if 'paired-threads-id' in numa:
+ paired_threads_id = numa['paired-threads-id']
+ self.set_numa_affinity(vapp_uuid, paired_threads_id)
+
# add NICs & connect to networks in netlist
try:
self.logger.info("Request to connect VM to a network: {}".format(net_list))
if type(task) is GenericTask:
vca.block_until_completed(task)
# connect network to VM - with all DHCP by default
- self.logger.info("new_vminstance(): Connecting VM to a network {}".format(nets[0].name))
- task = vapp.connect_vms(nets[0].name,
- connection_index=nicIndex,
- connections_primary_index=primary_nic_index,
- ip_allocation_mode='DHCP')
- if type(task) is GenericTask:
- vca.block_until_completed(task)
+
+ type_list = ['PF','VF','VFnotShared']
+ if 'type' in net and net['type'] not in type_list:
+ # fetching nic type from vnf
+ if 'model' in net:
+ nic_type = net['model']
+ self.logger.info("new_vminstance(): adding network adapter "\
+ "to a network {}".format(nets[0].name))
+ self.add_network_adapter_to_vms(vapp, nets[0].name,
+ primary_nic_index,
+ nicIndex,
+ net,
+ nic_type=nic_type)
+ else:
+ self.logger.info("new_vminstance(): adding network adapter "\
+ "to a network {}".format(nets[0].name))
+ self.add_network_adapter_to_vms(vapp, nets[0].name,
+ primary_nic_index,
+ nicIndex,
+ net)
nicIndex += 1
- except KeyError:
- # it might be a case if specific mandatory entry in dict is empty
- self.logger.debug("Key error {}".format(KeyError.message))
- raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
- # deploy and power on vm
- self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
- deploytask = vapp.deploy(powerOn=False)
- if type(deploytask) is GenericTask:
- vca.block_until_completed(deploytask)
-
- # If VM has PCI devices reserve memory for VM
- if PCI_devices_status and vm_obj and vcenter_conect:
- memReserve = vm_obj.config.hardware.memoryMB
- spec = vim.vm.ConfigSpec()
- spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
- task = vm_obj.ReconfigVM_Task(spec=spec)
- if task:
- result = self.wait_for_vcenter_task(task, vcenter_conect)
- self.logger.info("Reserved memmoery {} MB for "\
- "VM VM status: {}".format(str(memReserve),result))
- else:
- self.logger.info("Fail to reserved memmoery {} to VM {}".format(
- str(memReserve),str(vm_obj)))
+ # cloud-init for ssh-key injection
+ if cloud_config:
+ self.cloud_init(vapp,cloud_config)
+
+ # deploy and power on vm
+ self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
+ deploytask = vapp.deploy(powerOn=False)
+ if type(deploytask) is GenericTask:
+ vca.block_until_completed(deploytask)
+
+ # If VM has PCI devices reserve memory for VM
+ if PCI_devices_status and vm_obj and vcenter_conect:
+ memReserve = vm_obj.config.hardware.memoryMB
+ spec = vim.vm.ConfigSpec()
+ spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
+ task = vm_obj.ReconfigVM_Task(spec=spec)
+ if task:
+ result = self.wait_for_vcenter_task(task, vcenter_conect)
+ self.logger.info("Reserved memmoery {} MB for "\
+ "VM VM status: {}".format(str(memReserve),result))
+ else:
+ self.logger.info("Fail to reserved memmoery {} to VM {}".format(
+ str(memReserve),str(vm_obj)))
+
+ self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
+ poweron_task = vapp.poweron()
+ if type(poweron_task) is GenericTask:
+ vca.block_until_completed(poweron_task)
- self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
- poweron_task = vapp.poweron()
- if type(poweron_task) is GenericTask:
- vca.block_until_completed(poweron_task)
+ except Exception as exp :
+ # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
+ self.logger.debug("new_vminstance(): Failed create new vm instance {}".format(name, exp))
+ raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {}".format(name, exp))
# check if vApp deployed and if that the case return vApp UUID otherwise -1
wait_time = 0
vapp_uuid = None
while wait_time <= MAX_WAIT_TIME:
- vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+ try:
+ vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+ except Exception as exp:
+ raise vimconn.vimconnUnexpectedResponse(
+ "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+ .format(vmname_andid, exp))
+
if vapp and vapp.me.deployed:
vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
break
self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
- mac_ip_addr={}
- rheaders = {'Content-Type': 'application/xml'}
- iso_edges = ['edge-2','edge-3','edge-6','edge-7','edge-8','edge-9','edge-10']
-
- try:
- for edge in iso_edges:
- nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
- self.logger.debug("refresh_vms_status: NSX Manager url: {}".format(nsx_api_url))
-
- resp = requests.get(self.nsx_manager + nsx_api_url,
- auth = (self.nsx_user, self.nsx_password),
- verify = False, headers = rheaders)
-
- if resp.status_code == requests.codes.ok:
- dhcp_leases = XmlElementTree.fromstring(resp.text)
- for child in dhcp_leases:
- if child.tag == 'dhcpLeaseInfo':
- dhcpLeaseInfo = child
- for leaseInfo in dhcpLeaseInfo:
- for elem in leaseInfo:
- if (elem.tag)=='macAddress':
- mac_addr = elem.text
- if (elem.tag)=='ipAddress':
- ip_addr = elem.text
- if (mac_addr) is not None:
- mac_ip_addr[mac_addr]= ip_addr
- self.logger.debug("NSX Manager DHCP Lease info: mac_ip_addr : {}".format(mac_ip_addr))
- else:
- self.logger.debug("Error occurred while getting DHCP lease info from NSX Manager: {}".format(resp.content))
- except KeyError:
- self.logger.debug("Error in response from NSX Manager {}".format(KeyError.message))
- self.logger.debug(traceback.format_exc())
-
vca = self.connect()
if not vca:
raise vimconn.vimconnConnectionException("self.connect() is failed.")
raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
vms_dict = {}
+ nsx_edge_list = []
for vmuuid in vm_list:
vmname = self.get_namebyvappid(vca, vdc, vmuuid)
if vmname is not None:
- the_vapp = vca.get_vapp(vdc, vmname)
- vm_info = the_vapp.get_vms_details()
- vm_status = vm_info[0]['status']
- vm_pci_details = self.get_vm_pci_details(vmuuid)
- vm_info[0].update(vm_pci_details)
+ try:
+ the_vapp = vca.get_vapp(vdc, vmname)
+ vm_info = the_vapp.get_vms_details()
+ vm_status = vm_info[0]['status']
+ vm_pci_details = self.get_vm_pci_details(vmuuid)
+ vm_info[0].update(vm_pci_details)
- vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
- 'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
- 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
+ vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
+ 'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
+ 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
- # get networks
- try:
+ # get networks
vm_app_networks = the_vapp.get_vms_network_info()
for vapp_network in vm_app_networks:
for vm_network in vapp_network:
if vm_network['name'] == vmname:
#Assign IP Address based on MAC Address in NSX DHCP lease info
- for mac_adres,ip_adres in mac_ip_addr.iteritems():
- if mac_adres == vm_network['mac']:
- vm_network['ip']=ip_adres
+ if vm_network['ip'] is None:
+ if not nsx_edge_list:
+ nsx_edge_list = self.get_edge_details()
+ if nsx_edge_list is None:
+ raise vimconn.vimconnException("refresh_vms_status:"\
+ "Failed to get edge details from NSX Manager")
+ if vm_network['mac'] is not None:
+ vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])
+
+ vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
interface = {"mac_address": vm_network['mac'],
- "vim_net_id": self.get_network_id_by_name(vm_network['network_name']),
- "vim_interface_id": self.get_network_id_by_name(vm_network['network_name']),
+ "vim_net_id": vm_net_id,
+ "vim_interface_id": vm_net_id,
'ip_address': vm_network['ip']}
# interface['vim_info'] = yaml.safe_dump(vm_network)
vm_dict["interfaces"].append(interface)
# add a vm to vm dict
vms_dict.setdefault(vmuuid, vm_dict)
- except KeyError:
- self.logger.debug("Error in respond {}".format(KeyError.message))
+ except Exception as exp:
+ self.logger.debug("Error in response {}".format(exp))
self.logger.debug(traceback.format_exc())
return vms_dict
+
+    def get_edge_details(self):
+        """Query the NSX Manager REST API for the list of NSX edge ids.
+
+        Returns: list of edge id strings (e.g. 'edge-2') on success,
+                 None if the HTTP GET did not return 200 OK.
+        Raises:  vimconn.vimconnException if NSX reports zero edges, an
+                 unexpected page element is seen, or the request fails.
+        """
+        edge_list = []
+        rheaders = {'Content-Type': 'application/xml'}
+        nsx_api_url = '/api/4.0/edges'
+
+        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
+
+        try:
+            # NOTE(review): TLS certificate verification is disabled (verify=False)
+            resp = requests.get(self.nsx_manager + nsx_api_url,
+                                auth = (self.nsx_user, self.nsx_password),
+                                verify = False, headers = rheaders)
+            if resp.status_code == requests.codes.ok:
+                # The response body is a <pagedEdgeList>; each <edgePage> holds a
+                # <pagingInfo> (with totalCount) plus one <edgeSummary> per edge.
+                paged_Edge_List = XmlElementTree.fromstring(resp.text)
+                for edge_pages in paged_Edge_List:
+                    if edge_pages.tag == 'edgePage':
+                        for edge_summary in edge_pages:
+                            if edge_summary.tag == 'pagingInfo':
+                                for element in edge_summary:
+                                    # totalCount == '0' -> no edges configured on NSX at all
+                                    if element.tag == 'totalCount' and element.text == '0':
+                                        raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
+                                                                       .format(self.nsx_manager))
+
+                            if edge_summary.tag == 'edgeSummary':
+                                for element in edge_summary:
+                                    # collect the edge identifier, e.g. 'edge-2'
+                                    if element.tag == 'id':
+                                        edge_list.append(element.text)
+                    else:
+                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
+                                                       .format(self.nsx_manager))
+
+                if not edge_list:
+                    raise vimconn.vimconnException("get_edge_details: "\
+                                                   "No NSX edge details found: {}"
+                                                   .format(self.nsx_manager))
+                else:
+                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
+                    return edge_list
+            else:
+                # non-200 is logged and reported as None rather than raised
+                self.logger.debug("get_edge_details: "
+                                  "Failed to get NSX edge details from NSX Manager: {}"
+                                  .format(resp.content))
+                return None
+
+        except Exception as exp:
+            self.logger.debug("get_edge_details: "\
+                              "Failed to get NSX edge details from NSX Manager: {}"
+                              .format(exp))
+            raise vimconn.vimconnException("get_edge_details: "\
+                              "Failed to get NSX edge details from NSX Manager: {}"
+                              .format(exp))
+
+
+    def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
+        """Get IP address details from NSX edges, using the MAC address
+        PARAMS: nsx_edges : List of NSX edges
+                mac_address : Find IP address corresponding to this MAC address
+        Returns: IP address corresponding to the provided MAC address,
+                 or None if no DHCP lease matches or lease info is unavailable
+        """
+
+        ip_addr = None
+        rheaders = {'Content-Type': 'application/xml'}
+
+        self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
+
+        try:
+            for edge in nsx_edges:
+                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
+
+                resp = requests.get(self.nsx_manager + nsx_api_url,
+                                    auth = (self.nsx_user, self.nsx_password),
+                                    verify = False, headers = rheaders)
+
+                if resp.status_code == requests.codes.ok:
+                    dhcp_leases = XmlElementTree.fromstring(resp.text)
+                    for child in dhcp_leases:
+                        if child.tag == 'dhcpLeaseInfo':
+                            dhcpLeaseInfo = child
+                            for leaseInfo in dhcpLeaseInfo:
+                                # Reset per lease: a lease entry without a
+                                # macAddress element must not reuse the previous
+                                # lease's MAC (and this avoids a NameError when
+                                # the very first lease lacks one).
+                                edge_mac_addr = None
+                                for elem in leaseInfo:
+                                    if (elem.tag)=='macAddress':
+                                        edge_mac_addr = elem.text
+                                    if (elem.tag)=='ipAddress':
+                                        ip_addr = elem.text
+                                if edge_mac_addr is not None:
+                                    if edge_mac_addr == mac_address:
+                                        self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
+                                                          .format(ip_addr, mac_address,edge))
+                                        return ip_addr
+                else:
+                    # best-effort: a failing edge is logged and the next edge tried
+                    self.logger.debug("get_ipaddr_from_NSXedge: "\
+                                      "Error occurred while getting DHCP lease info from NSX Manager: {}"
+                                      .format(resp.content))
+
+            self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
+            return None
+
+        except XmlElementTree.ParseError as Err:
+            self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
+
+
def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
"""Send and action over a VM instance from VIM
Returns the vm_id if the action was successfully sent to the VIM"""
if "start" in action_dict:
vm_info = the_vapp.get_vms_details()
vm_status = vm_info[0]['status']
- self.logger.info("Power on vApp: vm_status:{} {}".format(type(vm_status),vm_status))
+ self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
if vm_status == "Suspended" or vm_status == "Powered off":
power_on_task = the_vapp.poweron()
- if power_on_task is not None and type(power_on_task) is GenericTask:
- result = vca.block_until_completed(power_on_task)
- if result:
- self.logger.info("action_vminstance: Powered on vApp: {}".format(vapp_name))
- else:
- self.logger.info("action_vminstance: Failed to power on vApp: {}".format(vapp_name))
- else:
- self.logger.info("action_vminstance: Wait for vApp {} to power on".format(vapp_name))
- elif "rebuild" in action_dict:
- self.logger.info("action_vminstance: Rebuilding vApp: {}".format(vapp_name))
- power_on_task = the_vapp.deploy(powerOn=True)
- if type(power_on_task) is GenericTask:
result = vca.block_until_completed(power_on_task)
- if result:
- self.logger.info("action_vminstance: Rebuilt vApp: {}".format(vapp_name))
- else:
- self.logger.info("action_vminstance: Failed to rebuild vApp: {}".format(vapp_name))
- else:
- self.logger.info("action_vminstance: Wait for vApp rebuild {} to power on".format(vapp_name))
+ self.instance_actions_result("start", result, vapp_name)
+ elif "rebuild" in action_dict:
+ self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
+ rebuild_task = the_vapp.deploy(powerOn=True)
+ result = vca.block_until_completed(rebuild_task)
+ self.instance_actions_result("rebuild", result, vapp_name)
elif "pause" in action_dict:
- pass
- ## server.pause()
+ self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
+ pause_task = the_vapp.undeploy(action='suspend')
+ result = vca.block_until_completed(pause_task)
+ self.instance_actions_result("pause", result, vapp_name)
elif "resume" in action_dict:
- pass
- ## server.resume()
+ self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
+ power_task = the_vapp.poweron()
+ result = vca.block_until_completed(power_task)
+ self.instance_actions_result("resume", result, vapp_name)
elif "shutoff" in action_dict or "shutdown" in action_dict:
+ action_name , value = action_dict.items()[0]
+ self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
power_off_task = the_vapp.undeploy(action='powerOff')
- if type(power_off_task) is GenericTask:
- result = vca.block_until_completed(power_off_task)
- if result:
- self.logger.info("action_vminstance: Powered off vApp: {}".format(vapp_name))
- else:
- self.logger.info("action_vminstance: Failed to power off vApp: {}".format(vapp_name))
+ result = vca.block_until_completed(power_off_task)
+ if action_name == "shutdown":
+ self.instance_actions_result("shutdown", result, vapp_name)
else:
- self.logger.info("action_vminstance: Wait for vApp {} to power off".format(vapp_name))
+ self.instance_actions_result("shutoff", result, vapp_name)
elif "forceOff" in action_dict:
- the_vapp.reset()
- elif "terminate" in action_dict:
- the_vapp.delete()
- # elif "createImage" in action_dict:
- # server.create_image()
+ result = the_vapp.undeploy(action='force')
+ self.instance_actions_result("forceOff", result, vapp_name)
+ elif "reboot" in action_dict:
+ self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
+ reboot_task = the_vapp.reboot()
else:
- pass
- except:
- pass
+ raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
+ return vm__vim_uuid
+ except Exception as exp :
+ self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
+ raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
+
+    def instance_actions_result(self, action, result, vapp_name):
+        """Log the outcome of an action_vminstance() operation.
+        PARAMS: action    : name of the action performed (e.g. 'start', 'pause')
+                result    : truthy when the vCD task completed successfully
+                vapp_name : name of the vApp the action was applied to
+        """
+        if result:
+            self.logger.info("action_vminstance: Successfully {} the vApp: {}".format(action, vapp_name))
+        else:
+            self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
def get_vminstance_console(self, vm_id, console_type="vnc"):
"""
if network_uuid is None:
return network_uuid
- content = self.get_network_action(network_uuid=network_uuid)
try:
+ content = self.get_network_action(network_uuid=network_uuid)
vm_list_xmlroot = XmlElementTree.fromstring(content)
network_configuration['status'] = vm_list_xmlroot.get("status")
if tagKey != "":
network_configuration[tagKey] = configuration.text.strip()
return network_configuration
- except:
- pass
+ except Exception as exp :
+ self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
+ raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
return network_configuration
vm_list_xmlroot = XmlElementTree.fromstring(content)
vcd_uuid = vm_list_xmlroot.get('id').split(":")
if len(vcd_uuid) == 4:
- self.logger.info("Create new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
+ self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
return vcd_uuid[3]
except:
self.logger.debug("Failed create network {}".format(network_name))
except:
return None
- #Configure IP profile of the network
- ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
-
- gateway_address=ip_profile['gateway_address']
- dhcp_count=int(ip_profile['dhcp_count'])
- subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
+ try:
+ #Configure IP profile of the network
+ ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
- if ip_profile['dhcp_enabled']==True:
- dhcp_enabled='true'
- else:
- dhcp_enabled='false'
- dhcp_start_address=ip_profile['dhcp_start_address']
+ if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
+ subnet_rand = random.randint(0, 255)
+ ip_base = "192.168.{}.".format(subnet_rand)
+ ip_profile['subnet_address'] = ip_base + "0/24"
+ else:
+ ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
+
+ if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
+ ip_profile['gateway_address']=ip_base + "1"
+ if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
+ ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
+ if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
+ ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
+ if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
+ ip_profile['dhcp_start_address']=ip_base + "3"
+ if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
+ ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
+ if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
+ ip_profile['dns_address']=ip_base + "2"
+
+ gateway_address=ip_profile['gateway_address']
+ dhcp_count=int(ip_profile['dhcp_count'])
+ subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
+
+ if ip_profile['dhcp_enabled']==True:
+ dhcp_enabled='true'
+ else:
+ dhcp_enabled='false'
+ dhcp_start_address=ip_profile['dhcp_start_address']
- #derive dhcp_end_address from dhcp_start_address & dhcp_count
- end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
- end_ip_int += dhcp_count - 1
- dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
+ #derive dhcp_end_address from dhcp_start_address & dhcp_count
+ end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
+ end_ip_int += dhcp_count - 1
+ dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
- ip_version=ip_profile['ip_version']
- dns_address=ip_profile['dns_address']
+ ip_version=ip_profile['ip_version']
+ dns_address=ip_profile['dns_address']
+ except KeyError as exp:
+ self.logger.debug("Create Network REST: Key error {}".format(exp))
+ raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
# either use client provided UUID or search for a first available
# if both are not defined we return none
logger=vca.logger)
if response.status_code != 201:
- self.logger.debug("Create Network POST REST API call failed. Return status code {}"
- .format(response.status_code))
+ self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
+ .format(response.status_code,response.content))
else:
network = networkType.parseString(response.content, True)
create_nw_task = network.get_Tasks().get_Task()[0]
# if we all ok we respond with content after network creation completes
# otherwise by default return None
if create_nw_task is not None:
- self.logger.debug("Create Network REST : Waiting for Nw creation complete")
+ self.logger.debug("Create Network REST : Waiting for Network creation complete")
status = vca.block_until_completed(create_nw_task)
if status:
return response.content
vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
if vmext is not None:
vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
- vm_vcenter_info["vim_server_href"] = vmext.find('vmext:VimServerRef', namespaces).attrib['href']
parsed_respond["vm_vcenter_info"]= vm_vcenter_info
virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
vm_obj = None
vcenter_conect = None
self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
- #Assuming password of vCenter user is same as password of vCloud user
- vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vapp_uuid)
- self.logger.info("vm_moref_id, {} vm_vcenter_host {} vm_vcenter_username{} "\
- "vm_vcenter_port{}".format(
- vm_moref_id, vm_vcenter_host,
- vm_vcenter_username, vm_vcenter_port))
- if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
+ try:
+ vm_vcenter_info = self.get_vm_vcenter_info(vapp_uuid)
+ except Exception as exp:
+ self.logger.error("Error occurred while getting vCenter infromationn"\
+ " for VM : {}".format(exp))
+ raise vimconn.vimconnException(message=exp)
+
+ if vm_vcenter_info["vm_moref_id"]:
context = None
if hasattr(ssl, '_create_unverified_context'):
context = ssl._create_unverified_context()
try:
no_of_pci_devices = len(pci_devices)
if no_of_pci_devices > 0:
- vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
- pwd=self.passwd, port=int(vm_vcenter_port) ,
- sslContext=context)
+ vcenter_conect = SmartConnect(
+ host=vm_vcenter_info["vm_vcenter_ip"],
+ user=vm_vcenter_info["vm_vcenter_user"],
+ pwd=vm_vcenter_info["vm_vcenter_password"],
+ port=int(vm_vcenter_info["vm_vcenter_port"]),
+ sslContext=context)
atexit.register(Disconnect, vcenter_conect)
content = vcenter_conect.RetrieveContent()
#Get VM and its host
- host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
+ host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"])
self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
if host_obj and vm_obj:
#get PCI devies from host on which vapp is currently installed
if status:
self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
else:
- self.logger.info("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
+ self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
return True, vm_obj, vcenter_conect
else:
self.logger.error("Currently there is no host with"\
exp))
return task
- def get_vcenter_info_rest(self , vapp_uuid):
+ def get_vm_vcenter_info(self , vapp_uuid):
"""
- https://192.169.241.105/api/admin/extension/vimServer/cc82baf9-9f80-4468-bfe9-ce42b3f9dde5
- Method to get details of vCenter
+ Method to get details of vCenter and vm
Args:
vapp_uuid - uuid of vApp or VM
Returns:
Moref Id of VM and deails of vCenter
"""
- vm_moref_id = None
- vm_vcenter = None
- vm_vcenter_username = None
- vm_vcenter_port = None
-
- vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
- if vm_details and "vm_vcenter_info" in vm_details:
- vm_moref_id = vm_details["vm_vcenter_info"]["vm_moref_id"]
- vim_server_href = vm_details["vm_vcenter_info"]["vim_server_href"]
-
- if vim_server_href:
- vca = self.connect_as_admin()
- if not vca:
- raise vimconn.vimconnConnectionException("self.connect() is failed")
- if vim_server_href is None:
- self.logger.error("No url to get vcenter details")
-
- if vca.vcloud_session and vca.vcloud_session.organization:
- response = Http.get(url=vim_server_href,
- headers=vca.vcloud_session.get_vcloud_headers(),
- verify=vca.verify,
- logger=vca.logger)
+ vm_vcenter_info = {}
- if response.status_code != requests.codes.ok:
- self.logger.debug("GET REST API call {} failed. Return status code {}".format(vim_server_href,
- response.status_code))
- try:
- namespaces={"vmext":"http://www.vmware.com/vcloud/extension/v1.5",
- "vcloud":"http://www.vmware.com/vcloud/v1.5"
- }
- xmlroot_respond = XmlElementTree.fromstring(response.content)
- vm_vcenter_username = xmlroot_respond.find('vmext:Username', namespaces).text
- vcenter_url = xmlroot_respond.find('vmext:Url', namespaces).text
- vm_vcenter_port = vcenter_url.split(":")[2]
- vm_vcenter = vcenter_url.split(":")[1].split("//")[1]
+ if self.vcenter_ip is not None:
+ vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
+ else:
+ raise vimconn.vimconnException(message="vCenter IP is not provided."\
+ " Please provide vCenter IP while attaching datacenter to tenant in --config")
+ if self.vcenter_port is not None:
+ vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
+ else:
+ raise vimconn.vimconnException(message="vCenter port is not provided."\
+ " Please provide vCenter port while attaching datacenter to tenant in --config")
+ if self.vcenter_user is not None:
+ vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
+ else:
+ raise vimconn.vimconnException(message="vCenter user is not provided."\
+ " Please provide vCenter user while attaching datacenter to tenant in --config")
+
+ if self.vcenter_password is not None:
+ vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
+ else:
+ raise vimconn.vimconnException(message="vCenter user password is not provided."\
+ " Please provide vCenter user password while attaching datacenter to tenant in --config")
+ try:
+ vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
+ if vm_details and "vm_vcenter_info" in vm_details:
+ vm_vcenter_info["vm_moref_id"] = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
- except Exception as exp :
- self.logger.info("Error occurred calling rest api for vcenter information {}".format(exp))
+ return vm_vcenter_info
- return vm_moref_id , vm_vcenter , vm_vcenter_username, vm_vcenter_port
+ except Exception as exp:
+ self.logger.error("Error occurred while getting vCenter infromationn"\
+ " for VM : {}".format(exp))
def get_vm_pci_details(self, vmuuid):
"""
vm_pci_devices_info = {}
try:
- vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
- if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
+ vm_vcenter_info = self.get_vm_vcenter_info(vmuuid)
+ if vm_vcenter_info["vm_moref_id"]:
context = None
if hasattr(ssl, '_create_unverified_context'):
context = ssl._create_unverified_context()
- vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
- pwd=self.passwd, port=int(vm_vcenter_port),
- sslContext=context)
+ vcenter_conect = SmartConnect(host=vm_vcenter_info["vm_vcenter_ip"],
+ user=vm_vcenter_info["vm_vcenter_user"],
+ pwd=vm_vcenter_info["vm_vcenter_password"],
+ port=int(vm_vcenter_info["vm_vcenter_port"]),
+ sslContext=context
+ )
atexit.register(Disconnect, vcenter_conect)
content = vcenter_conect.RetrieveContent()
#Get VM and its host
- host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
- for device in vm_obj.config.hardware.device:
- if type(device) == vim.vm.device.VirtualPCIPassthrough:
- device_details={'devide_id':device.backing.id,
- 'pciSlotNumber':device.slotInfo.pciSlotNumber
- }
- vm_pci_devices_info[device.deviceInfo.label] = device_details
+ if content:
+ host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"])
+ if host_obj and vm_obj:
+ vm_pci_devices_info["host_name"]= host_obj.name
+ vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
+ for device in vm_obj.config.hardware.device:
+ if type(device) == vim.vm.device.VirtualPCIPassthrough:
+ device_details={'devide_id':device.backing.id,
+ 'pciSlotNumber':device.slotInfo.pciSlotNumber,
+ }
+ vm_pci_devices_info[device.deviceInfo.label] = device_details
+ else:
+ self.logger.error("Can not connect to vCenter while getting "\
+ "PCI devices infromationn")
+ return vm_pci_devices_info
except Exception as exp:
- self.logger.info("Error occurred while getting PCI devices infromationn"\
- " for VM {} : {}".format(vm_obj,exp))
- return vm_pci_devices_info
+ self.logger.error("Error occurred while getting VM infromationn"\
+ " for VM : {}".format(exp))
+ raise vimconn.vimconnException(message=exp)
+
+ def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
+ """
+ Method to add network adapter type to vm
+ Args :
+ network_name - name of network
+ primary_nic_index - int value for primary nic index
+ nicIndex - int value for nic index
+ nic_type - specify model name to which add to vm
+ Returns:
+ None
+ """
+ vca = self.connect()
+ if not vca:
+ raise vimconn.vimconnConnectionException("Failed to connect vCloud director")
+
+ try:
+ floating_ip = False
+ if 'floating_ip' in net: floating_ip = net['floating_ip']
+ allocation_mode = "POOL" if floating_ip else "DHCP"
+
+ if not nic_type:
+ for vms in vapp._get_vms():
+ vm_id = (vms.id).split(':')[-1]
+ url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)
+ response = Http.get(url=url_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code != 200:
+ self.logger.error("REST call {} failed reason : {}"\
+ "status code : {}".format(url_rest_call,
+ response.content,
+ response.status_code))
+ raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+ "network connection section")
+
+ data = response.content
+ if '<PrimaryNetworkConnectionIndex>' not in data:
+ item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+ <NetworkConnection network="{}">
+ <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+ <IsConnected>true</IsConnected>
+ <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+ </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+ allocation_mode)
+
+ data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
+ else:
+ new_item = """<NetworkConnection network="{}">
+ <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+ <IsConnected>true</IsConnected>
+ <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+ </NetworkConnection>""".format(network_name, nicIndex,
+ allocation_mode)
+
+ data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
+
+ headers = vca.vcloud_session.get_vcloud_headers()
+ headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+ response = Http.put(url=url_rest_call, headers=headers, data=data,
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code != 202:
+ self.logger.error("REST call {} failed reason : {}"\
+ "status code : {} ".format(url_rest_call,
+ response.content,
+ response.status_code))
+ raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+ "network connection section")
+ else:
+ nic_task = taskType.parseString(response.content, True)
+ if isinstance(nic_task, GenericTask):
+ vca.block_until_completed(nic_task)
+ self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
+ "default NIC type".format(vm_id))
+ else:
+ self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
+ "connect NIC type".format(vm_id))
+ else:
+ for vms in vapp._get_vms():
+ vm_id = (vms.id).split(':')[-1]
+
+ url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)
+
+ response = Http.get(url=url_rest_call,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify,
+ logger=vca.logger)
+ if response.status_code != 200:
+ self.logger.error("REST call {} failed reason : {}"\
+ "status code : {}".format(url_rest_call,
+ response.content,
+ response.status_code))
+ raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+ "network connection section")
+ data = response.content
+ if '<PrimaryNetworkConnectionIndex>' not in data:
+ item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+ <NetworkConnection network="{}">
+ <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+ <IsConnected>true</IsConnected>
+ <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+ <NetworkAdapterType>{}</NetworkAdapterType>
+ </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+ allocation_mode, nic_type)
+
+ data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
+ else:
+ new_item = """<NetworkConnection network="{}">
+ <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+ <IsConnected>true</IsConnected>
+ <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+ <NetworkAdapterType>{}</NetworkAdapterType>
+ </NetworkConnection>""".format(network_name, nicIndex,
+ allocation_mode, nic_type)
+
+ data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
+
+ headers = vca.vcloud_session.get_vcloud_headers()
+ headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+ response = Http.put(url=url_rest_call, headers=headers, data=data,
+ verify=vca.verify,
+ logger=vca.logger)
+
+ if response.status_code != 202:
+ self.logger.error("REST call {} failed reason : {}"\
+ "status code : {}".format(url_rest_call,
+ response.content,
+ response.status_code))
+ raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+ "network connection section")
+ else:
+ nic_task = taskType.parseString(response.content, True)
+ if isinstance(nic_task, GenericTask):
+ vca.block_until_completed(nic_task)
+ self.logger.info("add_network_adapter_to_vms(): VM {} "\
+ "conneced to NIC type {}".format(vm_id, nic_type))
+ else:
+ self.logger.error("add_network_adapter_to_vms(): VM {} "\
+ "failed to connect NIC type {}".format(vm_id, nic_type))
+ except Exception as exp:
+ self.logger.error("add_network_adapter_to_vms() : exception occurred "\
+ "while adding Network adapter")
+ raise vimconn.vimconnException(message=exp)
+
+
+ def set_numa_affinity(self, vmuuid, paired_threads_id):
+ """
+ Method to assign numa affinity in vm configuration parammeters
+ Args :
+ vmuuid - vm uuid
+ paired_threads_id - one or more virtual processor
+ numbers
+ Returns:
+ return if True
+ """
+ try:
+ vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
+ if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
+ context = None
+ if hasattr(ssl, '_create_unverified_context'):
+ context = ssl._create_unverified_context()
+ vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
+ pwd=self.passwd, port=int(vm_vcenter_port),
+ sslContext=context)
+ atexit.register(Disconnect, vcenter_conect)
+ content = vcenter_conect.RetrieveContent()
+
+ host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
+ if vm_obj:
+ config_spec = vim.vm.ConfigSpec()
+ config_spec.extraConfig = []
+ opt = vim.option.OptionValue()
+ opt.key = 'numa.nodeAffinity'
+ opt.value = str(paired_threads_id)
+ config_spec.extraConfig.append(opt)
+ task = vm_obj.ReconfigVM_Task(config_spec)
+ if task:
+ result = self.wait_for_vcenter_task(task, vcenter_conect)
+ extra_config = vm_obj.config.extraConfig
+ flag = False
+ for opts in extra_config:
+ if 'numa.nodeAffinity' in opts.key:
+ flag = True
+ self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
+ "value {} for vm {}".format(opt.value, vm_obj))
+ if flag:
+ return
+ else:
+ self.logger.error("set_numa_affinity: Failed to assign numa affinity")
+ except Exception as exp:
+ self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
+ "for VM {} : {}".format(vm_obj, vm_moref_id))
+ raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
+ "affinity".format(exp))
+
+
    def cloud_init(self, vapp, cloud_config):
        """
        Method to inject ssh-key
        vapp - vapp object
        cloud_config a dictionary with:
                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
                'users': (optional) list of users to be inserted, each item is a dict with:
                    'name': (mandatory) user name,
                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
                'user-data': (optional) string is a text script to be passed directly to cloud-init
                'config-files': (optional). List of files to be transferred. Each item is a dict with:
                    'dest': (mandatory) string with the destination absolute path
                    'encoding': (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    'content' (mandatory): string with the content of the file
                    'permissions': (optional) string with file permissions, typically octal notation '0644'
                    'owner': (optional) file owner, string with the format 'owner:group'
                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
        """
        vca = self.connect()
        if not vca:
            raise vimconn.vimconnConnectionException("Failed to connect vCloud director")

        try:
            if isinstance(cloud_config, dict):
                key_pairs = []
                userdata = []
                if "key-pairs" in cloud_config:
                    key_pairs = cloud_config["key-pairs"]

                if "users" in cloud_config:
                    userdata = cloud_config["users"]

                # NOTE(review): keys are injected through a vCloud guest
                # customization script, one (key, user, user_key) combination
                # at a time; each iteration re-customizes EVERY VM of the
                # vApp. If 'key-pairs' is empty, the outer loop never runs
                # and per-user keys are not injected -- confirm intended.
                for key in key_pairs:
                    for user in userdata:
                        # user_name stays bound from a previous iteration if
                        # this user dict has no 'name' -- confirm acceptable.
                        if 'name' in user: user_name = user['name']
                        if 'key-pairs' in user and len(user['key-pairs']) > 0:
                            for user_key in user['key-pairs']:
                                # Shell script run at guest "precustomization":
                                # appends `key` to root's authorized_keys and
                                # `user_key` to the named user's, creating the
                                # .ssh dirs/files with proper ownership/modes.
                                customize_script = """
                #!/bin/bash
                echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
                if [ "$1" = "precustomization" ];then
                    echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
                    if [ ! -d /root/.ssh ];then
                        mkdir /root/.ssh
                        chown root:root /root/.ssh
                        chmod 700 /root/.ssh
                        touch /root/.ssh/authorized_keys
                        chown root:root /root/.ssh/authorized_keys
                        chmod 600 /root/.ssh/authorized_keys
                        # make centos with selinux happy
                        which restorecon && restorecon -Rv /root/.ssh
                        echo '{key}' >> /root/.ssh/authorized_keys
                    else
                        touch /root/.ssh/authorized_keys
                        chown root:root /root/.ssh/authorized_keys
                        chmod 600 /root/.ssh/authorized_keys
                        echo '{key}' >> /root/.ssh/authorized_keys
                    fi
                    if [ -d /home/{user_name} ];then
                        if [ ! -d /home/{user_name}/.ssh ];then
                            mkdir /home/{user_name}/.ssh
                            chown {user_name}:{user_name} /home/{user_name}/.ssh
                            chmod 700 /home/{user_name}/.ssh
                            touch /home/{user_name}/.ssh/authorized_keys
                            chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
                            chmod 600 /home/{user_name}/.ssh/authorized_keys
                            # make centos with selinux happy
                            which restorecon && restorecon -Rv /home/{user_name}/.ssh
                            echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
                        else
                            touch /home/{user_name}/.ssh/authorized_keys
                            chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
                            chmod 600 /home/{user_name}/.ssh/authorized_keys
                            echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
                        fi
                    fi
                fi""".format(key=key, user_name=user_name, user_key=user_key)

                                for vm in vapp._get_vms():
                                    vm_name = vm.name
                                    task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
                                    if isinstance(task, GenericTask):
                                        vca.block_until_completed(task)
                                        self.logger.info("cloud_init : customized guest os task "\
                                                         "completed for VM {}".format(vm_name))
                                    else:
                                        self.logger.error("cloud_init : task for customized guest os"\
                                                          "failed for VM {}".format(vm_name))
        except Exception as exp:
            self.logger.error("cloud_init : exception occurred while injecting "\
                              "ssh-key")
            raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
                                           "ssh-key".format(exp))