X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=vimconn_vmware.py;h=384da6234810b4be3435bf720e2d92e6bd18fabd;hb=d2963627c31938740cd8d398abd139f5679405b5;hp=9e4e7608f0cd11bf945d163bbbcf926ae011681e;hpb=4cb6e90ab3a28866243ce12a87aac9e1fea23b77;p=osm%2FRO.git

diff --git a/vimconn_vmware.py b/vimconn_vmware.py
index 9e4e7608..384da623 100644
--- a/vimconn_vmware.py
+++ b/vimconn_vmware.py
@@ -63,6 +63,7 @@ import hashlib
 import socket
 import struct
 import netaddr
+import random
 
 # global variable for vcd connector type
 STANDALONE = 'standalone'
@@ -71,13 +72,9 @@ STANDALONE = 'standalone'
 FLAVOR_RAM_KEY = 'ram'
 FLAVOR_VCPUS_KEY = 'vcpus'
 FLAVOR_DISK_KEY = 'disk'
-DEFAULT_IP_PROFILE = {'gateway_address':"192.168.1.1",
-                      'dhcp_count':50,
-                      'subnet_address':"192.168.1.0/24",
+DEFAULT_IP_PROFILE = {'dhcp_count':50,
                       'dhcp_enabled':True,
-                      'dhcp_start_address':"192.168.1.3",
-                      'ip_version':"IPv4",
-                      'dns_address':"192.168.1.2"
+                      'ip_version':"IPv4"
                       }
 # global variable for wait time
 INTERVAL_TIME = 5
@@ -1397,6 +1394,13 @@ class vimconnector(vimconn.vimconnector):
                     if result :
                         self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
 
+            if numas:
+                # Assigning numa affinity setting
+                for numa in numas:
+                    if 'paired-threads-id' in numa:
+                        paired_threads_id = numa['paired-threads-id']
+                        self.set_numa_affinity(vapp_uuid, paired_threads_id)
+
             # add NICs & connect to networks in netlist
             try:
                 self.logger.info("Request to connect VM to a network: {}".format(net_list))
@@ -1442,15 +1446,21 @@ class vimconnector(vimconn.vimconnector):
                         self.add_network_adapter_to_vms(vapp, nets[0].name,
                                                         primary_nic_index,
                                                         nicIndex,
+                                                        net,
                                                         nic_type=nic_type)
                     else:
                         self.logger.info("new_vminstance(): adding network adapter "\
                                          "to a network {}".format(nets[0].name))
                         self.add_network_adapter_to_vms(vapp, nets[0].name,
                                                         primary_nic_index,
-                                                        nicIndex)
+                                                        nicIndex,
+                                                        net)
                     nicIndex += 1
 
+            # cloud-init for ssh-key injection
+            if cloud_config:
+                self.cloud_init(vapp,cloud_config)
+
             # deploy and power on vm
             self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
             deploytask = vapp.deploy(powerOn=False)
@@ -1715,39 +1725,6 @@ class vimconnector(vimconn.vimconnector):
 
         self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
 
-        mac_ip_addr={}
-        rheaders = {'Content-Type': 'application/xml'}
-        iso_edges = ['edge-2','edge-3','edge-6','edge-7','edge-8','edge-9','edge-10']
-
-        try:
-            for edge in iso_edges:
-                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
-                self.logger.debug("refresh_vms_status: NSX Manager url: {}".format(nsx_api_url))
-
-                resp = requests.get(self.nsx_manager + nsx_api_url,
-                                    auth = (self.nsx_user, self.nsx_password),
-                                    verify = False, headers = rheaders)
-
-                if resp.status_code == requests.codes.ok:
-                    dhcp_leases = XmlElementTree.fromstring(resp.text)
-                    for child in dhcp_leases:
-                        if child.tag == 'dhcpLeaseInfo':
-                            dhcpLeaseInfo = child
-                            for leaseInfo in dhcpLeaseInfo:
-                                for elem in leaseInfo:
-                                    if (elem.tag)=='macAddress':
-                                        mac_addr = elem.text
-                                    if (elem.tag)=='ipAddress':
-                                        ip_addr = elem.text
-                                if (mac_addr) is not None:
-                                    mac_ip_addr[mac_addr]= ip_addr
-                    self.logger.debug("NSX Manager DHCP Lease info: mac_ip_addr : {}".format(mac_ip_addr))
-                else:
-                    self.logger.debug("Error occurred while getting DHCP lease info from NSX Manager: {}".format(resp.content))
-        except KeyError:
-            self.logger.debug("Error in response from NSX Manager {}".format(KeyError.message))
-            self.logger.debug(traceback.format_exc())
-
         vca = self.connect()
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed.")
@@ -1757,6 +1734,7 @@ class vimconnector(vimconn.vimconnector):
             raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
 
         vms_dict = {}
+        nsx_edge_list = []
         for vmuuid in vm_list:
             vmname = self.get_namebyvappid(vca, vdc, vmuuid)
             if vmname is not None:
@@ -1778,12 +1756,19 @@ class vimconnector(vimconn.vimconnector):
                     for vm_network in vapp_network:
                         if vm_network['name'] == vmname:
                             #Assign IP Address based on MAC Address in NSX DHCP lease info
-                            for mac_adres,ip_adres in mac_ip_addr.iteritems():
-                                if mac_adres == vm_network['mac']:
-                                    vm_network['ip']=ip_adres
+                            if vm_network['ip'] is None:
+                                if not nsx_edge_list:
+                                    nsx_edge_list = self.get_edge_details()
+                                    if nsx_edge_list is None:
+                                        raise vimconn.vimconnException("refresh_vms_status:"\
+                                                                       "Failed to get edge details from NSX Manager")
+                                if vm_network['mac'] is not None:
+                                    vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])
+
+                            vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
                             interface = {"mac_address": vm_network['mac'],
-                                         "vim_net_id": self.get_network_id_by_name(vm_network['network_name']),
-                                         "vim_interface_id": self.get_network_id_by_name(vm_network['network_name']),
+                                         "vim_net_id": vm_net_id,
+                                         "vim_interface_id": vm_net_id,
                                          'ip_address': vm_network['ip']}
                             # interface['vim_info'] = yaml.safe_dump(vm_network)
                             vm_dict["interfaces"].append(interface)
@@ -1795,6 +1780,110 @@ class vimconnector(vimconn.vimconnector):
 
         return vms_dict
 
+
+    def get_edge_details(self):
+        """Get the NSX edge list from NSX Manager
+           Returns list of NSX edges
+        """
+        edge_list = []
+        rheaders = {'Content-Type': 'application/xml'}
+        nsx_api_url = '/api/4.0/edges'
+
+        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
+
+        try:
+            resp = requests.get(self.nsx_manager + nsx_api_url,
+                                auth = (self.nsx_user, self.nsx_password),
+                                verify = False, headers = rheaders)
+            if resp.status_code == requests.codes.ok:
+                paged_Edge_List = XmlElementTree.fromstring(resp.text)
+                for edge_pages in paged_Edge_List:
+                    if edge_pages.tag == 'edgePage':
+                        for edge_summary in edge_pages:
+                            if edge_summary.tag == 'pagingInfo':
+                                for element in edge_summary:
+                                    if element.tag == 'totalCount' and element.text == '0':
+                                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
+                                                                       .format(self.nsx_manager))
+
+                            if edge_summary.tag == 'edgeSummary':
+                                for element in edge_summary:
+                                    if element.tag == 'id':
+                                        edge_list.append(element.text)
+                    else:
+                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
+                                                       .format(self.nsx_manager))
+
+                if not edge_list:
+                    raise vimconn.vimconnException("get_edge_details: "\
+                                                   "No NSX edge details found: {}"
+                                                   .format(self.nsx_manager))
+                else:
+                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
+                    return edge_list
+            else:
+                self.logger.debug("get_edge_details: "
+                                  "Failed to get NSX edge details from NSX Manager: {}"
+                                  .format(resp.content))
+                return None
+
+        except Exception as exp:
+            self.logger.debug("get_edge_details: "\
+                              "Failed to get NSX edge details from NSX Manager: {}"
+                              .format(exp))
+            raise vimconn.vimconnException("get_edge_details: "\
+                                           "Failed to get NSX edge details from NSX Manager: {}"
+                                           .format(exp))
+
+
+    def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
+        """Get IP address details from NSX edges, using the MAC address
+           PARAMS: nsx_edges : List of NSX edges
+                   mac_address : Find IP address corresponding to this MAC address
+           Returns: IP address corresponding to the provided MAC address
+        """
+
+        ip_addr = None
+        rheaders = {'Content-Type': 'application/xml'}
+
+        self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
+
+        try:
+            for edge in nsx_edges:
+                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
+
+                resp = requests.get(self.nsx_manager + nsx_api_url,
+                                    auth = (self.nsx_user, self.nsx_password),
+                                    verify = False, headers = rheaders)
+
+                if resp.status_code == requests.codes.ok:
+                    dhcp_leases = XmlElementTree.fromstring(resp.text)
+                    for child in dhcp_leases:
+                        if child.tag == 'dhcpLeaseInfo':
+                            dhcpLeaseInfo = child
+                            for leaseInfo in dhcpLeaseInfo:
+                                for elem in leaseInfo:
+                                    if (elem.tag)=='macAddress':
+                                        edge_mac_addr = elem.text
+                                    if (elem.tag)=='ipAddress':
+                                        ip_addr = elem.text
+                                if edge_mac_addr is not None:
+                                    if edge_mac_addr == mac_address:
+                                        self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
+                                                          .format(ip_addr, mac_address, edge))
+                                        return ip_addr
+                else:
+                    self.logger.debug("get_ipaddr_from_NSXedge: "\
+                                      "Error occurred while getting DHCP lease info from NSX Manager: {}"
+                                      .format(resp.content))
+
+            self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
+            return None
+
+        except XmlElementTree.ParseError as Err:
+            self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
+
+
     def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
         """Send and action over a VM instance from VIM
         Returns the vm_id if the action was successfully sent to the VIM"""
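Together, get_edge_details() and get_ipaddr_from_NSXedge() replace the per-refresh DHCP-lease prefetch removed above: refresh_vms_status() now fetches the edge list once and resolves individual MAC addresses on demand. A minimal sketch of that flow, assuming conn is an already-configured vimconnector with NSX Manager credentials and using a placeholder MAC address:

    # Sketch only: assumes `conn` has nsx_manager, nsx_user and nsx_password configured.
    nsx_edge_list = conn.get_edge_details()          # one NSX call per refresh cycle
    if nsx_edge_list:
        ip_addr = conn.get_ipaddr_from_NSXedge(nsx_edge_list, "00:50:56:01:02:03")
        print(ip_addr)                               # None when no lease matches the MAC
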
@@ -2519,20 +2608,25 @@ class vimconnector(vimconn.vimconnector):
 
         #Configure IP profile of the network
         ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
+        if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
+            subnet_rand = random.randint(0, 255)
+            ip_base = "192.168.{}.".format(subnet_rand)
+            ip_profile['subnet_address'] = ip_base + "0/24"
+        else:
+            ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
+
         if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
-            ip_profile['gateway_address']=DEFAULT_IP_PROFILE['gateway_address']
+            ip_profile['gateway_address']=ip_base + "1"
         if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
             ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
-        if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
-            ip_profile['subnet_address']=DEFAULT_IP_PROFILE['subnet_address']
         if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
            ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
        if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
-            ip_profile['dhcp_start_address']=DEFAULT_IP_PROFILE['dhcp_start_address']
+            ip_profile['dhcp_start_address']=ip_base + "3"
         if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
             ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
         if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
-            ip_profile['dns_address']=DEFAULT_IP_PROFILE['dns_address']
+            ip_profile['dns_address']=ip_base + "2"
 
         gateway_address=ip_profile['gateway_address']
         dhcp_count=int(ip_profile['dhcp_count'])
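The defaulting above now derives every missing address from the (possibly random) /24 subnet instead of the fixed 192.168.1.x values removed from DEFAULT_IP_PROFILE. A condensed, standalone sketch of the same rules, for illustration only:

    import random

    def fill_ip_profile(ip_profile=None):
        """Sketch of the defaulting rules used above (illustrative, not the module's API)."""
        ip_profile = dict(ip_profile or {})
        if ip_profile.get('subnet_address') is None:
            ip_base = "192.168.{}.".format(random.randint(0, 255))
            ip_profile['subnet_address'] = ip_base + "0/24"
        else:
            ip_base = ip_profile['subnet_address'].rsplit('.', 1)[0] + '.'
        if ip_profile.get('gateway_address') is None:
            ip_profile['gateway_address'] = ip_base + "1"      # x.x.x.1
        if ip_profile.get('dns_address') is None:
            ip_profile['dns_address'] = ip_base + "2"          # x.x.x.2
        if ip_profile.get('dhcp_start_address') is None:
            ip_profile['dhcp_start_address'] = ip_base + "3"   # x.x.x.3
        ip_profile.setdefault('dhcp_count', 50)
        ip_profile.setdefault('dhcp_enabled', True)
        ip_profile.setdefault('ip_version', "IPv4")
        return ip_profile

    print(fill_ip_profile({'subnet_address': "10.10.10.0/24"}))
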
@@ -3530,7 +3624,7 @@ class vimconnector(vimconn.vimconnector):
                                                            " for VM : {}".format(exp))
             raise vimconn.vimconnException(message=exp)
 
-    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, nic_type=None):
+    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
         """
             Method to add network adapter type to vm
             Args :
@@ -3546,6 +3640,20 @@ class vimconnector(vimconn.vimconnector):
             raise vimconn.vimconnConnectionException("Failed to connect vCloud director")
 
         try:
+            ip_address = None
+            floating_ip = False
+            if 'floating_ip' in net: floating_ip = net['floating_ip']
+
+            # Stub for ip_address feature
+            if 'ip_address' in net: ip_address = net['ip_address']
+
+            if floating_ip:
+                allocation_mode = "POOL"
+            elif ip_address:
+                allocation_mode = "MANUAL"
+            else:
+                allocation_mode = "DHCP"
+
             if not nic_type:
                 for vms in vapp._get_vms():
                     vm_id = (vms.id).split(':')[-1]
@@ -3570,15 +3678,27 @@ class vimconnector(vimconn.vimconnector):
                                 <NetworkConnection network="{}">
                                 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                 <IsConnected>true</IsConnected>
-                                <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
-                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex)
+                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                               allocation_mode)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
                         data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
                     else:
                         new_item = """<NetworkConnection network="{}">
                                     <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                     <IsConnected>true</IsConnected>
-                                    <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
-                                    </NetworkConnection>""".format(network_name, nicIndex)
+                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                    </NetworkConnection>""".format(network_name, nicIndex,
+                                                                   allocation_mode)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
                         data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
 
                     headers = vca.vcloud_session.get_vcloud_headers()
@@ -3625,17 +3745,29 @@ class vimconnector(vimconn.vimconnector):
                                 <NetworkConnection network="{}">
                                 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                 <IsConnected>true</IsConnected>
-                                <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
+                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                 <NetworkAdapterType>{}</NetworkAdapterType>
-                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex, nic_type)
+                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                               allocation_mode, nic_type)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
                         data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
                     else:
                         new_item = """<NetworkConnection network="{}">
                                     <NetworkConnectionIndex>{}</NetworkConnectionIndex>
                                     <IsConnected>true</IsConnected>
-                                    <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
+                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
                                     <NetworkAdapterType>{}</NetworkAdapterType>
-                                    </NetworkConnection>""".format(network_name, nicIndex, nic_type)
+                                    </NetworkConnection>""".format(network_name, nicIndex,
+                                                                   allocation_mode, nic_type)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
                         data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
 
                     headers = vca.vcloud_session.get_vcloud_headers()
@@ -3664,3 +3796,150 @@ class vimconnector(vimconn.vimconnector):
             self.logger.error("add_network_adapter_to_vms() : exception occurred "\
                               "while adding Network adapter")
             raise vimconn.vimconnException(message=exp)
+
+
+    def set_numa_affinity(self, vmuuid, paired_threads_id):
+        """
+            Method to assign numa affinity in vm configuration parameters
+            Args :
+                vmuuid - vm uuid
+                paired_threads_id - one or more virtual processor numbers
+            Returns:
+                None
+        """
+        try:
+            vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
+            if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
+                context = None
+                if hasattr(ssl, '_create_unverified_context'):
+                    context = ssl._create_unverified_context()
+                    vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
+                                                  pwd=self.passwd, port=int(vm_vcenter_port),
+                                                  sslContext=context)
+                    atexit.register(Disconnect, vcenter_conect)
+                    content = vcenter_conect.RetrieveContent()
+
+                    host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
+                    if vm_obj:
+                        config_spec = vim.vm.ConfigSpec()
+                        config_spec.extraConfig = []
+                        opt = vim.option.OptionValue()
+                        opt.key = 'numa.nodeAffinity'
+                        opt.value = str(paired_threads_id)
+                        config_spec.extraConfig.append(opt)
+                        task = vm_obj.ReconfigVM_Task(config_spec)
+                        if task:
+                            result = self.wait_for_vcenter_task(task, vcenter_conect)
+                            extra_config = vm_obj.config.extraConfig
+                            flag = False
+                            for opts in extra_config:
+                                if 'numa.nodeAffinity' in opts.key:
+                                    flag = True
+                                    self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
+                                                     "value {} for vm {}".format(opt.value, vm_obj))
+                            if flag:
+                                return
+            else:
+                self.logger.error("set_numa_affinity: Failed to assign numa affinity")
+        except Exception as exp:
+            self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
+                              "for VM {} : {}".format(vm_obj, vm_moref_id))
+            raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
+                                           "affinity".format(exp))
+
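set_numa_affinity() ultimately just writes the numa.nodeAffinity key into the VM's extraConfig through pyVmomi. A minimal standalone sketch of that reconfigure step, assuming vm_obj is a pyVmomi vim.VirtualMachine already located elsewhere (for example via get_vm_obj() above):

    from pyVmomi import vim

    def set_extra_config(vm_obj, key, value):
        # Build a reconfigure spec carrying a single advanced setting.
        opt = vim.option.OptionValue()
        opt.key = key
        opt.value = str(value)
        spec = vim.vm.ConfigSpec(extraConfig=[opt])
        # Returns a vCenter task; callers wait on it (cf. wait_for_vcenter_task above).
        return vm_obj.ReconfigVM_Task(spec)

    # e.g. set_extra_config(vm_obj, 'numa.nodeAffinity', '0,1')
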
+
+    def cloud_init(self, vapp, cloud_config):
+        """
+        Method to inject ssh-key
+        vapp - vapp object
+        cloud_config a dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) string is a text script to be passed directly to cloud-init
+                'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content' (mandatory): string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    'owner': (optional) file owner, string with the format 'owner:group'
+                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+        """
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCloud director")
+
+        try:
+            if isinstance(cloud_config, dict):
+                key_pairs = []
+                userdata = []
+                if "key-pairs" in cloud_config:
+                    key_pairs = cloud_config["key-pairs"]
+
+                if "users" in cloud_config:
+                    userdata = cloud_config["users"]
+
+                for key in key_pairs:
+                    for user in userdata:
+                        if 'name' in user: user_name = user['name']
+                        if 'key-pairs' in user and len(user['key-pairs']) > 0:
+                            for user_key in user['key-pairs']:
+                                customize_script = """
+                            #!/bin/bash
+                            echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+                            if [ "$1" = "precustomization" ];then
+                                echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+                                if [ ! -d /root/.ssh ];then
+                                    mkdir /root/.ssh
+                                    chown root:root /root/.ssh
+                                    chmod 700 /root/.ssh
+                                    touch /root/.ssh/authorized_keys
+                                    chown root:root /root/.ssh/authorized_keys
+                                    chmod 600 /root/.ssh/authorized_keys
+                                    # make centos with selinux happy
+                                    which restorecon && restorecon -Rv /root/.ssh
+                                    echo '{key}' >> /root/.ssh/authorized_keys
+                                else
+                                    touch /root/.ssh/authorized_keys
+                                    chown root:root /root/.ssh/authorized_keys
+                                    chmod 600 /root/.ssh/authorized_keys
+                                    echo '{key}' >> /root/.ssh/authorized_keys
+                                fi
+                                if [ -d /home/{user_name} ];then
+                                    if [ ! -d /home/{user_name}/.ssh ];then
+                                        mkdir /home/{user_name}/.ssh
+                                        chown {user_name}:{user_name} /home/{user_name}/.ssh
+                                        chmod 700 /home/{user_name}/.ssh
+                                        touch /home/{user_name}/.ssh/authorized_keys
+                                        chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
+                                        chmod 600 /home/{user_name}/.ssh/authorized_keys
+                                        # make centos with selinux happy
+                                        which restorecon && restorecon -Rv /home/{user_name}/.ssh
+                                        echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
+                                    else
+                                        touch /home/{user_name}/.ssh/authorized_keys
+                                        chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
+                                        chmod 600 /home/{user_name}/.ssh/authorized_keys
+                                        echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
+                                    fi
+                                fi
+                            fi""".format(key=key, user_name=user_name, user_key=user_key)
+
+                                for vm in vapp._get_vms():
+                                    vm_name = vm.name
+                                    task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
+                                    if isinstance(task, GenericTask):
+                                        vca.block_until_completed(task)
+                                        self.logger.info("cloud_init : customized guest os task "\
+                                                         "completed for VM {}".format(vm_name))
+                                    else:
+                                        self.logger.error("cloud_init : task for customized guest os "\
+                                                          "failed for VM {}".format(vm_name))
+        except Exception as exp:
+            self.logger.error("cloud_init : exception occurred while injecting "\
+                              "ssh-key")
+            raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
+                                           "ssh-key".format(exp))
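For reference, the cloud_config dictionary consumed above (and handed down from new_vminstance()) follows the layout given in the docstring. An illustrative call with placeholder key material and names:

    cloud_config = {
        "key-pairs": ["ssh-rsa AAAAB3Nza... operator@example"],
        "users": [
            {"name": "osmuser", "key-pairs": ["ssh-rsa AAAAB3Nza... osmuser@example"]}
        ]
    }
    # With `connector` a vimconnector instance and `vapp` the deployed vApp object:
    # connector.cloud_init(vapp, cloud_config)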