X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_ro%2Fvimconn_openstack.py;h=43fdbc516ee6ddbea5b812cddb2f1266e5fd339f;hb=b42fd9bdcea865bd3c6d4a546a6f294ff69e1ef4;hp=e3d333465e875d16ae87844598cb1127938ee84c;hpb=3cf9bcdc0db2139497655a6a201600969318abd8;p=osm%2FRO.git diff --git a/osm_ro/vimconn_openstack.py b/osm_ro/vimconn_openstack.py index e3d33346..43fdbc51 100644 --- a/osm_ro/vimconn_openstack.py +++ b/osm_ro/vimconn_openstack.py @@ -36,13 +36,12 @@ __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor __date__ = "$22-sep-2017 23:59:59$" import vimconn -import json +# import json import logging import netaddr import time import yaml import random -import sys import re import copy @@ -60,8 +59,6 @@ from httplib import HTTPException from neutronclient.neutron import client as neClient from neutronclient.common import exceptions as neExceptions from requests.exceptions import ConnectionError -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText """contain the openstack virtual machine status to openmano status""" @@ -78,8 +75,8 @@ netStatus2manoFormat={'ACTIVE':'ACTIVE','PAUSED':'PAUSED','INACTIVE':'INACTIVE', supportedClassificationTypes = ['legacy_flow_classifier'] #global var to have a timeout creating and deleting volumes -volume_timeout = 60 -server_timeout = 300 +volume_timeout = 600 +server_timeout = 600 class vimconnector(vimconn.vimconnector): def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, @@ -104,7 +101,14 @@ class vimconnector(vimconn.vimconnector): vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config) - self.insecure = self.config.get("insecure", False) + if self.config.get("insecure") and self.config.get("ca_cert"): + raise vimconn.vimconnException("options insecure and ca_cert are mutually exclusive") + self.verify = True + if self.config.get("insecure"): + self.verify = False + if self.config.get("ca_cert"): + self.verify = self.config.get("ca_cert") + if not url: raise TypeError('url param can not be NoneType') self.persistent_info = persistent_info @@ -164,23 +168,33 @@ class vimconnector(vimconn.vimconnector): if self.config.get('APIversion'): self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3' else: # get from ending auth_url that end with v3 or with v2.0 - self.api_version3 = self.url.split("/")[-1] == "v3" + self.api_version3 = self.url.endswith("/v3") or self.url.endswith("/v3/") self.session['api_version3'] = self.api_version3 if self.api_version3: + if self.config.get('project_domain_id') or self.config.get('project_domain_name'): + project_domain_id_default = None + else: + project_domain_id_default = 'default' + if self.config.get('user_domain_id') or self.config.get('user_domain_name'): + user_domain_id_default = None + else: + user_domain_id_default = 'default' auth = v3.Password(auth_url=self.url, username=self.user, password=self.passwd, project_name=self.tenant_name, project_id=self.tenant_id, - project_domain_id=self.config.get('project_domain_id', 'default'), - user_domain_id=self.config.get('user_domain_id', 'default')) + project_domain_id=self.config.get('project_domain_id', project_domain_id_default), + user_domain_id=self.config.get('user_domain_id', user_domain_id_default), + project_domain_name=self.config.get('project_domain_name'), + user_domain_name=self.config.get('user_domain_name')) else: auth = 
v2.Password(auth_url=self.url, username=self.user, password=self.passwd, tenant_name=self.tenant_name, tenant_id=self.tenant_id) - sess = session.Session(auth=auth, verify=not self.insecure) + sess = session.Session(auth=auth, verify=self.verify) if self.api_version3: self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type) else: @@ -346,7 +360,7 @@ class vimconnector(vimconn.vimconnector): elif isinstance(exception, nvExceptions.Conflict): raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + str(exception)) elif isinstance(exception, vimconn.vimconnException): - raise + raise exception else: # () self.logger.error("General Exception " + str(exception), exc_info=True) raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception)) @@ -436,7 +450,7 @@ class vimconnector(vimconn.vimconnector): #create subnetwork, even if there is no profile if not ip_profile: ip_profile = {} - if 'subnet_address' not in ip_profile: + if not ip_profile.get('subnet_address'): #Fake subnet is required subnet_rand = random.randint(0, 255) ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand) @@ -448,16 +462,18 @@ class vimconnector(vimconn.vimconnector): "cidr": ip_profile['subnet_address'] } # Gateway should be set to None if not needed. Otherwise openstack assigns one by default - subnet['gateway_ip'] = ip_profile.get('gateway_address') + if ip_profile.get('gateway_address'): + subnet['gateway_ip'] = ip_profile.get('gateway_address') if ip_profile.get('dns_address'): subnet['dns_nameservers'] = ip_profile['dns_address'].split(";") if 'dhcp_enabled' in ip_profile: - subnet['enable_dhcp'] = False if ip_profile['dhcp_enabled']=="false" else True - if 'dhcp_start_address' in ip_profile: + subnet['enable_dhcp'] = False if \ + ip_profile['dhcp_enabled']=="false" or ip_profile['dhcp_enabled']==False else True + if ip_profile.get('dhcp_start_address'): subnet['allocation_pools'] = [] subnet['allocation_pools'].append(dict()) subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address'] - if 'dhcp_count' in ip_profile: + if ip_profile.get('dhcp_count'): #parts = ip_profile['dhcp_start_address'].split('.') #ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3]) ip_int = int(netaddr.IPAddress(ip_profile['dhcp_start_address'])) @@ -467,7 +483,7 @@ class vimconnector(vimconn.vimconnector): #self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet)) self.neutron.create_subnet({"subnet": subnet} ) return new_net["network"]["id"] - except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e: + except Exception as e: if new_net: self.neutron.delete_network(new_net['network']['id']) self._format_exception(e) @@ -486,10 +502,11 @@ class vimconnector(vimconn.vimconnector): self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict)) try: self._reload_connection() - if self.api_version3 and "tenant_id" in filter_dict: - filter_dict['project_id'] = filter_dict.pop('tenant_id') #TODO check - net_dict=self.neutron.list_networks(**filter_dict) - net_list=net_dict["networks"] + filter_dict_os = filter_dict.copy() + if self.api_version3 and "tenant_id" in filter_dict_os: + filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id') #T ODO check + net_dict = self.neutron.list_networks(**filter_dict_os) + net_list = net_dict["networks"] self.__net_os2mano(net_list) return net_list except 
(neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e: @@ -635,7 +652,6 @@ class vimconnector(vimconn.vimconnector): except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e: self._format_exception(e) - def new_flavor(self, flavor_data, change_name_if_used=True): '''Adds a tenant flavor to openstack VIM if change_name_if_used is True, it will change name in case of conflict, because it is not supported name repetition @@ -680,7 +696,9 @@ class vimconnector(vimconn.vimconnector): numa_properties["vmware:latency_sensitivity_level"] = "high" for numa in numas: #overwrite ram and vcpus - ram = numa['memory']*1024 + #check if key 'memory' is present in numa else use ram value at flavor + if 'memory' in numa: + ram = numa['memory']*1024 #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html if 'paired-threads' in numa: vcpus = numa['paired-threads']*2 @@ -706,7 +724,7 @@ class vimconnector(vimconn.vimconnector): new_flavor=self.nova.flavors.create(name, ram, vcpus, - flavor_data.get('disk',1), + flavor_data.get('disk',0), is_public=flavor_data.get('is_public', True) ) #add metadata @@ -832,57 +850,25 @@ class vimconnector(vimconn.vimconnector): self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict)) try: self._reload_connection() - filter_dict_os=filter_dict.copy() + filter_dict_os = filter_dict.copy() #First we filter by the available filter fields: name, id. The others are removed. - filter_dict_os.pop('checksum',None) - image_list=self.nova.images.findall(**filter_dict_os) - if len(image_list)==0: + filter_dict_os.pop('checksum', None) + image_list = self.nova.images.findall(**filter_dict_os) + if len(image_list) == 0: return [] #Then we filter by the rest of filter fields: checksum filtered_list = [] for image in image_list: - image_class=self.glance.images.get(image.id) - if 'checksum' not in filter_dict or image_class['checksum']==filter_dict.get('checksum'): - filtered_list.append(image_class.copy()) + try: + image_class = self.glance.images.get(image.id) + if 'checksum' not in filter_dict or image_class['checksum'] == filter_dict.get('checksum'): + filtered_list.append(image_class.copy()) + except gl1Exceptions.HTTPNotFound: + pass return filtered_list except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e: self._format_exception(e) - @staticmethod - def _create_mimemultipart(content_list): - """Creates a MIMEmultipart text combining the content_list - :param content_list: list of text scripts to be combined - :return: str of the created MIMEmultipart. 
If the list is empty returns None, if the list contains only one - element MIMEmultipart is not created and this content is returned - """ - if not content_list: - return None - elif len(content_list) == 1: - return content_list[0] - combined_message = MIMEMultipart() - for content in content_list: - if content.startswith('#include'): - format = 'text/x-include-url' - elif content.startswith('#include-once'): - format = 'text/x-include-once-url' - elif content.startswith('#!'): - format = 'text/x-shellscript' - elif content.startswith('#cloud-config'): - format = 'text/cloud-config' - elif content.startswith('#cloud-config-archive'): - format = 'text/cloud-config-archive' - elif content.startswith('#upstart-job'): - format = 'text/upstart-job' - elif content.startswith('#part-handler'): - format = 'text/part-handler' - elif content.startswith('#cloud-boothook'): - format = 'text/cloud-boothook' - else: # by default - format = 'text/x-shellscript' - sub_message = MIMEText(content, format, sys.getdefaultencoding()) - combined_message.attach(sub_message) - return combined_message.as_string() - def __wait_for_vm(self, vm_id, status): """wait until vm is in the desired status and return True. If the VM gets in ERROR status, return false. @@ -962,7 +948,7 @@ class vimconnector(vimconn.vimconnector): def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None): - '''Adds a VM instance to VIM + """Adds a VM instance to VIM Params: start: indicates if VM must start or boot in pause mode. Ignored image_id,flavor_id: iamge and flavor uuid @@ -973,23 +959,23 @@ class vimconnector(vimconn.vimconnector): model: interface model, ignored #TODO mac_address: used for SR-IOV ifaces #TODO for other types use: 'data', 'bridge', 'mgmt' - type: 'virtual', 'PF', 'VF', 'VFnotShared' + type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared' vim_id: filled/added by this function floating_ip: True/False (or it can be None) - 'cloud_config': (optional) dictionary with: - 'key-pairs': (optional) list of strings with the public key to be inserted to the default user - 'users': (optional) list of users to be inserted, each item is a dict with: - 'name': (mandatory) user name, - 'key-pairs': (optional) list of strings with the public key to be inserted to the user - 'user-data': (optional) string is a text script to be passed directly to cloud-init - 'config-files': (optional). List of files to be transferred. Each item is a dict with: - 'dest': (mandatory) string with the destination absolute path - 'encoding': (optional, by default text). Can be one of: - 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64' - 'content' (mandatory): string with the content of the file - 'permissions': (optional) string with file permissions, typically octal notation '0644' - 'owner': (optional) file owner, string with the format 'owner:group' - 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk) + 'cloud_config': (optional) dictionary with: + 'key-pairs': (optional) list of strings with the public key to be inserted to the default user + 'users': (optional) list of users to be inserted, each item is a dict with: + 'name': (mandatory) user name, + 'key-pairs': (optional) list of strings with the public key to be inserted to the user + 'user-data': (optional) string is a text script to be passed directly to cloud-init + 'config-files': (optional). 
List of files to be transferred. Each item is a dict with: + 'dest': (mandatory) string with the destination absolute path + 'encoding': (optional, by default text). Can be one of: + 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64' + 'content' (mandatory): string with the content of the file + 'permissions': (optional) string with file permissions, typically octal notation '0644' + 'owner': (optional) file owner, string with the format 'owner:group' + 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk) 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with: 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted 'size': (mandatory) string with the size of the disk in GB @@ -997,20 +983,25 @@ class vimconnector(vimconn.vimconnector): availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if availability_zone_index is None #TODO ip, security groups - Returns the instance identifier - ''' + Returns a tuple with the instance identifier and created_items or raises an exception on error + created_items can be None or a dictionary where this method can include key-values that will be passed to + the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc. + Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same + as not present. + """ self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'",image_id, flavor_id,str(net_list)) try: server = None - metadata={} - net_list_vim=[] - external_network=[] # list of external networks to be connected to instance, later on used to create floating_ip + created_items = {} + # metadata = {} + net_list_vim = [] + external_network = [] # list of external networks to be connected to instance, later on used to create floating_ip no_secured_ports = [] # List of port-is with port-security disabled self._reload_connection() - metadata_vpci={} # For a specific neutron plugin + # metadata_vpci = {} # For a specific neutron plugin block_device_mapping = None for net in net_list: - if not net.get("net_id"): #skip non connected iface + if not net.get("net_id"): # skip non connected iface continue port_dict={ @@ -1019,37 +1010,43 @@ class vimconnector(vimconn.vimconnector): "admin_state_up": True } if net["type"]=="virtual": - if "vpci" in net: - metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]] - elif net["type"]=="VF": # for VF - if "vpci" in net: - if "VF" not in metadata_vpci: - metadata_vpci["VF"]=[] - metadata_vpci["VF"].append([ net["vpci"], "" ]) + pass + # if "vpci" in net: + # metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]] + elif net["type"] == "VF" or net["type"] == "SR-IOV": # for VF + # if "vpci" in net: + # if "VF" not in metadata_vpci: + # metadata_vpci["VF"]=[] + # metadata_vpci["VF"].append([ net["vpci"], "" ]) port_dict["binding:vnic_type"]="direct" - ########## VIO specific Changes ####### + # VIO specific Changes if self.vim_type == "VIO": - #Need to create port with port_security_enabled = False and no-security-groups + # Need to create port with port_security_enabled = False and no-security-groups port_dict["port_security_enabled"]=False port_dict["provider_security_groups"]=[] port_dict["security_groups"]=[] - else: #For PT - ########## VIO specific Changes ####### - #Current VIO release does not support port with type 'direct-physical' - #So no 
need to create virtual port in case of PCI-device. - #Will update port_dict code when support gets added in next VIO release + else: # For PT PCI-PASSTHROUGH + # VIO specific Changes + # Current VIO release does not support port with type 'direct-physical' + # So no need to create virtual port in case of PCI-device. + # Will update port_dict code when support gets added in next VIO release if self.vim_type == "VIO": - raise vimconn.vimconnNotSupportedException("Current VIO release does not support full passthrough (PT)") - if "vpci" in net: - if "PF" not in metadata_vpci: - metadata_vpci["PF"]=[] - metadata_vpci["PF"].append([ net["vpci"], "" ]) + raise vimconn.vimconnNotSupportedException( + "Current VIO release does not support full passthrough (PT)") + # if "vpci" in net: + # if "PF" not in metadata_vpci: + # metadata_vpci["PF"]=[] + # metadata_vpci["PF"].append([ net["vpci"], "" ]) port_dict["binding:vnic_type"]="direct-physical" if not port_dict["name"]: port_dict["name"]=name if net.get("mac_address"): port_dict["mac_address"]=net["mac_address"] + if net.get("ip_address"): + port_dict["fixed_ips"] = [{'ip_address': net["ip_address"]}] + # TODO add 'subnet_id': new_port = self.neutron.create_port({"port": port_dict }) + created_items["port:" + str(new_port["port"]["id"])] = True net["mac_adress"] = new_port["port"]["mac_address"] net["vim_id"] = new_port["port"]["id"] # if try to use a network without subnetwork, it will return a emtpy list @@ -1070,81 +1067,31 @@ class vimconnector(vimconn.vimconnector): elif net['use'] == 'mgmt' and self.config.get('use_floating_ip'): net['exit_on_floating_ip_error'] = False external_network.append(net) + net['floating_ip'] = self.config.get('use_floating_ip') # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic is dropped. 
# As a workaround we wait until the VM is active and then disable the port-security - if net.get("port_security") == False: + if net.get("port_security") == False and not self.config.get("no_port_security_extension"): no_secured_ports.append(new_port["port"]["id"]) - if metadata_vpci: - metadata = {"pci_assignement": json.dumps(metadata_vpci)} - if len(metadata["pci_assignement"]) >255: - #limit the metadata size - #metadata["pci_assignement"] = metadata["pci_assignement"][0:255] - self.logger.warn("Metadata deleted since it exceeds the expected length (255) ") - metadata = {} + # if metadata_vpci: + # metadata = {"pci_assignement": json.dumps(metadata_vpci)} + # if len(metadata["pci_assignement"]) >255: + # #limit the metadata size + # #metadata["pci_assignement"] = metadata["pci_assignement"][0:255] + # self.logger.warn("Metadata deleted since it exceeds the expected length (255) ") + # metadata = {} - self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s' metadata %s", - name, image_id, flavor_id, str(net_list_vim), description, str(metadata)) + self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'", + name, image_id, flavor_id, str(net_list_vim), description) - security_groups = self.config.get('security_groups') + security_groups = self.config.get('security_groups') if type(security_groups) is str: security_groups = ( security_groups, ) - #cloud config - userdata=None - config_drive = None - userdata_list = [] - if isinstance(cloud_config, dict): - if cloud_config.get("user-data"): - if isinstance(cloud_config["user-data"], str): - userdata_list.append(cloud_config["user-data"]) - else: - for u in cloud_config["user-data"]: - userdata_list.append(u) - if cloud_config.get("boot-data-drive") != None: - config_drive = cloud_config["boot-data-drive"] - if cloud_config.get("config-files") or cloud_config.get("users") or cloud_config.get("key-pairs"): - userdata_dict={} - #default user - if cloud_config.get("key-pairs"): - userdata_dict["ssh-authorized-keys"] = cloud_config["key-pairs"] - userdata_dict["users"] = [{"default": None, "ssh-authorized-keys": cloud_config["key-pairs"] }] - if cloud_config.get("users"): - if "users" not in userdata_dict: - userdata_dict["users"] = [ "default" ] - for user in cloud_config["users"]: - user_info = { - "name" : user["name"], - "sudo": "ALL = (ALL)NOPASSWD:ALL" - } - if "user-info" in user: - user_info["gecos"] = user["user-info"] - if user.get("key-pairs"): - user_info["ssh-authorized-keys"] = user["key-pairs"] - userdata_dict["users"].append(user_info) - - if cloud_config.get("config-files"): - userdata_dict["write_files"] = [] - for file in cloud_config["config-files"]: - file_info = { - "path" : file["dest"], - "content": file["content"] - } - if file.get("encoding"): - file_info["encoding"] = file["encoding"] - if file.get("permissions"): - file_info["permissions"] = file["permissions"] - if file.get("owner"): - file_info["owner"] = file["owner"] - userdata_dict["write_files"].append(file_info) - userdata_list.append("#cloud-config\n" + yaml.safe_dump(userdata_dict, indent=4, - default_flow_style=False)) - userdata = self._create_mimemultipart(userdata_list) - self.logger.debug("userdata: %s", userdata) - elif isinstance(cloud_config, str): - userdata = cloud_config - - #Create additional volumes in case these are present in disk_list + # cloud config + config_drive, userdata = self._create_user_data(cloud_config) + + # Create additional volumes in case these are present 
in disk_list base_disk_index = ord('b') if disk_list != None: block_device_mapping = {} @@ -1155,10 +1102,11 @@ class vimconnector(vimconn.vimconnector): else: volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' + chr(base_disk_index)) + created_items["volume:" + str(volume.id)] = True block_device_mapping['_vd' + chr(base_disk_index)] = volume.id base_disk_index += 1 - #wait until volumes are with status available + # Wait until volumes are with status available keep_waiting = True elapsed_time = 0 while keep_waiting and elapsed_time < volume_timeout: @@ -1170,28 +1118,19 @@ class vimconnector(vimconn.vimconnector): time.sleep(1) elapsed_time += 1 - #if we exceeded the timeout rollback + # If we exceeded the timeout rollback if elapsed_time >= volume_timeout: - #delete the volumes we just created - for volume_id in block_device_mapping.itervalues(): - self.cinder.volumes.delete(volume_id) - - #delete ports we just created - for net_item in net_list_vim: - if 'port-id' in net_item: - self.neutron.delete_port(net_item['port-id']) - raise vimconn.vimconnException('Timeout creating volumes for instance ' + name, http_code=vimconn.HTTP_Request_Timeout) # get availability Zone vm_av_zone = self._get_vm_availability_zone(availability_zone_index, availability_zone_list) - self.logger.debug("nova.servers.create({}, {}, {}, nics={}, meta={}, security_groups={}, " + self.logger.debug("nova.servers.create({}, {}, {}, nics={}, security_groups={}, " "availability_zone={}, key_name={}, userdata={}, config_drive={}, " - "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim, metadata, + "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim, security_groups, vm_av_zone, self.config.get('keypair'), - userdata, config_drive, block_device_mapping)) - server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim, meta=metadata, + userdata, config_drive, block_device_mapping)) + server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim, security_groups=security_groups, availability_zone=vm_av_zone, key_name=self.config.get('keypair'), @@ -1200,74 +1139,88 @@ class vimconnector(vimconn.vimconnector): block_device_mapping=block_device_mapping ) # , description=description) + vm_start_time = time.time() # Previously mentioned workaround to wait until the VM is active and then disable the port-security if no_secured_ports: self.__wait_for_vm(server.id, 'ACTIVE') for port_id in no_secured_ports: try: - self.neutron.update_port(port_id, {"port": {"port_security_enabled": False, "security_groups": None} }) - + self.neutron.update_port(port_id, + {"port": {"port_security_enabled": False, "security_groups": None}}) except Exception as e: - self.logger.error("It was not possible to disable port security for port {}".format(port_id)) - self.delete_vminstance(server.id) - raise - - #print "DONE :-)", server - pool_id = None - floating_ips = self.neutron.list_floatingips().get("floatingips", ()) + raise vimconn.vimconnException("It was not possible to disable port security for port {}".format( + port_id)) + # print "DONE :-)", server + # pool_id = None if external_network: - self.__wait_for_vm(server.id, 'ACTIVE') - + floating_ips = self.neutron.list_floatingips().get("floatingips", ()) for floating_network in external_network: try: assigned = False - while(assigned == False): + while not assigned: if floating_ips: ip = floating_ips.pop(0) - if not ip.get("port_id", False) and ip.get('tenant_id') == server.tenant_id: - 
free_floating_ip = ip.get("floating_ip_address") - try: - fix_ip = floating_network.get('ip') - server.add_floating_ip(free_floating_ip, fix_ip) - assigned = True - except Exception as e: - raise vimconn.vimconnException(type(e).__name__ + ": Cannot create floating_ip "+ str(e), http_code=vimconn.HTTP_Conflict) + if ip.get("port_id", False) or ip.get('tenant_id') != server.tenant_id: + continue + if isinstance(floating_network['floating_ip'], str): + if ip.get("floating_network_id") != floating_network['floating_ip']: + continue + free_floating_ip = ip.get("floating_ip_address") else: - #Find the external network - external_nets = list() - for net in self.neutron.list_networks()['networks']: - if net['router:external']: - external_nets.append(net) - - if len(external_nets) == 0: - raise vimconn.vimconnException("Cannot create floating_ip automatically since no external " - "network is present", - http_code=vimconn.HTTP_Conflict) - if len(external_nets) > 1: - raise vimconn.vimconnException("Cannot create floating_ip automatically since multiple " - "external networks are present", - http_code=vimconn.HTTP_Conflict) - - pool_id = external_nets[0].get('id') + if isinstance(floating_network['floating_ip'], str) and \ + floating_network['floating_ip'].lower() != "true": + pool_id = floating_network['floating_ip'] + else: + # Find the external network + external_nets = list() + for net in self.neutron.list_networks()['networks']: + if net['router:external']: + external_nets.append(net) + + if len(external_nets) == 0: + raise vimconn.vimconnException("Cannot create floating_ip automatically since no external " + "network is present", + http_code=vimconn.HTTP_Conflict) + if len(external_nets) > 1: + raise vimconn.vimconnException("Cannot create floating_ip automatically since multiple " + "external networks are present", + http_code=vimconn.HTTP_Conflict) + + pool_id = external_nets[0].get('id') param = {'floatingip': {'floating_network_id': pool_id, 'tenant_id': server.tenant_id}} try: - #self.logger.debug("Creating floating IP") + # self.logger.debug("Creating floating IP") new_floating_ip = self.neutron.create_floatingip(param) free_floating_ip = new_floating_ip['floatingip']['floating_ip_address'] - fix_ip = floating_network.get('ip') + except Exception as e: + raise vimconn.vimconnException(type(e).__name__ + ": Cannot create new floating_ip " + + str(e), http_code=vimconn.HTTP_Conflict) + + fix_ip = floating_network.get('ip') + while not assigned: + try: server.add_floating_ip(free_floating_ip, fix_ip) - assigned=True + assigned = True except Exception as e: - raise vimconn.vimconnException(type(e).__name__ + ": Cannot assign floating_ip "+ str(e), http_code=vimconn.HTTP_Conflict) + # openstack need some time after VM creation to asign an IP. So retry if fails + vm_status = self.nova.servers.get(server.id).status + if vm_status != 'ACTIVE' and vm_status != 'ERROR': + if time.time() - vm_start_time < server_timeout: + time.sleep(5) + continue + raise vimconn.vimconnException( + "Cannot create floating_ip: {} {}".format(type(e).__name__, e), + http_code=vimconn.HTTP_Conflict) + except Exception as e: if not floating_network['exit_on_floating_ip_error']: self.logger.warn("Cannot create floating_ip. 
%s", str(e)) continue raise - return server.id + return server.id, created_items # except nvExceptions.NotFound as e: # error_value=-vimconn.HTTP_Not_Found # error_text= "vm instance %s not found" % vm_id @@ -1275,19 +1228,13 @@ class vimconnector(vimconn.vimconnector): # raise vimconn.vimconnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request) except Exception as e: - # delete the volumes we just created - if block_device_mapping: - for volume_id in block_device_mapping.itervalues(): - self.cinder.volumes.delete(volume_id) - - # Delete the VM - if server != None: - self.delete_vminstance(server.id) - else: - # delete ports we just created - for net_item in net_list_vim: - if 'port-id' in net_item: - self.neutron.delete_port(net_item['port-id']) + server_id = None + if server: + server_id = server.id + try: + self.delete_vminstance(server_id, created_items) + except Exception as e2: + self.logger.error("new_vminstance rollback fail {}".format(e2)) self._format_exception(e) @@ -1353,50 +1300,59 @@ class vimconnector(vimconn.vimconnector): except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.BadRequest, ConnectionError) as e: self._format_exception(e) - def delete_vminstance(self, vm_id): + def delete_vminstance(self, vm_id, created_items=None): '''Removes a VM instance from VIM. Returns the old identifier ''' #print "osconnector: Getting VM from VIM" + if created_items == None: + created_items = {} try: self._reload_connection() - #delete VM ports attached to this networks before the virtual machine - ports = self.neutron.list_ports(device_id=vm_id) - for p in ports['ports']: + # delete VM ports attached to this networks before the virtual machine + for k, v in created_items.items(): + if not v: # skip already deleted + continue try: - self.neutron.delete_port(p["id"]) + k_item, _, k_id = k.partition(":") + if k_item == "port": + self.neutron.delete_port(k_id) except Exception as e: - self.logger.error("Error deleting port: " + type(e).__name__ + ": "+ str(e)) + self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e)) - #commented because detaching the volumes makes the servers.delete not work properly ?!? - #dettach volumes attached - server = self.nova.servers.get(vm_id) - volumes_attached_dict = server._info['os-extended-volumes:volumes_attached'] - #for volume in volumes_attached_dict: - # self.cinder.volumes.detach(volume['id']) + # #commented because detaching the volumes makes the servers.delete not work properly ?!? + # #dettach volumes attached + # server = self.nova.servers.get(vm_id) + # volumes_attached_dict = server._info['os-extended-volumes:volumes_attached'] #volume['id'] + # #for volume in volumes_attached_dict: + # # self.cinder.volumes.detach(volume['id']) - self.nova.servers.delete(vm_id) + if vm_id: + self.nova.servers.delete(vm_id) - #delete volumes. - #Although having detached them should have them in active status - #we ensure in this loop + # delete volumes. 
Although having detached, they should have in active status before deleting + # we ensure in this loop keep_waiting = True elapsed_time = 0 while keep_waiting and elapsed_time < volume_timeout: keep_waiting = False - for volume in volumes_attached_dict: - if self.cinder.volumes.get(volume['id']).status != 'available': - keep_waiting = True - else: - self.cinder.volumes.delete(volume['id']) + for k, v in created_items.items(): + if not v: # skip already deleted + continue + try: + k_item, _, k_id = k.partition(":") + if k_item == "volume": + if self.cinder.volumes.get(k_id).status != 'available': + keep_waiting = True + else: + self.cinder.volumes.delete(k_id) + except Exception as e: + self.logger.error("Error deleting volume: {}: {}".format(type(e).__name__, e)) if keep_waiting: time.sleep(1) elapsed_time += 1 - - return vm_id + return None except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e: self._format_exception(e) - #TODO insert exception vimconn.HTTP_Unauthorized - #if reaching here is because an exception def refresh_vms_status(self, vm_list): '''Get the status of the virtual machines and their interfaces/ports @@ -1445,7 +1401,7 @@ class vimconnector(vimconn.vimconnector): #get interfaces try: self._reload_connection() - port_dict=self.neutron.list_ports(device_id=vm_id) + port_dict = self.neutron.list_ports(device_id=vm_id) for port in port_dict["ports"]: interface={} try: @@ -1479,16 +1435,20 @@ class vimconnector(vimconn.vimconnector): interface["vlan"] = network['network'].get('provider:segmentation_id') ips=[] #look for floating ip address - floating_ip_dict = self.neutron.list_floatingips(port_id=port["id"]) - if floating_ip_dict.get("floatingips"): - ips.append(floating_ip_dict["floatingips"][0].get("floating_ip_address") ) + try: + floating_ip_dict = self.neutron.list_floatingips(port_id=port["id"]) + if floating_ip_dict.get("floatingips"): + ips.append(floating_ip_dict["floatingips"][0].get("floating_ip_address") ) + except Exception: + pass for subnet in port["fixed_ips"]: ips.append(subnet["ip_address"]) interface["ip_address"] = ";".join(ips) vm["interfaces"].append(interface) except Exception as e: - self.logger.error("Error getting vm interface information " + type(e).__name__ + ": "+ str(e)) + self.logger.error("Error getting vm interface information {}: {}".format(type(e).__name__, e), + exc_info=True) except vimconn.vimconnNotFoundException as e: self.logger.error("Exception getting vm status: %s", str(e)) vm['status'] = "DELETED" @@ -1500,9 +1460,9 @@ class vimconnector(vimconn.vimconnector): vm_dict[vm_id] = vm return vm_dict - def action_vminstance(self, vm_id, action_dict): + def action_vminstance(self, vm_id, action_dict, created_items={}): '''Send and action over a VM instance from VIM - Returns the vm_id if the action was successfully sent to the VIM''' + Returns None or the console dict if the action was successfully sent to the VIM''' self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict)) try: self._reload_connection() @@ -1569,7 +1529,7 @@ class vimconnector(vimconn.vimconnector): except Exception as e: raise vimconn.vimconnException("Unexpected response from VIM " + str(console_dict)) - return vm_id + return None except (ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.NotFound, ConnectionError) as e: self._format_exception(e) #TODO insert exception vimconn.HTTP_Unauthorized @@ -1666,8 +1626,7 @@ class vimconnector(vimconn.vimconnector): error_text= 
type(e).__name__ + ": "+ (str(e) if len(e.args)==0 else str(e.args[0])) #TODO insert exception vimconn.HTTP_Unauthorized #if reaching here is because an exception - if self.debug: - self.logger.debug("new_user " + error_text) + self.logger.debug("new_user " + error_text) return error_value, error_text def delete_user(self, user_id): @@ -1690,8 +1649,7 @@ class vimconnector(vimconn.vimconnector): error_text= type(e).__name__ + ": "+ (str(e) if len(e.args)==0 else str(e.args[0])) #TODO insert exception vimconn.HTTP_Unauthorized #if reaching here is because an exception - if self.debug: - print("delete_tenant " + error_text) + self.logger.debug("delete_tenant " + error_text) return error_value, error_text def get_hosts_info(self): @@ -1714,8 +1672,7 @@ class vimconnector(vimconn.vimconnector): error_text= type(e).__name__ + ": "+ (str(e) if len(e.args)==0 else str(e.args[0])) #TODO insert exception vimconn.HTTP_Unauthorized #if reaching here is because an exception - if self.debug: - print("get_hosts_info " + error_text) + self.logger.debug("get_hosts_info " + error_text) return error_value, error_text def get_hosts(self, vim_tenant): @@ -1743,8 +1700,7 @@ class vimconnector(vimconn.vimconnector): error_text= type(e).__name__ + ": "+ (str(e) if len(e.args)==0 else str(e.args[0])) #TODO insert exception vimconn.HTTP_Unauthorized #if reaching here is because an exception - if self.debug: - print("get_hosts " + error_text) + self.logger.debug("get_hosts " + error_text) return error_value, error_text def new_classification(self, name, ctype, definition): @@ -1765,7 +1721,7 @@ class vimconnector(vimconn.vimconnector): classification_dict = definition classification_dict['name'] = name - new_class = self.neutron.create_flow_classifier( + new_class = self.neutron.create_sfc_flow_classifier( {'flow_classifier': classification_dict}) return new_class['flow_classifier']['id'] except (neExceptions.ConnectionFailed, ksExceptions.ClientException, @@ -1791,11 +1747,12 @@ class vimconnector(vimconn.vimconnector): self.logger.debug("Getting Classifications from VIM filter: '%s'", str(filter_dict)) try: + filter_dict_os = filter_dict.copy() self._reload_connection() - if self.api_version3 and "tenant_id" in filter_dict: - filter_dict['project_id'] = filter_dict.pop('tenant_id') - classification_dict = self.neutron.list_flow_classifier( - **filter_dict) + if self.api_version3 and "tenant_id" in filter_dict_os: + filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id') + classification_dict = self.neutron.list_sfc_flow_classifiers( + **filter_dict_os) classification_list = classification_dict["flow_classifiers"] self.__classification_os2mano(classification_list) return classification_list @@ -1807,7 +1764,7 @@ class vimconnector(vimconn.vimconnector): self.logger.debug("Deleting Classification '%s' from VIM", class_id) try: self._reload_connection() - self.neutron.delete_flow_classifier(class_id) + self.neutron.delete_sfc_flow_classifier(class_id) return class_id except (neExceptions.ConnectionFailed, neExceptions.NeutronException, ksExceptions.ClientException, neExceptions.NeutronException, @@ -1822,9 +1779,7 @@ class vimconnector(vimconn.vimconnector): self._reload_connection() correlation = None if sfc_encap: - # TODO(igordc): must be changed to NSH in Queens - # (MPLS is a workaround) - correlation = 'mpls' + correlation = 'nsh' if len(ingress_ports) != 1: raise vimconn.vimconnNotSupportedException( "OpenStack VIM connector can only have " @@ -1838,13 +1793,13 @@ class 
vimconnector(vimconn.vimconnector): 'egress': egress_ports[0], 'service_function_parameters': { 'correlation': correlation}} - new_sfi = self.neutron.create_port_pair({'port_pair': sfi_dict}) + new_sfi = self.neutron.create_sfc_port_pair({'port_pair': sfi_dict}) return new_sfi['port_pair']['id'] except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e: if new_sfi: try: - self.neutron.delete_port_pair_group( + self.neutron.delete_sfc_port_pair( new_sfi['port_pair']['id']) except Exception: self.logger.error( @@ -1872,9 +1827,10 @@ class vimconnector(vimconn.vimconnector): "VIM filter: '%s'", str(filter_dict)) try: self._reload_connection() - if self.api_version3 and "tenant_id" in filter_dict: - filter_dict['project_id'] = filter_dict.pop('tenant_id') - sfi_dict = self.neutron.list_port_pair(**filter_dict) + filter_dict_os = filter_dict.copy() + if self.api_version3 and "tenant_id" in filter_dict_os: + filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id') + sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os) sfi_list = sfi_dict["port_pairs"] self.__sfi_os2mano(sfi_list) return sfi_list @@ -1887,7 +1843,7 @@ class vimconnector(vimconn.vimconnector): "from VIM", sfi_id) try: self._reload_connection() - self.neutron.delete_port_pair(sfi_id) + self.neutron.delete_sfc_port_pair(sfi_id) return sfi_id except (neExceptions.ConnectionFailed, neExceptions.NeutronException, ksExceptions.ClientException, neExceptions.NeutronException, @@ -1900,27 +1856,25 @@ class vimconnector(vimconn.vimconnector): try: new_sf = None self._reload_connection() - correlation = None - if sfc_encap: - # TODO(igordc): must be changed to NSH in Queens - # (MPLS is a workaround) - correlation = 'mpls' + # correlation = None + # if sfc_encap: + # correlation = 'nsh' for instance in sfis: sfi = self.get_sfi(instance) - if sfi.get('sfc_encap') != correlation: + if sfi.get('sfc_encap') != sfc_encap: raise vimconn.vimconnNotSupportedException( "OpenStack VIM connector requires all SFIs of the " "same SF to share the same SFC Encapsulation") sf_dict = {'name': name, 'port_pairs': sfis} - new_sf = self.neutron.create_port_pair_group({ + new_sf = self.neutron.create_sfc_port_pair_group({ 'port_pair_group': sf_dict}) return new_sf['port_pair_group']['id'] except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e: if new_sf: try: - self.neutron.delete_port_pair_group( + self.neutron.delete_sfc_port_pair_group( new_sf['port_pair_group']['id']) except Exception: self.logger.error( @@ -1946,9 +1900,10 @@ class vimconnector(vimconn.vimconnector): str(filter_dict)) try: self._reload_connection() - if self.api_version3 and "tenant_id" in filter_dict: - filter_dict['project_id'] = filter_dict.pop('tenant_id') - sf_dict = self.neutron.list_port_pair_group(**filter_dict) + filter_dict_os = filter_dict.copy() + if self.api_version3 and "tenant_id" in filter_dict_os: + filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id') + sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os) sf_list = sf_dict["port_pair_groups"] self.__sf_os2mano(sf_list) return sf_list @@ -1960,7 +1915,7 @@ class vimconnector(vimconn.vimconnector): self.logger.debug("Deleting Service Function '%s' from VIM", sf_id) try: self._reload_connection() - self.neutron.delete_port_pair_group(sf_id) + self.neutron.delete_sfc_port_pair_group(sf_id) return sf_id except (neExceptions.ConnectionFailed, 
neExceptions.NeutronException, ksExceptions.ClientException, neExceptions.NeutronException, @@ -1973,26 +1928,24 @@ class vimconnector(vimconn.vimconnector): try: new_sfp = None self._reload_connection() - if not sfc_encap: - raise vimconn.vimconnNotSupportedException( - "OpenStack VIM connector only supports " - "SFC-Encapsulated chains") - # TODO(igordc): must be changed to NSH in Queens - # (MPLS is a workaround) - correlation = 'mpls' + # In networking-sfc the MPLS encapsulation is legacy + # should be used when no full SFC Encapsulation is intended + sfc_encap = 'mpls' + if sfc_encap: + correlation = 'nsh' sfp_dict = {'name': name, 'flow_classifiers': classifications, 'port_pair_groups': sfs, 'chain_parameters': {'correlation': correlation}} if spi: sfp_dict['chain_id'] = spi - new_sfp = self.neutron.create_port_chain({'port_chain': sfp_dict}) + new_sfp = self.neutron.create_sfc_port_chain({'port_chain': sfp_dict}) return new_sfp["port_chain"]["id"] except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e: if new_sfp: try: - self.neutron.delete_port_chain(new_sfp['port_chain']['id']) + self.neutron.delete_sfc_port_chain(new_sfp['port_chain']['id']) except Exception: self.logger.error( 'Creation of Service Function Path failed, with ' @@ -2017,9 +1970,10 @@ class vimconnector(vimconn.vimconnector): "'%s'", str(filter_dict)) try: self._reload_connection() - if self.api_version3 and "tenant_id" in filter_dict: - filter_dict['project_id'] = filter_dict.pop('tenant_id') - sfp_dict = self.neutron.list_port_chain(**filter_dict) + filter_dict_os = filter_dict.copy() + if self.api_version3 and "tenant_id" in filter_dict_os: + filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id') + sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os) sfp_list = sfp_dict["port_chains"] self.__sfp_os2mano(sfp_list) return sfp_list @@ -2032,7 +1986,7 @@ class vimconnector(vimconn.vimconnector): "Deleting Service Function Path '%s' from VIM", sfp_id) try: self._reload_connection() - self.neutron.delete_port_chain(sfp_id) + self.neutron.delete_sfc_port_chain(sfp_id) return sfp_id except (neExceptions.ConnectionFailed, neExceptions.NeutronException, ksExceptions.ClientException, neExceptions.NeutronException,
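
The patch above concentrates a few recurring ideas: TLS verification is now driven by the mutually exclusive "insecure"/"ca_cert" options and passed as the session's verify argument; Keystone v3 domain ids only fall back to 'default' when neither a domain id nor a domain name is configured; and every port or volume created for a VM is recorded in a created_items dictionary keyed as '<type>:<vim-id>' so that delete_vminstance can roll it back. The standalone Python sketch below restates that logic outside the connector for readability. It is illustrative only: the helper names (resolve_verify, domain_defaults, split_created_item) and the example values are not part of vimconn_openstack.py, which keeps the same checks inline in __init__, _reload_connection and delete_vminstance.

# Minimal standalone sketch of logic introduced by this patch (illustrative names).

def resolve_verify(config):
    """Map the 'insecure'/'ca_cert' VIM config options to the 'verify' value
    later handed to keystoneauth1.session.Session(auth=..., verify=...)."""
    if config.get("insecure") and config.get("ca_cert"):
        raise ValueError("options insecure and ca_cert are mutually exclusive")
    if config.get("insecure"):
        return False                    # skip TLS verification
    if config.get("ca_cert"):
        return config["ca_cert"]        # path to a CA bundle file
    return True                         # default: verify against system CAs


def domain_defaults(config):
    """Return (project_domain_id, user_domain_id). 'default' is used only when
    neither the *_domain_id nor the *_domain_name option is configured, so a
    configured domain name is never silently overridden by a default id."""
    project_domain_id_default = None if (config.get("project_domain_id") or
                                         config.get("project_domain_name")) else "default"
    user_domain_id_default = None if (config.get("user_domain_id") or
                                      config.get("user_domain_name")) else "default"
    return (config.get("project_domain_id", project_domain_id_default),
            config.get("user_domain_id", user_domain_id_default))


def split_created_item(key):
    """created_items keys have the form '<type>:<vim-id>', e.g. 'port:<uuid>'
    or 'volume:<uuid>'; delete_vminstance partitions them to decide whether
    neutron (ports) or cinder (volumes) must clean the resource up."""
    item_type, _, item_id = key.partition(":")
    return item_type, item_id


if __name__ == "__main__":
    # Example values below are hypothetical, for demonstration only.
    print(resolve_verify({"ca_cert": "/etc/ssl/certs/vim-ca.pem"}))   # -> '/etc/ssl/certs/vim-ca.pem'
    print(domain_defaults({"project_domain_name": "Default"}))        # -> (None, 'default')
    print(domain_defaults({}))                                         # -> ('default', 'default')
    print(split_created_item("port:3c55b44f"))                         # -> ('port', '3c55b44f')

Under these assumptions, the same verify value feeds session.Session(auth=auth, verify=...) for both the v2 and v3 paths, and the created_items dictionary is what new_vminstance returns alongside the server id so that a failed creation (or a later deletion) can be cleaned up with a single pass over its keys.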