X-Git-Url: https://osm.etsi.org/gitweb/?p=osm%2FRO.git;a=blobdiff_plain;f=RO-VIM-openstack%2Fosm_rovim_openstack%2Fvimconn_openstack.py;h=54d0e88ad8f9799484e3613f4cbb3fbb9272f333;hp=f3b89dcbadbda0e5b982abbdf3e2d8bd1313f654;hb=HEAD;hpb=26f7366ed0ab69a4c7c3e0bc2fdd51f35b36c396 diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py index f3b89dcb..54d0e88a 100644 --- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py +++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py @@ -38,9 +38,10 @@ from pprint import pformat import random import re import time -from typing import Dict, Optional, Tuple +from typing import Dict, List, Optional, Tuple from cinderclient import client as cClient +import cinderclient.exceptions as cExceptions from glanceclient import client as glClient import glanceclient.exc as gl1Exceptions from keystoneauth1 import session @@ -85,6 +86,16 @@ volume_timeout = 1800 server_timeout = 1800 +def catch_any_exception(func): + def format_exception(*args, **kwargs): + try: + return func(*args, *kwargs) + except Exception as e: + vimconnector._format_exception(e) + + return format_exception + + class SafeDumper(yaml.SafeDumper): def represent_data(self, data): # Openstack APIs use custom subclasses of dict and YAML safe dumper @@ -175,6 +186,8 @@ class vimconnector(vimconn.VimConnector): self.persistent_info = persistent_info self.availability_zone = persistent_info.get("availability_zone", None) + self.storage_availability_zone = None + self.vm_av_zone = None self.session = persistent_info.get("session", {"reload_client": True}) self.my_tenant_id = self.session.get("my_tenant_id") self.nova = self.session.get("nova") @@ -339,7 +352,7 @@ class vimconnector(vimconn.VimConnector): version = self.config.get("microversion") if not version: - version = "2.1" + version = "2.60" # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River # Titanium cloud and StarlingX @@ -355,12 +368,21 @@ class vimconnector(vimconn.VimConnector): endpoint_type=self.endpoint_type, region_name=region_name, ) - self.cinder = self.session["cinder"] = cClient.Client( - 2, - session=sess, - endpoint_type=self.endpoint_type, - region_name=region_name, - ) + + if sess.get_all_version_data(service_type="volumev2"): + self.cinder = self.session["cinder"] = cClient.Client( + 2, + session=sess, + endpoint_type=self.endpoint_type, + region_name=region_name, + ) + else: + self.cinder = self.session["cinder"] = cClient.Client( + 3, + session=sess, + endpoint_type=self.endpoint_type, + region_name=region_name, + ) try: self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id() @@ -516,7 +538,8 @@ class vimconnector(vimconn.VimConnector): # Types. Also, abstract vimconnector should call the validation # method before the implemented VIM connectors are called. 
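The new catch_any_exception decorator introduced in this patch lets connector methods funnel any client-library error through _format_exception instead of leaking raw OpenStack exceptions to callers. Below is a minimal, self-contained sketch of the same pattern; VimConnException and delete_network are illustrative stand-ins rather than the OSM implementation, and this wrapper raises directly whereas the patch delegates to vimconnector._format_exception (keyword arguments are forwarded here with **kwargs):

import functools


class VimConnException(Exception):
    """Illustrative stand-in for osm_ro_plugin.vimconn.VimConnException."""


def catch_any_exception(func):
    # Wrap a connector method so any client error surfaces as a connector-level error.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)  # keyword args forwarded with **
        except Exception as e:
            raise VimConnException("{}: {}".format(type(e).__name__, e)) from e

    return wrapper


@catch_any_exception
def delete_network(net_id, created_items=None):
    raise RuntimeError("neutron failure")  # surfaces as VimConnException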
- def _format_exception(self, exception): + @staticmethod + def _format_exception(exception): """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause""" message_error = str(exception) tip = "" @@ -526,8 +549,10 @@ class vimconnector(vimconn.VimConnector): ( neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, + nvExceptions.ResourceNotFound, ksExceptions.NotFound, gl1Exceptions.HTTPNotFound, + cExceptions.NotFound, ), ): raise vimconn.VimConnNotFoundException( @@ -542,6 +567,7 @@ class vimconnector(vimconn.VimConnector): ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed, + cExceptions.ConnectionError, ), ): if type(exception).__name__ == "SSLError": @@ -556,17 +582,26 @@ class vimconnector(vimconn.VimConnector): KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest, + gl1Exceptions.BadRequest, + cExceptions.BadRequest, ), ): + if message_error == "OS-EXT-SRV-ATTR:host": + tip = " (If the user does not have non-admin credentials, this attribute will be missing)" + raise vimconn.VimConnInsufficientCredentials( + type(exception).__name__ + ": " + message_error + tip + ) raise vimconn.VimConnException( type(exception).__name__ + ": " + message_error ) + elif isinstance( exception, ( nvExceptions.ClientException, ksExceptions.ClientException, neExceptions.NeutronException, + cExceptions.ClientException, ), ): raise vimconn.VimConnUnexpectedResponse( @@ -579,9 +614,10 @@ class vimconnector(vimconn.VimConnector): elif isinstance(exception, vimconn.VimConnException): raise exception else: # () - self.logger.error("General Exception " + message_error, exc_info=True) + logger = logging.getLogger("ro.vim.openstack") + logger.error("General Exception " + message_error, exc_info=True) - raise vimconn.VimConnConnectionException( + raise vimconn.VimConnException( type(exception).__name__ + ": " + message_error ) @@ -619,6 +655,32 @@ class vimconnector(vimconn.VimConnector): "Not found security group {} for this tenant".format(sg) ) + def _find_nova_server(self, vm_id): + """ + Returns the VM instance from Openstack and completes it with flavor ID + Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47 + """ + try: + self._reload_connection() + server = self.nova.servers.find(id=vm_id) + # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema) + server_dict = server.to_dict() + try: + if server_dict["flavor"].get("original_name"): + server_dict["flavor"]["id"] = self.nova.flavors.find( + name=server_dict["flavor"]["original_name"] + ).id + except nClient.exceptions.NotFound as e: + self.logger.warning(str(e.message)) + return server_dict + except ( + ksExceptions.ClientException, + nvExceptions.ClientException, + nvExceptions.NotFound, + ConnectionError, + ) as e: + self._format_exception(e) + def check_vim_connectivity(self): # just get network list to check connectivity and credentials self.get_network_list(filter_dict={}) @@ -632,7 +694,6 @@ class vimconnector(vimconn.VimConnector): Returns the tenant list of dictionaries: [{'name':', 'id':', ...}, ...] """ self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict)) - try: self._reload_connection() @@ -662,7 +723,6 @@ class vimconnector(vimconn.VimConnector): def new_tenant(self, tenant_name, tenant_description): """Adds a new tenant to openstack VIM. 
Returns the tenant identifier""" self.logger.debug("Adding a new tenant name: %s", tenant_name) - try: self._reload_connection() @@ -688,7 +748,6 @@ class vimconnector(vimconn.VimConnector): def delete_tenant(self, tenant_id): """Delete a tenant from openstack VIM. Returns the old tenant identifier""" self.logger.debug("Deleting tenant %s from VIM", tenant_id) - try: self._reload_connection() @@ -698,6 +757,7 @@ class vimconnector(vimconn.VimConnector): self.keystone.tenants.delete(tenant_id) return tenant_id + except ( ksExceptions.ConnectionError, ksExceptions.ClientException, @@ -754,7 +814,7 @@ class vimconnector(vimconn.VimConnector): self._reload_connection() network_dict = {"name": net_name, "admin_state_up": True} - if net_type in ("data", "ptp"): + if net_type in ("data", "ptp") or provider_network_profile: provider_physical_network = None if provider_network_profile and provider_network_profile.get( @@ -787,7 +847,7 @@ class vimconnector(vimconn.VimConnector): "dataplane_physical_net" ) - # if it is non empty list, use the first value. If it is a string use the value directly + # if it is non-empty list, use the first value. If it is a string use the value directly if ( isinstance(provider_physical_network, (tuple, list)) and provider_physical_network @@ -803,17 +863,17 @@ class vimconnector(vimconn.VimConnector): ) if not self.config.get("multisegment_support"): - network_dict[ - "provider:physical_network" - ] = provider_physical_network + network_dict["provider:physical_network"] = ( + provider_physical_network + ) if ( provider_network_profile and "network-type" in provider_network_profile ): - network_dict[ - "provider:network_type" - ] = provider_network_profile["network-type"] + network_dict["provider:network_type"] = ( + provider_network_profile["network-type"] + ) else: network_dict["provider:network_type"] = self.config.get( "dataplane_network_type", "vlan" @@ -880,7 +940,7 @@ class vimconnector(vimconn.VimConnector): if not ip_profile.get("subnet_address"): # Fake subnet is required - subnet_rand = random.randint(0, 255) + subnet_rand = random.SystemRandom().randint(0, 255) ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand) if "ip_version" not in ip_profile: @@ -925,6 +985,15 @@ class vimconnector(vimconn.VimConnector): ip_str = str(netaddr.IPAddress(ip_int)) subnet["allocation_pools"][0]["end"] = ip_str + if ( + ip_profile.get("ipv6_address_mode") + and ip_profile["ip_version"] != "IPv4" + ): + subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"] + # ipv6_ra_mode can be set to the same value for most use cases, see documentation: + # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations + subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"] + # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet)) self.neutron.create_subnet({"subnet": subnet}) @@ -957,6 +1026,14 @@ class vimconnector(vimconn.VimConnector): if k_item == "l2gwconn": self.neutron.delete_l2_gateway_connection(k_id) + + except (neExceptions.ConnectionFailed, ConnectionError) as e2: + self.logger.error( + "Error deleting l2 gateway connection: {}: {}".format( + type(e2).__name__, e2 + ) + ) + self._format_exception(e2) except Exception as e2: self.logger.error( "Error deleting l2 gateway connection: {}: {}".format( @@ -981,7 +1058,6 @@ class vimconnector(vimconn.VimConnector): Returns the network list of dictionaries """ self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict)) - try: 
self._reload_connection() filter_dict_os = filter_dict.copy() @@ -1041,6 +1117,7 @@ class vimconnector(vimconn.VimConnector): return net + @catch_any_exception def delete_network(self, net_id, created_items=None): """ Removes a tenant network from VIM and its associated elements @@ -1064,6 +1141,14 @@ class vimconnector(vimconn.VimConnector): k_item, _, k_id = k.partition(":") if k_item == "l2gwconn": self.neutron.delete_l2_gateway_connection(k_id) + + except (neExceptions.ConnectionFailed, ConnectionError) as e: + self.logger.error( + "Error deleting l2 gateway connection: {}: {}".format( + type(e).__name__, e + ) + ) + self._format_exception(e) except Exception as e: self.logger.error( "Error deleting l2 gateway connection: {}: {}".format( @@ -1076,21 +1161,22 @@ class vimconnector(vimconn.VimConnector): for p in ports["ports"]: try: self.neutron.delete_port(p["id"]) + + except (neExceptions.ConnectionFailed, ConnectionError) as e: + self.logger.error("Error deleting port %s: %s", p["id"], str(e)) + # If there is connection error, it raises. + self._format_exception(e) except Exception as e: self.logger.error("Error deleting port %s: %s", p["id"], str(e)) self.neutron.delete_network(net_id) return net_id - except ( - neExceptions.ConnectionFailed, - neExceptions.NetworkNotFoundClient, - neExceptions.NeutronException, - ksExceptions.ClientException, - neExceptions.NeutronException, - ConnectionError, - ) as e: - self._format_exception(e) + except (neExceptions.NetworkNotFoundClient, neExceptions.NotFound) as e: + # If network to be deleted is not found, it does not raise. + self.logger.warning( + f"Error deleting network: {net_id} is not found, {str(e)}" + ) def refresh_nets_status(self, net_list): """Get the status of the networks @@ -1143,13 +1229,11 @@ class vimconnector(vimconn.VimConnector): def get_flavor(self, flavor_id): """Obtain flavor details from the VIM. Returns the flavor dict details""" self.logger.debug("Getting flavor '%s'", flavor_id) - try: self._reload_connection() flavor = self.nova.flavors.find(id=flavor_id) - # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema) - return flavor.to_dict() + except ( nvExceptions.NotFound, nvExceptions.ClientException, @@ -1221,17 +1305,21 @@ class vimconnector(vimconn.VimConnector): ) except ( nvExceptions.NotFound, + nvExceptions.BadRequest, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError, ) as e: self._format_exception(e) - def process_resource_quota(self, quota, prefix, extra_specs): - """ - :param prefix: - :param extra_specs: - :return: + @staticmethod + def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None: + """Process resource quota and fill up extra_specs. 
+ Args: + quota (dict): Keeping the quota of resurces + prefix (str) Prefix + extra_specs (dict) Dict to be filled to be used during flavor creation + """ if "limit" in quota: extra_specs["quota:" + prefix + "_limit"] = quota["limit"] @@ -1243,200 +1331,321 @@ class vimconnector(vimconn.VimConnector): extra_specs["quota:" + prefix + "_shares_level"] = "custom" extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"] - def new_flavor(self, flavor_data, change_name_if_used=True): - """Adds a tenant flavor to openstack VIM - if change_name_if_used is True, it will change name in case of conflict, because it is not supported name - repetition - Returns the flavor identifier + @staticmethod + def process_numa_memory( + numa: dict, node_id: Optional[int], extra_specs: dict + ) -> None: + """Set the memory in extra_specs. + Args: + numa (dict): A dictionary which includes numa information + node_id (int): ID of numa node + extra_specs (dict): To be filled. + + """ + if not numa.get("memory"): + return + memory_mb = numa["memory"] * 1024 + memory = "hw:numa_mem.{}".format(node_id) + extra_specs[memory] = int(memory_mb) + + @staticmethod + def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None: + """Set the cpu in extra_specs. + Args: + numa (dict): A dictionary which includes numa information + node_id (int): ID of numa node + extra_specs (dict): To be filled. + + """ + if not numa.get("vcpu"): + return + vcpu = numa["vcpu"] + cpu = "hw:numa_cpus.{}".format(node_id) + vcpu = ",".join(map(str, vcpu)) + extra_specs[cpu] = vcpu + + @staticmethod + def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]: + """Fill up extra_specs if numa has paired-threads. + Args: + numa (dict): A dictionary which includes numa information + extra_specs (dict): To be filled. + + Returns: + threads (int) Number of virtual cpus + + """ + if not numa.get("paired-threads"): + return + + # cpu_thread_policy "require" implies that compute node must have an STM architecture + threads = numa["paired-threads"] * 2 + extra_specs["hw:cpu_thread_policy"] = "require" + extra_specs["hw:cpu_policy"] = "dedicated" + return threads + + @staticmethod + def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]: + """Fill up extra_specs if numa has cores. + Args: + numa (dict): A dictionary which includes numa information + extra_specs (dict): To be filled. + + Returns: + cores (int) Number of virtual cpus + + """ + # cpu_thread_policy "isolate" implies that the host must not have an SMT + # architecture, or a non-SMT architecture will be emulated + if not numa.get("cores"): + return + cores = numa["cores"] + extra_specs["hw:cpu_thread_policy"] = "isolate" + extra_specs["hw:cpu_policy"] = "dedicated" + return cores + + @staticmethod + def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]: + """Fill up extra_specs if numa has threads. + Args: + numa (dict): A dictionary which includes numa information + extra_specs (dict): To be filled. + + Returns: + threads (int) Number of virtual cpus + + """ + # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture + if not numa.get("threads"): + return + threads = numa["threads"] + extra_specs["hw:cpu_thread_policy"] = "prefer" + extra_specs["hw:cpu_policy"] = "dedicated" + return threads + + def _process_numa_parameters_of_flavor( + self, numas: List, extra_specs: Dict + ) -> None: + """Process numa parameters and fill up extra_specs. 
+ + Args: + numas (list): List of dictionary which includes numa information + extra_specs (dict): To be filled. + + """ + numa_nodes = len(numas) + extra_specs["hw:numa_nodes"] = str(numa_nodes) + cpu_cores, cpu_threads = 0, 0 + + if self.vim_type == "VIO": + self.process_vio_numa_nodes(numa_nodes, extra_specs) + + for numa in numas: + if "id" in numa: + node_id = numa["id"] + # overwrite ram and vcpus + # check if key "memory" is present in numa else use ram value at flavor + self.process_numa_memory(numa, node_id, extra_specs) + self.process_numa_vcpu(numa, node_id, extra_specs) + + # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html + extra_specs["hw:cpu_sockets"] = str(numa_nodes) + + if "paired-threads" in numa: + threads = self.process_numa_paired_threads(numa, extra_specs) + cpu_threads += threads + + elif "cores" in numa: + cores = self.process_numa_cores(numa, extra_specs) + cpu_cores += cores + + elif "threads" in numa: + threads = self.process_numa_threads(numa, extra_specs) + cpu_threads += threads + + if cpu_cores: + extra_specs["hw:cpu_cores"] = str(cpu_cores) + if cpu_threads: + extra_specs["hw:cpu_threads"] = str(cpu_threads) + + @staticmethod + def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None: + """According to number of numa nodes, updates the extra_specs for VIO. + + Args: + + numa_nodes (int): List keeps the numa node numbers + extra_specs (dict): Extra specs dict to be updated + + """ + # If there are several numas, we do not define specific affinity. + extra_specs["vmware:latency_sensitivity_level"] = "high" + + def _change_flavor_name( + self, name: str, name_suffix: int, flavor_data: dict + ) -> str: + """Change the flavor name if the name already exists. + + Args: + name (str): Flavor name to be checked + name_suffix (int): Suffix to be appended to name + flavor_data (dict): Flavor dict + + Returns: + name (str): New flavor name to be used + + """ + # Get used names + fl = self.nova.flavors.list() + fl_names = [f.name for f in fl] + + while name in fl_names: + name_suffix += 1 + name = flavor_data["name"] + "-" + str(name_suffix) + + return name + + def _process_extended_config_of_flavor( + self, extended: dict, extra_specs: dict + ) -> None: + """Process the extended dict to fill up extra_specs. + Args: + + extended (dict): Keeping the extra specification of flavor + extra_specs (dict) Dict to be filled to be used during flavor creation + + """ + quotas = { + "cpu-quota": "cpu", + "mem-quota": "memory", + "vif-quota": "vif", + "disk-io-quota": "disk_io", + } + + page_sizes = { + "LARGE": "large", + "SMALL": "small", + "SIZE_2MB": "2MB", + "SIZE_1GB": "1GB", + "PREFER_LARGE": "any", + } + + policies = { + "cpu-pinning-policy": "hw:cpu_policy", + "cpu-thread-pinning-policy": "hw:cpu_thread_policy", + "mem-policy": "hw:numa_mempolicy", + } + + numas = extended.get("numas") + if numas: + self._process_numa_parameters_of_flavor(numas, extra_specs) + + for quota, item in quotas.items(): + if quota in extended.keys(): + self.process_resource_quota(extended.get(quota), item, extra_specs) + + # Set the mempage size as specified in the descriptor + if extended.get("mempage-size"): + if extended["mempage-size"] in page_sizes.keys(): + extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]] + else: + # Normally, validations in NBI should not allow to this condition. + self.logger.debug( + "Invalid mempage-size %s. 
Will be ignored", + extended.get("mempage-size"), + ) + + for policy, hw_policy in policies.items(): + if extended.get(policy): + extra_specs[hw_policy] = extended[policy].lower() + + @staticmethod + def _get_flavor_details(flavor_data: dict) -> Tuple: + """Returns the details of flavor + Args: + flavor_data (dict): Dictionary that includes required flavor details + + Returns: + ram, vcpus, extra_specs, extended (tuple): Main items of required flavor + + """ + return ( + flavor_data.get("ram", 64), + flavor_data.get("vcpus", 1), + {}, + flavor_data.get("extended"), + ) + + @catch_any_exception + def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str: + """Adds a tenant flavor to openstack VIM. + if change_name_if_used is True, it will change name in case of conflict, + because it is not supported name repetition. + + Args: + flavor_data (dict): Flavor details to be processed + change_name_if_used (bool): Change name in case of conflict + + Returns: + flavor_id (str): flavor identifier + """ self.logger.debug("Adding flavor '%s'", str(flavor_data)) retry = 0 max_retries = 3 name_suffix = 0 + name = flavor_data["name"] + while retry < max_retries: + retry += 1 + try: + self._reload_connection() - try: - name = flavor_data["name"] - while retry < max_retries: - retry += 1 - try: - self._reload_connection() + if change_name_if_used: + name = self._change_flavor_name(name, name_suffix, flavor_data) - if change_name_if_used: - # get used names - fl_names = [] - fl = self.nova.flavors.list() - - for f in fl: - fl_names.append(f.name) - - while name in fl_names: - name_suffix += 1 - name = flavor_data["name"] + "-" + str(name_suffix) - - ram = flavor_data.get("ram", 64) - vcpus = flavor_data.get("vcpus", 1) - extra_specs = {} - - extended = flavor_data.get("extended") - if extended: - numas = extended.get("numas") - - if numas: - numa_nodes = len(numas) - - extra_specs["hw:numa_nodes"] = str(numa_nodes) - - if self.vim_type == "VIO": - extra_specs[ - "vmware:extra_config" - ] = '{"numa.nodeAffinity":"0"}' - extra_specs["vmware:latency_sensitivity_level"] = "high" - - for numa in numas: - if "id" in numa: - node_id = numa["id"] - - if "memory" in numa: - memory_mb = numa["memory"] * 1024 - memory = "hw:numa_mem.{}".format(node_id) - extra_specs[memory] = int(memory_mb) - - if "vcpu" in numa: - vcpu = numa["vcpu"] - cpu = "hw:numa_cpus.{}".format(node_id) - vcpu = ",".join(map(str, vcpu)) - extra_specs[cpu] = vcpu - - # overwrite ram and vcpus - # check if key "memory" is present in numa else use ram value at flavor - # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/ - # implemented/virt-driver-cpu-thread-pinning.html - extra_specs["hw:cpu_sockets"] = str(numa_nodes) - - if "paired-threads" in numa: - vcpus = numa["paired-threads"] * 2 - # cpu_thread_policy "require" implies that the compute node must have an - # STM architecture - extra_specs["hw:cpu_thread_policy"] = "require" - extra_specs["hw:cpu_policy"] = "dedicated" - elif "cores" in numa: - vcpus = numa["cores"] - # cpu_thread_policy "prefer" implies that the host must not have an SMT - # architecture, or a non-SMT architecture will be emulated - extra_specs["hw:cpu_thread_policy"] = "isolate" - extra_specs["hw:cpu_policy"] = "dedicated" - elif "threads" in numa: - vcpus = numa["threads"] - # cpu_thread_policy "prefer" implies that the host may or may not have an SMT - # architecture - extra_specs["hw:cpu_thread_policy"] = "prefer" - extra_specs["hw:cpu_policy"] = "dedicated" - # 
for interface in numa.get("interfaces",() ): - # if interface["dedicated"]=="yes": - # raise vimconn.VimConnException("Passthrough interfaces are not supported - # for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable) - # #TODO, add the key 'pci_passthrough:alias"="
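The block removed above is the old inline NUMA handling in new_flavor; the patch replaces it with the per-field helpers defined earlier (process_numa_memory, process_numa_vcpu, process_numa_paired_threads and related methods). The sketch below is a condensed, illustrative version of that translation, using a hypothetical one-node NUMA descriptor and covering only the paired-threads branch; the extra_specs key names and the GiB-to-MiB conversion mirror the helpers added in this patch:

def numa_to_extra_specs(numas):
    # Condensed, illustrative version of _process_numa_parameters_of_flavor.
    extra_specs = {"hw:numa_nodes": str(len(numas))}
    cpu_threads = 0

    for numa in numas:
        node_id = numa.get("id", 0)
        if numa.get("memory"):
            # memory is given in GiB; the extra spec expects MiB
            extra_specs["hw:numa_mem.{}".format(node_id)] = numa["memory"] * 1024
        if numa.get("vcpu"):
            # pin the listed guest vCPUs to this NUMA node
            extra_specs["hw:numa_cpus.{}".format(node_id)] = ",".join(
                map(str, numa["vcpu"])
            )
        extra_specs["hw:cpu_sockets"] = str(len(numas))
        if numa.get("paired-threads"):
            # each pair maps to two hyper-threads on an SMT host
            cpu_threads += numa["paired-threads"] * 2
            extra_specs["hw:cpu_thread_policy"] = "require"
            extra_specs["hw:cpu_policy"] = "dedicated"

    if cpu_threads:
        extra_specs["hw:cpu_threads"] = str(cpu_threads)

    return extra_specs


print(numa_to_extra_specs([{"id": 0, "memory": 4, "vcpu": [0, 1], "paired-threads": 2}]))
# {'hw:numa_nodes': '1', 'hw:numa_mem.0': 4096, 'hw:numa_cpus.0': '0,1',
#  'hw:cpu_sockets': '1', 'hw:cpu_thread_policy': 'require',
#  'hw:cpu_policy': 'dedicated', 'hw:cpu_threads': '4'}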