X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=RO-VIM-openstack%2Fosm_rovim_openstack%2Fvimconn_openstack.py;h=2017d5631263f62007d41296a3c458e79ff8ef6f;hb=refs%2Fheads%2Fv14.0;hp=7c57817c11d72eed4c5752a02c4d10530a48fe66;hpb=4415c4cc1eb8032f0d6a5f49fba297992b355c42;p=osm%2FRO.git

diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
index 7c57817c..2017d563 100644
--- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
+++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
@@ -339,7 +339,7 @@ class vimconnector(vimconn.VimConnector):
             version = self.config.get("microversion")
 
             if not version:
-                version = "2.1"
+                version = "2.60"
 
         # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
         # Titanium cloud and StarlingX
@@ -355,12 +355,21 @@ class vimconnector(vimconn.VimConnector):
                 endpoint_type=self.endpoint_type,
                 region_name=region_name,
             )
-            self.cinder = self.session["cinder"] = cClient.Client(
-                2,
-                session=sess,
-                endpoint_type=self.endpoint_type,
-                region_name=region_name,
-            )
+
+            if sess.get_all_version_data(service_type="volumev2"):
+                self.cinder = self.session["cinder"] = cClient.Client(
+                    2,
+                    session=sess,
+                    endpoint_type=self.endpoint_type,
+                    region_name=region_name,
+                )
+            else:
+                self.cinder = self.session["cinder"] = cClient.Client(
+                    3,
+                    session=sess,
+                    endpoint_type=self.endpoint_type,
+                    region_name=region_name,
+                )
 
             try:
                 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
@@ -558,9 +567,15 @@ class vimconnector(vimconn.VimConnector):
                 ksExceptions.BadRequest,
             ),
         ):
+            if message_error == "OS-EXT-SRV-ATTR:host":
+                tip = " (If the user does not have admin credentials, this attribute will be missing)"
+                raise vimconn.VimConnInsufficientCredentials(
+                    type(exception).__name__ + ": " + message_error + tip
+                )
             raise vimconn.VimConnException(
                 type(exception).__name__ + ": " + message_error
             )
+
         elif isinstance(
             exception,
             (
@@ -619,6 +634,32 @@ class vimconnector(vimconn.VimConnector):
                     "Not found security group {} for this tenant".format(sg)
                 )
 
+    def _find_nova_server(self, vm_id):
+        """
+        Returns the VM instance from OpenStack and completes it with the flavor ID.
+        Do not call nova.servers.find directly, as it does not return the flavor ID with microversion>=2.47.
+        """
+        try:
+            self._reload_connection()
+            server = self.nova.servers.find(id=vm_id)
+            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+            server_dict = server.to_dict()
+            try:
+                if server_dict["flavor"].get("original_name"):
+                    server_dict["flavor"]["id"] = self.nova.flavors.find(
+                        name=server_dict["flavor"]["original_name"]
+                    ).id
+            except nClient.exceptions.NotFound as e:
+                self.logger.warning(str(e.message))
+            return server_dict
+        except (
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
     def check_vim_connectivity(self):
         # just get network list to check connectivity and credentials
         self.get_network_list(filter_dict={})
@@ -880,7 +921,7 @@ class vimconnector(vimconn.VimConnector):
 
             if not ip_profile.get("subnet_address"):
                 # Fake subnet is required
-                subnet_rand = random.randint(0, 255)
+                subnet_rand = random.SystemRandom().randint(0, 255)
                 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
 
             if "ip_version" not in ip_profile:
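For context on the two subnet hunks around this point: when no subnet_address is provided, the connector fabricates a /24 and later derives the DHCP allocation-pool end address with netaddr. A minimal standalone sketch of that arithmetic, with illustrative names (fake_subnet_with_pool is not part of the connector):

    import random

    import netaddr


    def fake_subnet_with_pool(dhcp_count=100):
        # SystemRandom draws from os.urandom, which satisfies bandit's B311 check;
        # the random octet only makes the fake subnet unlikely to collide.
        subnet_rand = random.SystemRandom().randint(0, 255)
        cidr = "192.168.{}.0/24".format(subnet_rand)
        # First usable pool address; .0 is the network and .1 is usually the gateway.
        start = netaddr.IPNetwork(cidr)[2]
        # Pool end = start + dhcp_count - 1, mirroring the allocation-pool hunk below.
        end = netaddr.IPAddress(int(start) + dhcp_count - 1)
        return {
            "cidr": cidr,
            "allocation_pools": [{"start": str(start), "end": str(end)}],
        }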
@@ -925,6 +966,15 @@ class vimconnector(vimconn.VimConnector):
                     ip_str = str(netaddr.IPAddress(ip_int))
                     subnet["allocation_pools"][0]["end"] = ip_str
 
+            if (
+                ip_profile.get("ipv6_address_mode")
+                and ip_profile["ip_version"] != "IPv4"
+            ):
+                subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
+                # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
+                # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
+                subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
+
             # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
             self.neutron.create_subnet({"subnet": subnet})
 
@@ -1287,16 +1337,17 @@ class vimconnector(vimconn.VimConnector):
             extra_specs (dict): To be filled.
 
         Returns:
-            vcpus (int) Number of virtual cpus
+            threads (int) Number of CPU threads
 
         """
         if not numa.get("paired-threads"):
             return
 
+        # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
-        vcpus = numa["paired-threads"] * 2
+        threads = numa["paired-threads"] * 2
         extra_specs["hw:cpu_thread_policy"] = "require"
         extra_specs["hw:cpu_policy"] = "dedicated"
 
-        return vcpus
+        return threads
 
     @staticmethod
     def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
@@ -1306,17 +1357,17 @@ class vimconnector(vimconn.VimConnector):
             extra_specs (dict): To be filled.
 
         Returns:
-            vcpus (int) Number of virtual cpus
+            cores (int) Number of CPU cores
 
         """
         # cpu_thread_policy "isolate" implies that the host must not have an SMT
         # architecture, or a non-SMT architecture will be emulated
         if not numa.get("cores"):
             return
 
-        vcpus = numa["cores"]
+        cores = numa["cores"]
         extra_specs["hw:cpu_thread_policy"] = "isolate"
         extra_specs["hw:cpu_policy"] = "dedicated"
 
-        return vcpus
+        return cores
 
     @staticmethod
     def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
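The two hunks above and the one that follows map NUMA requests onto Nova's CPU-pinning extra specs. A condensed sketch of the combined mapping, assuming only one of the three keys is present per NUMA node (the function name is illustrative, not the connector's):

    from typing import Dict, Optional


    def numa_cpu_extra_specs(numa: dict, extra_specs: Dict) -> Optional[int]:
        # Every branch pins vCPUs to host CPUs; they differ only in SMT policy.
        extra_specs["hw:cpu_policy"] = "dedicated"

        if numa.get("paired-threads"):
            # "require": the host must expose SMT; each pair contributes two threads.
            extra_specs["hw:cpu_thread_policy"] = "require"
            return numa["paired-threads"] * 2

        if numa.get("cores"):
            # "isolate": SMT siblings are left unused, so cores map 1:1 to vCPUs.
            extra_specs["hw:cpu_thread_policy"] = "isolate"
            return numa["cores"]

        if numa.get("threads"):
            # "prefer": SMT is used when available but is not mandatory.
            extra_specs["hw:cpu_thread_policy"] = "prefer"
            return numa["threads"]

        return None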
@@ -1326,37 +1377,33 @@ class vimconnector(vimconn.VimConnector):
             extra_specs (dict): To be filled.
 
         Returns:
-            vcpus (int) Number of virtual cpus
+            threads (int) Number of CPU threads
 
         """
         # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
         if not numa.get("threads"):
             return
 
-        vcpus = numa["threads"]
+        threads = numa["threads"]
         extra_specs["hw:cpu_thread_policy"] = "prefer"
         extra_specs["hw:cpu_policy"] = "dedicated"
 
-        return vcpus
+        return threads
 
     def _process_numa_parameters_of_flavor(
-        self, numas: List, extra_specs: Dict, vcpus: Optional[int]
-    ) -> int:
+        self, numas: List, extra_specs: Dict
+    ) -> None:
         """Process numa parameters and fill up extra_specs.
 
         Args:
            numas (list): List of dictionaries which include numa information
            extra_specs (dict): To be filled.
-            vcpus (int) Number of virtual cpus
-
-        Returns:
-            vcpus (int) Number of virtual cpus
-
         """
         numa_nodes = len(numas)
         extra_specs["hw:numa_nodes"] = str(numa_nodes)
+        cpu_cores, cpu_threads = 0, 0
 
         if self.vim_type == "VIO":
-            extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
-            extra_specs["vmware:latency_sensitivity_level"] = "high"
+            self.process_vio_numa_nodes(numa_nodes, extra_specs)
 
         for numa in numas:
             if "id" in numa:
@@ -1370,15 +1417,34 @@ class vimconnector(vimconn.VimConnector):
                 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
 
             if "paired-threads" in numa:
-                vcpus = self.process_numa_paired_threads(numa, extra_specs)
+                threads = self.process_numa_paired_threads(numa, extra_specs)
+                cpu_threads += threads
 
             elif "cores" in numa:
-                vcpus = self.process_numa_cores(numa, extra_specs)
+                cores = self.process_numa_cores(numa, extra_specs)
+                cpu_cores += cores
 
             elif "threads" in numa:
-                vcpus = self.process_numa_threads(numa, extra_specs)
+                threads = self.process_numa_threads(numa, extra_specs)
+                cpu_threads += threads
+
+        if cpu_cores:
+            extra_specs["hw:cpu_cores"] = str(cpu_cores)
+        if cpu_threads:
+            extra_specs["hw:cpu_threads"] = str(cpu_threads)
+
+    @staticmethod
+    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
+        """According to the number of numa nodes, updates the extra_specs for VIO.
-
-        return vcpus
 
+        Args:
+            numa_nodes (int): Number of NUMA nodes
+            extra_specs (dict): Extra specs dict to be updated
+        """
+        # If there are several numas, we do not define specific affinity.
+        extra_specs["vmware:latency_sensitivity_level"] = "high"
 
     def _change_flavor_name(
         self, name: str, name_suffix: int, flavor_data: dict
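Taken together, the NUMA helpers now accumulate per-node totals instead of overwriting a single vcpus value. As an illustration (values assumed, not taken from the patch), a two-node request with paired threads would produce roughly the extra specs below, which novaclient can attach to a flavor with Flavor.set_keys:

    # Example request: two NUMA nodes, each asking for two paired threads.
    numas = [
        {"id": 0, "paired-threads": 2},
        {"id": 1, "paired-threads": 2},
    ]

    # Roughly the extra specs accumulated for that request (illustrative):
    expected_extra_specs = {
        "hw:numa_nodes": "2",
        "hw:cpu_sockets": "2",
        "hw:cpu_policy": "dedicated",
        "hw:cpu_thread_policy": "require",
        "hw:cpu_threads": "8",  # 2 nodes * 2 pairs * 2 threads per pair
    }

    # After nova.flavors.create(...), extra specs are attached to the flavor, e.g.:
    # new_flavor.set_keys(expected_extra_specs)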
@@ -1405,17 +1471,13 @@ class vimconnector(vimconn.VimConnector):
         return name
 
     def _process_extended_config_of_flavor(
-        self, extended: dict, extra_specs: dict, vcpus: Optional[int]
-    ) -> int:
+        self, extended: dict, extra_specs: dict
+    ) -> None:
         """Process the extended dict to fill up extra_specs.
 
         Args:
-            extended (dict): Keeping the extra specification of flavor
-            extra_specs (dict) Dict to be filled to be used during flavor creation
-            vcpus (int) Number of virtual cpus
-
-        Returns:
-            vcpus (int) Number of virtual cpus
+            extended (dict): Extra specification of the flavor
+            extra_specs (dict): Dict to be filled and used during flavor creation
 
         """
         quotas = {
@@ -1441,7 +1503,7 @@ class vimconnector(vimconn.VimConnector):
         numas = extended.get("numas")
 
         if numas:
-            vcpus = self._process_numa_parameters_of_flavor(numas, extra_specs, vcpus)
+            self._process_numa_parameters_of_flavor(numas, extra_specs)
 
         for quota, item in quotas.items():
             if quota in extended.keys():
@@ -1462,8 +1524,6 @@ class vimconnector(vimconn.VimConnector):
             if extended.get(policy):
                 extra_specs[hw_policy] = extended[policy].lower()
 
-        return vcpus
-
     @staticmethod
     def _get_flavor_details(flavor_data: dict) -> Tuple:
         """Returns the details of flavor
@@ -1513,9 +1573,7 @@ class vimconnector(vimconn.VimConnector):
                 flavor_data
             )
             if extended:
-                vcpus = self._process_extended_config_of_flavor(
-                    extended, extra_specs, vcpus
-                )
+                self._process_extended_config_of_flavor(extended, extra_specs)
 
             # Create flavor
 
@@ -1536,7 +1594,6 @@ class vimconnector(vimconn.VimConnector):
             return new_flavor.id
         except nvExceptions.Conflict as e:
-
             if change_name_if_used and retry < max_retries:
                 continue
 
@@ -1880,7 +1937,6 @@ class vimconnector(vimconn.VimConnector):
 
         # For VF
         elif net["type"] == "VF" or net["type"] == "SR-IOV":
-
             port_dict["binding:vnic_type"] = "direct"
 
             # VIO specific Changes
@@ -1922,8 +1978,14 @@ class vimconnector(vimconn.VimConnector):
         if net.get("mac_address"):
             port_dict["mac_address"] = net["mac_address"]
 
-        if net.get("ip_address"):
-            port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
+        ip_dual_list = []
+        if ip_list := net.get("ip_address"):
+            if not isinstance(ip_list, list):
+                ip_list = [ip_list]
+            for ip in ip_list:
+                ip_dict = {"ip_address": ip}
+                ip_dual_list.append(ip_dict)
+            port_dict["fixed_ips"] = ip_dual_list
             # TODO add "subnet_id":
 
     def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
@@ -1940,7 +2002,7 @@ class vimconnector(vimconn.VimConnector):
         """
         new_port = self.neutron.create_port({"port": port_dict})
         created_items["port:" + str(new_port["port"]["id"])] = True
-        net["mac_adress"] = new_port["port"]["mac_address"]
+        net["mac_address"] = new_port["port"]["mac_address"]
         net["vim_id"] = new_port["port"]["id"]
 
         return new_port
@@ -2068,7 +2130,6 @@ class vimconnector(vimconn.VimConnector):
             key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
 
             if disk.get(key_id):
-
                 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
                 existing_vim_volumes.append({"id": disk[key_id]})
 
@@ -2116,11 +2177,59 @@ class vimconnector(vimconn.VimConnector):
                     "Created volume is not valid, does not have id attribute."
) + block_device_mapping["vd" + chr(base_disk_index)] = volume.id + if disk.get("multiattach"): # multiattach volumes do not belong to VDUs + return volume_txt = "volume:" + str(volume.id) if disk.get("keep"): volume_txt += ":keep" created_items[volume_txt] = True - block_device_mapping["vd" + chr(base_disk_index)] = volume.id + + def new_shared_volumes(self, shared_volume_data) -> (str, str): + try: + volume = self.cinder.volumes.create( + size=shared_volume_data["size"], + name=shared_volume_data["name"], + volume_type="multiattach", + ) + return (volume.name, volume.id) + except (ConnectionError, KeyError) as e: + self._format_exception(e) + + def _prepare_shared_volumes( + self, + name: str, + disk: dict, + base_disk_index: int, + block_device_mapping: dict, + existing_vim_volumes: list, + created_items: dict, + ): + volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()} + if volumes.get(disk["name"]): + sv_id = volumes[disk["name"]] + max_retries = 3 + vol_status = "" + # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time + while max_retries: + max_retries -= 1 + volume = self.cinder.volumes.get(sv_id) + vol_status = volume.status + if volume.status not in ("in-use", "available"): + time.sleep(5) + continue + self.update_block_device_mapping( + volume=volume, + block_device_mapping=block_device_mapping, + base_disk_index=base_disk_index, + disk=disk, + created_items=created_items, + ) + return + raise vimconn.VimConnException( + "Shared volume is not prepared, status is: {}".format(vol_status), + http_code=vimconn.HTTP_Internal_Server_Error, + ) def _prepare_non_root_persistent_volumes( self, @@ -2146,18 +2255,15 @@ class vimconnector(vimconn.VimConnector): # Non-root persistent volumes # Disk may include only vim_volume_id or only vim_id." 
key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id" - if disk.get(key_id): - # Use existing persistent volume block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id] existing_vim_volumes.append({"id": disk[key_id]}) - else: - # Create persistent volume + volume_name = f"{name}vd{chr(base_disk_index)}" volume = self.cinder.volumes.create( size=disk["size"], - name=name + "vd" + chr(base_disk_index), + name=volume_name, # Make sure volume is in the same AZ as the VM to be attached to availability_zone=vm_av_zone, ) @@ -2182,7 +2288,6 @@ class vimconnector(vimconn.VimConnector): elapsed_time (int): Time spent while waiting """ - while elapsed_time < volume_timeout: for created_item in created_items: v, volume_id = ( @@ -2190,7 +2295,13 @@ class vimconnector(vimconn.VimConnector): created_item.split(":")[1], ) if v == "volume": - if self.cinder.volumes.get(volume_id).status != "available": + volume = self.cinder.volumes.get(volume_id) + if ( + volume.volume_type == "multiattach" + and volume.status == "in-use" + ): + return elapsed_time + elif volume.status != "available": break else: # All ready: break from while @@ -2217,7 +2328,10 @@ class vimconnector(vimconn.VimConnector): while elapsed_time < volume_timeout: for volume in existing_vim_volumes: - if self.cinder.volumes.get(volume["id"]).status != "available": + v = self.cinder.volumes.get(volume["id"]) + if v.volume_type == "multiattach" and v.status == "in-use": + return elapsed_time + elif v.status != "available": break else: # all ready: break from while break @@ -2251,7 +2365,6 @@ class vimconnector(vimconn.VimConnector): base_disk_index = ord("b") boot_volume_id = None elapsed_time = 0 - for disk in disk_list: if "image_id" in disk: # Root persistent volume @@ -2265,6 +2378,15 @@ class vimconnector(vimconn.VimConnector): existing_vim_volumes=existing_vim_volumes, created_items=created_items, ) + elif disk.get("multiattach"): + self._prepare_shared_volumes( + name=name, + disk=disk, + base_disk_index=base_disk_index, + block_device_mapping=block_device_mapping, + existing_vim_volumes=existing_vim_volumes, + created_items=created_items, + ) else: # Non-root persistent volume self._prepare_non_root_persistent_volumes( @@ -2479,7 +2601,6 @@ class vimconnector(vimconn.VimConnector): # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry # several times while not assigned: - free_floating_ip = self._get_free_floating_ip( server, floating_network ) @@ -2576,7 +2697,6 @@ class vimconnector(vimconn.VimConnector): self.neutron.update_port(port[0], port_update) except Exception: - raise vimconn.VimConnException( "It was not possible to disable port security for port {}".format( port[0] @@ -2722,7 +2842,6 @@ class vimconnector(vimconn.VimConnector): server_group_id, ) ) - # Create VM server = self.nova.servers.create( name=name, @@ -2786,20 +2905,7 @@ class vimconnector(vimconn.VimConnector): def get_vminstance(self, vm_id): """Returns the VM instance information from VIM""" - # self.logger.debug("Getting VM from VIM") - try: - self._reload_connection() - server = self.nova.servers.find(id=vm_id) - # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema) - - return server.to_dict() - except ( - ksExceptions.ClientException, - nvExceptions.ClientException, - nvExceptions.NotFound, - ConnectionError, - ) as e: - self._format_exception(e) + return self._find_nova_server(vm_id) def get_vminstance_console(self, vm_id, console_type="vnc"): 
""" @@ -2883,16 +2989,40 @@ class vimconnector(vimconn.VimConnector): k_id (str): Port id in the VIM """ try: + self.neutron.delete_port(k_id) + + except Exception as e: + self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e)) + + def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool: + """Cinder delete volume by id. + Args: + shared_volume_vim_id (str): ID of shared volume in VIM + """ + elapsed_time = 0 + try: + while elapsed_time < server_timeout: + vol_status = self.cinder.volumes.get(shared_volume_vim_id).status + if vol_status == "available": + self.cinder.volumes.delete(shared_volume_vim_id) + return True - port_dict = self.neutron.list_ports() - existing_ports = [port["id"] for port in port_dict["ports"] if port_dict] + time.sleep(5) + elapsed_time += 5 - if k_id in existing_ports: - self.neutron.delete_port(k_id) + if elapsed_time >= server_timeout: + raise vimconn.VimConnException( + "Timeout waiting for volume " + + shared_volume_vim_id + + " to be available", + http_code=vimconn.HTTP_Request_Timeout, + ) except Exception as e: - - self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e)) + self.logger.error( + "Error deleting volume: {}: {}".format(type(e).__name__, e) + ) + self._format_exception(e) def _delete_volumes_by_id_wth_cinder( self, k: str, k_id: str, volumes_to_hold: list, created_items: dict @@ -2971,9 +3101,7 @@ class vimconnector(vimconn.VimConnector): try: k_item, k_id = self._get_item_name_id(k) - if k_item == "volume": - unavailable_vol = self._delete_volumes_by_id_wth_cinder( k, k_id, volumes_to_hold, created_items ) @@ -2982,7 +3110,6 @@ class vimconnector(vimconn.VimConnector): keep_waiting = True elif k_item == "floating_ip": - self._delete_floating_ip_by_id(k, k_id, created_items) except Exception as e: @@ -3204,7 +3331,8 @@ class vimconnector(vimconn.VimConnector): def action_vminstance(self, vm_id, action_dict, created_items={}): """Send and action over a VM instance from VIM - Returns None or the console dict if the action was successfully sent to the VIM""" + Returns None or the console dict if the action was successfully sent to the VIM + """ self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict)) try: @@ -3448,45 +3576,6 @@ class vimconnector(vimconn.VimConnector): ) ) - def delete_user(self, user_id): - """Delete a user from openstack VIM - Returns the user identifier""" - if self.debug: - print("osconnector: Deleting a user from VIM") - - try: - self._reload_connection() - self.keystone.users.delete(user_id) - - return 1, user_id - except ksExceptions.ConnectionError as e: - error_value = -vimconn.HTTP_Bad_Request - error_text = ( - type(e).__name__ - + ": " - + (str(e) if len(e.args) == 0 else str(e.args[0])) - ) - except ksExceptions.NotFound as e: - error_value = -vimconn.HTTP_Not_Found - error_text = ( - type(e).__name__ - + ": " - + (str(e) if len(e.args) == 0 else str(e.args[0])) - ) - except ksExceptions.ClientException as e: # TODO remove - error_value = -vimconn.HTTP_Bad_Request - error_text = ( - type(e).__name__ - + ": " - + (str(e) if len(e.args) == 0 else str(e.args[0])) - ) - - # TODO insert exception vimconn.HTTP_Unauthorized - # if reaching here is because an exception - self.logger.debug("delete_tenant " + error_text) - - return error_value, error_text - def get_hosts_info(self): """Get the information of deployed hosts Returns the hosts content""" @@ -3560,696 +3649,93 @@ class vimconnector(vimconn.VimConnector): return error_value, error_text - def 
@@ -3560,696 +3649,93 @@ class vimconnector(vimconn.VimConnector):
 
         return error_value, error_text
 
-    def new_classification(self, name, ctype, definition):
-        self.logger.debug(
-            "Adding a new (Traffic) Classification to VIM, named %s", name
-        )
-
-        try:
-            new_class = None
-            self._reload_connection()
-
-            if ctype not in supportedClassificationTypes:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector does not support provided "
-                    "Classification Type {}, supported ones are: {}".format(
-                        ctype, supportedClassificationTypes
-                    )
-                )
-
-            if not self._validate_classification(ctype, definition):
-                raise vimconn.VimConnException(
-                    "Incorrect Classification definition for the type specified."
-                )
-
-            classification_dict = definition
-            classification_dict["name"] = name
-            new_class = self.neutron.create_sfc_flow_classifier(
-                {"flow_classifier": classification_dict}
-            )
-
-            return new_class["flow_classifier"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self.logger.error("Creation of Classification failed.")
-            self._format_exception(e)
+    def new_affinity_group(self, affinity_group_data):
+        """Adds a server group to VIM
+            affinity_group_data contains a dictionary with information, keys:
+                name: name in VIM for the server group
+                type: affinity or anti-affinity
+                scope: Only nfvi-node allowed
+        Returns the server group identifier"""
+        self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
+
+        try:
+            name = affinity_group_data["name"]
+            policy = affinity_group_data["type"]
+
+            self._reload_connection()
+            new_server_group = self.nova.server_groups.create(name, policy)
+
+            return new_server_group.id
+        except (
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            ConnectionError,
+            KeyError,
+        ) as e:
+            self._format_exception(e)
 
-    def get_classification(self, class_id):
-        self.logger.debug(" Getting Classification %s from VIM", class_id)
-        filter_dict = {"id": class_id}
-        class_list = self.get_classification_list(filter_dict)
-
-        if len(class_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Classification '{}' not found".format(class_id)
-            )
-        elif len(class_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Classification with this criteria"
-            )
-
-        classification = class_list[0]
-
-        return classification
-
-    def get_classification_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Classifications from VIM filter: '%s'", str(filter_dict)
-        )
-
-        try:
-            filter_dict_os = filter_dict.copy()
-            self._reload_connection()
-
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
-            classification_dict = self.neutron.list_sfc_flow_classifiers(
-                **filter_dict_os
-            )
-            classification_list = classification_dict["flow_classifiers"]
-            self.__classification_os2mano(classification_list)
-
-            return classification_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+    def get_affinity_group(self, affinity_group_id):
+        """Obtain server group details from the VIM. Returns the server group details as a dict"""
+        self.logger.debug("Getting server group '%s'", affinity_group_id)
+        try:
+            self._reload_connection()
+            server_group = self.nova.server_groups.find(id=affinity_group_id)
+
+            return server_group.to_dict()
+        except (
+            nvExceptions.NotFound,
+            nvExceptions.ClientException,
+            ksExceptions.ClientException,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
 
-    def delete_classification(self, class_id):
-        self.logger.debug("Deleting Classification '%s' from VIM", class_id)
-
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_flow_classifier(class_id)
-
-            return class_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
 
+    def delete_affinity_group(self, affinity_group_id):
+        """Deletes a server group from the VIM. Returns the old affinity_group_id"""
+        self.logger.debug("Deleting server group '%s'", affinity_group_id)
+        try:
+            self._reload_connection()
+            self.nova.server_groups.delete(affinity_group_id)
+
+            return affinity_group_id
+        except (
+            nvExceptions.NotFound,
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
 
+    def get_vdu_state(self, vm_id, host_is_required=False) -> list:
+        """Getting the state of a VDU.
+        Args:
+            vm_id (str): ID of an instance
+            host_is_required (bool): If True, a missing host attribute raises a KeyError
+                (for non-admin VIM accounts the host info is not present in server_dict)
+        Returns:
+            vdu_data (list): VDU details including state, flavor, host_info, AZ
+        """
+        self.logger.debug("Getting the status of VM")
+        self.logger.debug("VIM VM ID %s", vm_id)
+        try:
+            self._reload_connection()
+            server_dict = self._find_nova_server(vm_id)
+            srv_attr = "OS-EXT-SRV-ATTR:host"
+            host_info = (
+                server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
+            )
+            vdu_data = [
+                server_dict["status"],
+                server_dict["flavor"]["id"],
+                host_info,
+                server_dict["OS-EXT-AZ:availability_zone"],
+            ]
+            self.logger.debug("vdu_data %s", vdu_data)
+            return vdu_data
+        except Exception as e:
+            self._format_exception(e)
 
-    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
-        self.logger.debug(
-            "Adding a new Service Function Instance to VIM, named '%s'", name
-        )
-
-        try:
-            new_sfi = None
-            self._reload_connection()
-            correlation = None
-
-            if sfc_encap:
-                correlation = "nsh"
-
-            if len(ingress_ports) != 1:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector can only have 1 ingress port per SFI"
-                )
-
-            if len(egress_ports) != 1:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector can only have 1 egress port per SFI"
-                )
-
-            sfi_dict = {
-                "name": name,
-                "ingress": ingress_ports[0],
-                "egress": egress_ports[0],
-                "service_function_parameters": {"correlation": correlation},
-            }
-            new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
-
-            return new_sfi["port_pair"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            if new_sfi:
-                try:
-                    self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
-                except Exception:
-                    self.logger.error(
-                        "Creation of Service Function Instance failed, with "
-                        "subsequent deletion failure as well."
- ) + server_dict = self._find_nova_server(vm_id) + srv_attr = "OS-EXT-SRV-ATTR:host" + host_info = ( + server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr) + ) + vdu_data = [ + server_dict["status"], + server_dict["flavor"]["id"], + host_info, + server_dict["OS-EXT-AZ:availability_zone"], + ] + self.logger.debug("vdu_data %s", vdu_data) + return vdu_data + except Exception as e: self._format_exception(e) - def get_sfi(self, sfi_id): - self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id) - filter_dict = {"id": sfi_id} - sfi_list = self.get_sfi_list(filter_dict) - - if len(sfi_list) == 0: - raise vimconn.VimConnNotFoundException( - "Service Function Instance '{}' not found".format(sfi_id) - ) - elif len(sfi_list) > 1: - raise vimconn.VimConnConflictException( - "Found more than one Service Function Instance with this criteria" - ) - - sfi = sfi_list[0] - - return sfi - - def get_sfi_list(self, filter_dict={}): - self.logger.debug( - "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict) - ) - - try: - self._reload_connection() - filter_dict_os = filter_dict.copy() - - if self.api_version3 and "tenant_id" in filter_dict_os: - filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id") - - sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os) - sfi_list = sfi_dict["port_pairs"] - self.__sfi_os2mano(sfi_list) - - return sfi_list - except ( - neExceptions.ConnectionFailed, - ksExceptions.ClientException, - neExceptions.NeutronException, - ConnectionError, - ) as e: - self._format_exception(e) - - def delete_sfi(self, sfi_id): - self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id) - - try: - self._reload_connection() - self.neutron.delete_sfc_port_pair(sfi_id) - - return sfi_id - except ( - neExceptions.ConnectionFailed, - neExceptions.NeutronException, - ksExceptions.ClientException, - neExceptions.NeutronException, - ConnectionError, - ) as e: - self._format_exception(e) - - def new_sf(self, name, sfis, sfc_encap=True): - self.logger.debug("Adding a new Service Function to VIM, named '%s'", name) - - try: - new_sf = None - self._reload_connection() - # correlation = None - # if sfc_encap: - # correlation = "nsh" - - for instance in sfis: - sfi = self.get_sfi(instance) - - if sfi.get("sfc_encap") != sfc_encap: - raise vimconn.VimConnNotSupportedException( - "OpenStack VIM connector requires all SFIs of the " - "same SF to share the same SFC Encapsulation" - ) - - sf_dict = {"name": name, "port_pairs": sfis} - new_sf = self.neutron.create_sfc_port_pair_group( - {"port_pair_group": sf_dict} - ) - - return new_sf["port_pair_group"]["id"] - except ( - neExceptions.ConnectionFailed, - ksExceptions.ClientException, - neExceptions.NeutronException, - ConnectionError, - ) as e: - if new_sf: - try: - self.neutron.delete_sfc_port_pair_group( - new_sf["port_pair_group"]["id"] - ) - except Exception: - self.logger.error( - "Creation of Service Function failed, with " - "subsequent deletion failure as well." 
- ) - - self._format_exception(e) - - def get_sf(self, sf_id): - self.logger.debug("Getting Service Function %s from VIM", sf_id) - filter_dict = {"id": sf_id} - sf_list = self.get_sf_list(filter_dict) - - if len(sf_list) == 0: - raise vimconn.VimConnNotFoundException( - "Service Function '{}' not found".format(sf_id) - ) - elif len(sf_list) > 1: - raise vimconn.VimConnConflictException( - "Found more than one Service Function with this criteria" - ) - - sf = sf_list[0] - - return sf - - def get_sf_list(self, filter_dict={}): - self.logger.debug( - "Getting Service Function from VIM filter: '%s'", str(filter_dict) - ) - - try: - self._reload_connection() - filter_dict_os = filter_dict.copy() - - if self.api_version3 and "tenant_id" in filter_dict_os: - filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id") - - sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os) - sf_list = sf_dict["port_pair_groups"] - self.__sf_os2mano(sf_list) - - return sf_list - except ( - neExceptions.ConnectionFailed, - ksExceptions.ClientException, - neExceptions.NeutronException, - ConnectionError, - ) as e: - self._format_exception(e) - - def delete_sf(self, sf_id): - self.logger.debug("Deleting Service Function '%s' from VIM", sf_id) - - try: - self._reload_connection() - self.neutron.delete_sfc_port_pair_group(sf_id) - - return sf_id - except ( - neExceptions.ConnectionFailed, - neExceptions.NeutronException, - ksExceptions.ClientException, - neExceptions.NeutronException, - ConnectionError, - ) as e: - self._format_exception(e) - - def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None): - self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name) - - try: - new_sfp = None - self._reload_connection() - # In networking-sfc the MPLS encapsulation is legacy - # should be used when no full SFC Encapsulation is intended - correlation = "mpls" - - if sfc_encap: - correlation = "nsh" - - sfp_dict = { - "name": name, - "flow_classifiers": classifications, - "port_pair_groups": sfs, - "chain_parameters": {"correlation": correlation}, - } - - if spi: - sfp_dict["chain_id"] = spi - - new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict}) - - return new_sfp["port_chain"]["id"] - except ( - neExceptions.ConnectionFailed, - ksExceptions.ClientException, - neExceptions.NeutronException, - ConnectionError, - ) as e: - if new_sfp: - try: - self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"]) - except Exception: - self.logger.error( - "Creation of Service Function Path failed, with " - "subsequent deletion failure as well." 
- ) - - self._format_exception(e) - - def get_sfp(self, sfp_id): - self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id) - - filter_dict = {"id": sfp_id} - sfp_list = self.get_sfp_list(filter_dict) - - if len(sfp_list) == 0: - raise vimconn.VimConnNotFoundException( - "Service Function Path '{}' not found".format(sfp_id) - ) - elif len(sfp_list) > 1: - raise vimconn.VimConnConflictException( - "Found more than one Service Function Path with this criteria" - ) - - sfp = sfp_list[0] - - return sfp - - def get_sfp_list(self, filter_dict={}): - self.logger.debug( - "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict) - ) - - try: - self._reload_connection() - filter_dict_os = filter_dict.copy() - - if self.api_version3 and "tenant_id" in filter_dict_os: - filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id") - - sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os) - sfp_list = sfp_dict["port_chains"] - self.__sfp_os2mano(sfp_list) - - return sfp_list - except ( - neExceptions.ConnectionFailed, - ksExceptions.ClientException, - neExceptions.NeutronException, - ConnectionError, - ) as e: - self._format_exception(e) - - def delete_sfp(self, sfp_id): - self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id) - - try: - self._reload_connection() - self.neutron.delete_sfc_port_chain(sfp_id) - - return sfp_id - except ( - neExceptions.ConnectionFailed, - neExceptions.NeutronException, - ksExceptions.ClientException, - neExceptions.NeutronException, - ConnectionError, - ) as e: - self._format_exception(e) - - def refresh_sfps_status(self, sfp_list): - """Get the status of the service function path - Params: the list of sfp identifiers - Returns a dictionary with: - vm_id: #VIM id of this service function path - status: #Mandatory. Text with one of: - # DELETED (not found at vim) - # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) - # OTHER (Vim reported other status not understood) - # ERROR (VIM indicates an ERROR status) - # ACTIVE, - # CREATING (on building process) - error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F - """ - sfp_dict = {} - self.logger.debug( - "refresh_sfps status: Getting tenant SFP information from VIM" - ) - - for sfp_id in sfp_list: - sfp = {} - - try: - sfp_vim = self.get_sfp(sfp_id) - - if sfp_vim["spi"]: - sfp["status"] = vmStatus2manoFormat["ACTIVE"] - else: - sfp["status"] = "OTHER" - sfp["error_msg"] = "VIM status reported " + sfp["status"] - - sfp["vim_info"] = self.serialize(sfp_vim) - - if sfp_vim.get("fault"): - sfp["error_msg"] = str(sfp_vim["fault"]) - except vimconn.VimConnNotFoundException as e: - self.logger.error("Exception getting sfp status: %s", str(e)) - sfp["status"] = "DELETED" - sfp["error_msg"] = str(e) - except vimconn.VimConnException as e: - self.logger.error("Exception getting sfp status: %s", str(e)) - sfp["status"] = "VIM_ERROR" - sfp["error_msg"] = str(e) - - sfp_dict[sfp_id] = sfp - - return sfp_dict - - def refresh_sfis_status(self, sfi_list): - """Get the status of the service function instances - Params: the list of sfi identifiers - Returns a dictionary with: - vm_id: #VIM id of this service function instance - status: #Mandatory. Text with one of: - # DELETED (not found at vim) - # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
- # OTHER (Vim reported other status not understood) - # ERROR (VIM indicates an ERROR status) - # ACTIVE, - # CREATING (on building process) - error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR - vim_info: #Text with plain information obtained from vim (yaml.safe_dump) - """ - sfi_dict = {} - self.logger.debug( - "refresh_sfis status: Getting tenant sfi information from VIM" - ) - - for sfi_id in sfi_list: - sfi = {} - - try: - sfi_vim = self.get_sfi(sfi_id) - - if sfi_vim: - sfi["status"] = vmStatus2manoFormat["ACTIVE"] - else: - sfi["status"] = "OTHER" - sfi["error_msg"] = "VIM status reported " + sfi["status"] - - sfi["vim_info"] = self.serialize(sfi_vim) - - if sfi_vim.get("fault"): - sfi["error_msg"] = str(sfi_vim["fault"]) - except vimconn.VimConnNotFoundException as e: - self.logger.error("Exception getting sfi status: %s", str(e)) - sfi["status"] = "DELETED" - sfi["error_msg"] = str(e) - except vimconn.VimConnException as e: - self.logger.error("Exception getting sfi status: %s", str(e)) - sfi["status"] = "VIM_ERROR" - sfi["error_msg"] = str(e) - - sfi_dict[sfi_id] = sfi - - return sfi_dict - - def refresh_sfs_status(self, sf_list): - """Get the status of the service functions - Params: the list of sf identifiers - Returns a dictionary with: - vm_id: #VIM id of this service function - status: #Mandatory. Text with one of: - # DELETED (not found at vim) - # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) - # OTHER (Vim reported other status not understood) - # ERROR (VIM indicates an ERROR status) - # ACTIVE, - # CREATING (on building process) - error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR - vim_info: #Text with plain information obtained from vim (yaml.safe_dump) - """ - sf_dict = {} - self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM") - - for sf_id in sf_list: - sf = {} - - try: - sf_vim = self.get_sf(sf_id) - - if sf_vim: - sf["status"] = vmStatus2manoFormat["ACTIVE"] - else: - sf["status"] = "OTHER" - sf["error_msg"] = "VIM status reported " + sf_vim["status"] - - sf["vim_info"] = self.serialize(sf_vim) - - if sf_vim.get("fault"): - sf["error_msg"] = str(sf_vim["fault"]) - except vimconn.VimConnNotFoundException as e: - self.logger.error("Exception getting sf status: %s", str(e)) - sf["status"] = "DELETED" - sf["error_msg"] = str(e) - except vimconn.VimConnException as e: - self.logger.error("Exception getting sf status: %s", str(e)) - sf["status"] = "VIM_ERROR" - sf["error_msg"] = str(e) - - sf_dict[sf_id] = sf - - return sf_dict - - def refresh_classifications_status(self, classification_list): - """Get the status of the classifications - Params: the list of classification identifiers - Returns a dictionary with: - vm_id: #VIM id of this classifier - status: #Mandatory. Text with one of: - # DELETED (not found at vim) - # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) - # OTHER (Vim reported other status not understood) - # ERROR (VIM indicates an ERROR status) - # ACTIVE, - # CREATING (on building process) - error_msg: #Text with VIM error message, if any. 
Or the VIM connection ERROR - vim_info: #Text with plain information obtained from vim (yaml.safe_dump) - """ - classification_dict = {} - self.logger.debug( - "refresh_classifications status: Getting tenant classification information from VIM" - ) - - for classification_id in classification_list: - classification = {} - - try: - classification_vim = self.get_classification(classification_id) - - if classification_vim: - classification["status"] = vmStatus2manoFormat["ACTIVE"] - else: - classification["status"] = "OTHER" - classification["error_msg"] = ( - "VIM status reported " + classification["status"] - ) - - classification["vim_info"] = self.serialize(classification_vim) - - if classification_vim.get("fault"): - classification["error_msg"] = str(classification_vim["fault"]) - except vimconn.VimConnNotFoundException as e: - self.logger.error("Exception getting classification status: %s", str(e)) - classification["status"] = "DELETED" - classification["error_msg"] = str(e) - except vimconn.VimConnException as e: - self.logger.error("Exception getting classification status: %s", str(e)) - classification["status"] = "VIM_ERROR" - classification["error_msg"] = str(e) - - classification_dict[classification_id] = classification - - return classification_dict - - def new_affinity_group(self, affinity_group_data): - """Adds a server group to VIM - affinity_group_data contains a dictionary with information, keys: - name: name in VIM for the server group - type: affinity or anti-affinity - scope: Only nfvi-node allowed - Returns the server group identifier""" - self.logger.debug("Adding Server Group '%s'", str(affinity_group_data)) - - try: - name = affinity_group_data["name"] - policy = affinity_group_data["type"] - - self._reload_connection() - new_server_group = self.nova.server_groups.create(name, policy) - - return new_server_group.id - except ( - ksExceptions.ClientException, - nvExceptions.ClientException, - ConnectionError, - KeyError, - ) as e: - self._format_exception(e) - - def get_affinity_group(self, affinity_group_id): - """Obtain server group details from the VIM. Returns the server group detais as a dict""" - self.logger.debug("Getting flavor '%s'", affinity_group_id) - try: - self._reload_connection() - server_group = self.nova.server_groups.find(id=affinity_group_id) - - return server_group.to_dict() - except ( - nvExceptions.NotFound, - nvExceptions.ClientException, - ksExceptions.ClientException, - ConnectionError, - ) as e: - self._format_exception(e) - - def delete_affinity_group(self, affinity_group_id): - """Deletes a server group from the VIM. 
Returns the old affinity_group_id"""
-        self.logger.debug("Getting server group '%s'", affinity_group_id)
-        try:
-            self._reload_connection()
-            self.nova.server_groups.delete(affinity_group_id)
-
-            return affinity_group_id
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def get_vdu_state(self, vm_id):
-        """
-        Getting the state of a vdu
-        param:
-            vm_id: ID of an instance
-        """
-        self.logger.debug("Getting the status of VM")
-        self.logger.debug("VIM VM ID %s", vm_id)
-        self._reload_connection()
-        server = self.nova.servers.find(id=vm_id)
-        server_dict = server.to_dict()
-        vdu_data = [
-            server_dict["status"],
-            server_dict["flavor"]["id"],
-            server_dict["OS-EXT-SRV-ATTR:host"],
-            server_dict["OS-EXT-AZ:availability_zone"],
-        ]
-        self.logger.debug("vdu_data %s", vdu_data)
-        return vdu_data
-
     def check_compute_availability(self, host, server_flavor_details):
         self._reload_connection()
         hypervisor_search = self.nova.hypervisors.search(
@@ -4315,7 +3801,7 @@ class vimconnector(vimconn.VimConnector):
         """
         self._reload_connection()
         vm_state = False
-        instance_state = self.get_vdu_state(vm_id)
+        instance_state = self.get_vdu_state(vm_id, host_is_required=True)
         server_flavor_id = instance_state[1]
         server_hypervisor_name = instance_state[2]
         server_availability_zone = instance_state[3]
@@ -4350,17 +3836,19 @@ class vimconnector(vimconn.VimConnector):
                     http_code=vimconn.HTTP_Bad_Request,
                 )
             if available_compute_id is not None:
+                # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
                 self.nova.servers.live_migrate(
                     server=vm_id,
                     host=available_compute_id,
                     block_migration=True,
-                    disk_over_commit=False,
                 )
                 state = "MIGRATING"
                 changed_compute_host = ""
 
                 if state == "MIGRATING":
                     vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
-                    changed_compute_host = self.get_vdu_state(vm_id)[2]
+                    changed_compute_host = self.get_vdu_state(
+                        vm_id, host_is_required=True
+                    )[2]
 
                 if vm_state and changed_compute_host == available_compute_id:
                     self.logger.debug(
                         "Instance '{}' migrated to the new compute host '{}'".format(
@@ -4414,7 +3902,9 @@ class vimconnector(vimconn.VimConnector):
                 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
 
                 if vm_state:
-                    instance_resized_status = self.confirm_resize(vm_id)
+                    instance_resized_status = self.confirm_resize(
+                        vm_id, instance_status
+                    )
                     return instance_resized_status
                 else:
                     raise nvExceptions.BadRequest(
@@ -4435,7 +3925,7 @@ class vimconnector(vimconn.VimConnector):
         ) as e:
             self._format_exception(e)
 
-    def confirm_resize(self, vm_id):
+    def confirm_resize(self, vm_id, instance_state):
         """
         Confirm the resize of an instance
         param:
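The resize path above hands the pre-resize state into confirm_resize, so the VM is waited back into whatever state it had before the resize rather than a hard-coded ACTIVE. A compact sketch of the whole flow (wait_for stands in for the connector's __wait_for_vm; this is an illustration, not the connector's actual code):

    def resize_and_confirm(nova, vm_id, new_flavor_id, wait_for):
        """Resize a server, wait for VERIFY_RESIZE, confirm, then wait for the
        instance to return to its pre-resize state."""
        original_status = nova.servers.get(vm_id).status  # e.g. ACTIVE or SHUTOFF
        nova.servers.resize(server=vm_id, flavor=new_flavor_id)

        if wait_for(vm_id, "VERIFY_RESIZE"):
            nova.servers.confirm_resize(server=vm_id)
            # Passing the original state is what the instance_state parameter
            # introduced above enables; previously ACTIVE was always assumed.
            wait_for(vm_id, original_status)

        return nova.servers.get(vm_id).status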
@@ -4444,6 +3934,30 @@ class vimconnector(vimconn.VimConnector):
         self._reload_connection()
         self.nova.servers.confirm_resize(server=vm_id)
 
         if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
-            self.__wait_for_vm(vm_id, "ACTIVE")
+            self.__wait_for_vm(vm_id, instance_state)
 
         instance_status = self.get_vdu_state(vm_id)[0]
 
         return instance_status
+
+    def get_monitoring_data(self):
+        try:
+            self.logger.debug("Getting servers and ports data from OpenStack VIMs.")
+            self._reload_connection()
+            all_servers = self.nova.servers.list(detailed=True)
+
+            try:
+                for server in all_servers:
+                    if server.flavor.get("original_name"):
+                        server.flavor["id"] = self.nova.flavors.find(
+                            name=server.flavor["original_name"]
+                        ).id
+            except nClient.exceptions.NotFound as e:
+                self.logger.warning(str(e.message))
+
+            all_ports = self.neutron.list_ports()
+            return all_servers, all_ports
+        except (
+            vimconn.VimConnException,
+            vimconn.VimConnNotFoundException,
+            vimconn.VimConnConnectionException,
+        ) as e:
+            raise vimconn.VimConnException(
+                f"Exception in monitoring while getting VMs and ports status: {str(e)}"
+            )
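get_monitoring_data, like _find_nova_server earlier in this patch, has to map the flavor description that Nova embeds in the server body (microversion >= 2.47) back to a flavor id. A minimal standalone illustration of that lookup (resolve_flavor_id is a hypothetical helper, not part of the patch):

    def resolve_flavor_id(nova, server_dict):
        """With microversion >= 2.47 Nova embeds flavor details (including
        original_name) in the server body instead of a flavor id; map it back."""
        flavor = server_dict.get("flavor", {})
        if flavor.get("original_name"):
            # flavors.find raises NotFound if the flavor was deleted meanwhile,
            # which the patch logs as a warning rather than failing monitoring.
            flavor["id"] = nova.flavors.find(name=flavor["original_name"]).id
        return flavor.get("id")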