X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;ds=sidebyside;f=RO-VIM-openstack%2Fosm_rovim_openstack%2Fvimconn_openstack.py;h=54d0e88ad8f9799484e3613f4cbb3fbb9272f333;hb=HEAD;hp=5f66f09ae1a492a06b781c11a896334a34387741;hpb=6a6e3344cc0d68064a592941e33cdc6629eb3405;p=osm%2FRO.git diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py index 5f66f09a..221fc241 100644 --- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py +++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py @@ -41,6 +41,7 @@ import time from typing import Dict, List, Optional, Tuple from cinderclient import client as cClient +import cinderclient.exceptions as cExceptions from glanceclient import client as glClient import glanceclient.exc as gl1Exceptions from keystoneauth1 import session @@ -85,6 +86,16 @@ volume_timeout = 1800 server_timeout = 1800 +def catch_any_exception(func): + def format_exception(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + vimconnector._format_exception(e) + + return format_exception + + class SafeDumper(yaml.SafeDumper): def represent_data(self, data): # Openstack APIs use custom subclasses of dict and YAML safe dumper @@ -175,6 +186,8 @@ class vimconnector(vimconn.VimConnector): self.persistent_info = persistent_info self.availability_zone = persistent_info.get("availability_zone", None) + self.storage_availability_zone = None + self.vm_av_zone = None self.session = persistent_info.get("session", {"reload_client": True}) self.my_tenant_id = self.session.get("my_tenant_id") self.nova = self.session.get("nova") @@ -339,7 +352,7 @@ class vimconnector(vimconn.VimConnector): version = self.config.get("microversion") if not version: - version = "2.1" + version = "2.60" # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River # Titanium cloud and StarlingX @@ -355,12 +368,21 @@ class 
vimconnector(vimconn.VimConnector): endpoint_type=self.endpoint_type, region_name=region_name, ) - self.cinder = self.session["cinder"] = cClient.Client( - 2, - session=sess, - endpoint_type=self.endpoint_type, - region_name=region_name, - ) + + if sess.get_all_version_data(service_type="volumev2"): + self.cinder = self.session["cinder"] = cClient.Client( + 2, + session=sess, + endpoint_type=self.endpoint_type, + region_name=region_name, + ) + else: + self.cinder = self.session["cinder"] = cClient.Client( + 3, + session=sess, + endpoint_type=self.endpoint_type, + region_name=region_name, + ) try: self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id() @@ -516,7 +538,8 @@ class vimconnector(vimconn.VimConnector): # Types. Also, abstract vimconnector should call the validation # method before the implemented VIM connectors are called. - def _format_exception(self, exception): + @staticmethod + def _format_exception(exception): """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause""" message_error = str(exception) tip = "" @@ -526,8 +549,10 @@ class vimconnector(vimconn.VimConnector): ( neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, + nvExceptions.ResourceNotFound, ksExceptions.NotFound, gl1Exceptions.HTTPNotFound, + cExceptions.NotFound, ), ): raise vimconn.VimConnNotFoundException( @@ -542,6 +567,7 @@ class vimconnector(vimconn.VimConnector): ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed, + cExceptions.ConnectionError, ), ): if type(exception).__name__ == "SSLError": @@ -556,17 +582,26 @@ class vimconnector(vimconn.VimConnector): KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest, + gl1Exceptions.BadRequest, + cExceptions.BadRequest, ), ): + if message_error == "OS-EXT-SRV-ATTR:host": + tip = " (If the user does not have non-admin credentials, this attribute will be missing)" + raise vimconn.VimConnInsufficientCredentials( + type(exception).__name__ + ": " + 
message_error + tip + ) raise vimconn.VimConnException( type(exception).__name__ + ": " + message_error ) + elif isinstance( exception, ( nvExceptions.ClientException, ksExceptions.ClientException, neExceptions.NeutronException, + cExceptions.ClientException, ), ): raise vimconn.VimConnUnexpectedResponse( @@ -579,9 +614,10 @@ class vimconnector(vimconn.VimConnector): elif isinstance(exception, vimconn.VimConnException): raise exception else: # () - self.logger.error("General Exception " + message_error, exc_info=True) + logger = logging.getLogger("ro.vim.openstack") + logger.error("General Exception " + message_error, exc_info=True) - raise vimconn.VimConnConnectionException( + raise vimconn.VimConnException( type(exception).__name__ + ": " + message_error ) @@ -619,6 +655,32 @@ class vimconnector(vimconn.VimConnector): "Not found security group {} for this tenant".format(sg) ) + def _find_nova_server(self, vm_id): + """ + Returns the VM instance from Openstack and completes it with flavor ID + Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47 + """ + try: + self._reload_connection() + server = self.nova.servers.find(id=vm_id) + # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema) + server_dict = server.to_dict() + try: + if server_dict["flavor"].get("original_name"): + server_dict["flavor"]["id"] = self.nova.flavors.find( + name=server_dict["flavor"]["original_name"] + ).id + except nClient.exceptions.NotFound as e: + self.logger.warning(str(e.message)) + return server_dict + except ( + ksExceptions.ClientException, + nvExceptions.ClientException, + nvExceptions.NotFound, + ConnectionError, + ) as e: + self._format_exception(e) + def check_vim_connectivity(self): # just get network list to check connectivity and credentials self.get_network_list(filter_dict={}) @@ -632,7 +694,6 @@ class vimconnector(vimconn.VimConnector): Returns the tenant list of dictionaries: [{'name':', 
'id':', ...}, ...] """ self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict)) - try: self._reload_connection() @@ -662,7 +723,6 @@ class vimconnector(vimconn.VimConnector): def new_tenant(self, tenant_name, tenant_description): """Adds a new tenant to openstack VIM. Returns the tenant identifier""" self.logger.debug("Adding a new tenant name: %s", tenant_name) - try: self._reload_connection() @@ -688,7 +748,6 @@ class vimconnector(vimconn.VimConnector): def delete_tenant(self, tenant_id): """Delete a tenant from openstack VIM. Returns the old tenant identifier""" self.logger.debug("Deleting tenant %s from VIM", tenant_id) - try: self._reload_connection() @@ -698,6 +757,7 @@ class vimconnector(vimconn.VimConnector): self.keystone.tenants.delete(tenant_id) return tenant_id + except ( ksExceptions.ConnectionError, ksExceptions.ClientException, @@ -787,7 +847,7 @@ class vimconnector(vimconn.VimConnector): "dataplane_physical_net" ) - # if it is non empty list, use the first value. If it is a string use the value directly + # if it is non-empty list, use the first value. 
If it is a string use the value directly if ( isinstance(provider_physical_network, (tuple, list)) and provider_physical_network @@ -803,17 +863,17 @@ class vimconnector(vimconn.VimConnector): ) if not self.config.get("multisegment_support"): - network_dict[ - "provider:physical_network" - ] = provider_physical_network + network_dict["provider:physical_network"] = ( + provider_physical_network + ) if ( provider_network_profile and "network-type" in provider_network_profile ): - network_dict[ - "provider:network_type" - ] = provider_network_profile["network-type"] + network_dict["provider:network_type"] = ( + provider_network_profile["network-type"] + ) else: network_dict["provider:network_type"] = self.config.get( "dataplane_network_type", "vlan" @@ -880,7 +940,7 @@ class vimconnector(vimconn.VimConnector): if not ip_profile.get("subnet_address"): # Fake subnet is required - subnet_rand = random.randint(0, 255) + subnet_rand = random.SystemRandom().randint(0, 255) ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand) if "ip_version" not in ip_profile: @@ -925,6 +985,15 @@ class vimconnector(vimconn.VimConnector): ip_str = str(netaddr.IPAddress(ip_int)) subnet["allocation_pools"][0]["end"] = ip_str + if ( + ip_profile.get("ipv6_address_mode") + and ip_profile["ip_version"] != "IPv4" + ): + subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"] + # ipv6_ra_mode can be set to the same value for most use cases, see documentation: + # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations + subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"] + # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet)) self.neutron.create_subnet({"subnet": subnet}) @@ -957,6 +1026,14 @@ class vimconnector(vimconn.VimConnector): if k_item == "l2gwconn": self.neutron.delete_l2_gateway_connection(k_id) + + except (neExceptions.ConnectionFailed, ConnectionError) as e2: + self.logger.error( + "Error 
deleting l2 gateway connection: {}: {}".format( + type(e2).__name__, e2 + ) + ) + self._format_exception(e2) except Exception as e2: self.logger.error( "Error deleting l2 gateway connection: {}: {}".format( @@ -981,7 +1058,6 @@ class vimconnector(vimconn.VimConnector): Returns the network list of dictionaries """ self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict)) - try: self._reload_connection() filter_dict_os = filter_dict.copy() @@ -1041,6 +1117,7 @@ class vimconnector(vimconn.VimConnector): return net + @catch_any_exception def delete_network(self, net_id, created_items=None): """ Removes a tenant network from VIM and its associated elements @@ -1064,6 +1141,14 @@ class vimconnector(vimconn.VimConnector): k_item, _, k_id = k.partition(":") if k_item == "l2gwconn": self.neutron.delete_l2_gateway_connection(k_id) + + except (neExceptions.ConnectionFailed, ConnectionError) as e: + self.logger.error( + "Error deleting l2 gateway connection: {}: {}".format( + type(e).__name__, e + ) + ) + self._format_exception(e) except Exception as e: self.logger.error( "Error deleting l2 gateway connection: {}: {}".format( @@ -1076,21 +1161,22 @@ class vimconnector(vimconn.VimConnector): for p in ports["ports"]: try: self.neutron.delete_port(p["id"]) + + except (neExceptions.ConnectionFailed, ConnectionError) as e: + self.logger.error("Error deleting port %s: %s", p["id"], str(e)) + # If there is connection error, it raises. + self._format_exception(e) except Exception as e: self.logger.error("Error deleting port %s: %s", p["id"], str(e)) self.neutron.delete_network(net_id) return net_id - except ( - neExceptions.ConnectionFailed, - neExceptions.NetworkNotFoundClient, - neExceptions.NeutronException, - ksExceptions.ClientException, - neExceptions.NeutronException, - ConnectionError, - ) as e: - self._format_exception(e) + except (neExceptions.NetworkNotFoundClient, neExceptions.NotFound) as e: + # If network to be deleted is not found, it does not raise. 
+ self.logger.warning( + f"Error deleting network: {net_id} is not found, {str(e)}" + ) def refresh_nets_status(self, net_list): """Get the status of the networks @@ -1143,13 +1229,11 @@ class vimconnector(vimconn.VimConnector): def get_flavor(self, flavor_id): """Obtain flavor details from the VIM. Returns the flavor dict details""" self.logger.debug("Getting flavor '%s'", flavor_id) - try: self._reload_connection() flavor = self.nova.flavors.find(id=flavor_id) - # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema) - return flavor.to_dict() + except ( nvExceptions.NotFound, nvExceptions.ClientException, @@ -1221,6 +1305,7 @@ class vimconnector(vimconn.VimConnector): ) except ( nvExceptions.NotFound, + nvExceptions.BadRequest, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError, @@ -1353,8 +1438,7 @@ class vimconnector(vimconn.VimConnector): cpu_cores, cpu_threads = 0, 0 if self.vim_type == "VIO": - extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}' - extra_specs["vmware:latency_sensitivity_level"] = "high" + self.process_vio_numa_nodes(numa_nodes, extra_specs) for numa in numas: if "id" in numa: @@ -1384,6 +1468,19 @@ class vimconnector(vimconn.VimConnector): if cpu_threads: extra_specs["hw:cpu_threads"] = str(cpu_threads) + @staticmethod + def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None: + """According to number of numa nodes, updates the extra_specs for VIO. + + Args: + + numa_nodes (int): List keeps the numa node numbers + extra_specs (dict): Extra specs dict to be updated + + """ + # If there are several numas, we do not define specific affinity. 
+ extra_specs["vmware:latency_sensitivity_level"] = "high" + def _change_flavor_name( self, name: str, name_suffix: int, flavor_data: dict ) -> str: @@ -1479,6 +1576,7 @@ class vimconnector(vimconn.VimConnector): flavor_data.get("extended"), ) + @catch_any_exception def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str: """Adds a tenant flavor to openstack VIM. if change_name_if_used is True, it will change name in case of conflict, @@ -1496,71 +1594,58 @@ class vimconnector(vimconn.VimConnector): retry = 0 max_retries = 3 name_suffix = 0 + name = flavor_data["name"] + while retry < max_retries: + retry += 1 + try: + self._reload_connection() - try: - name = flavor_data["name"] - while retry < max_retries: - retry += 1 - try: - self._reload_connection() - - if change_name_if_used: - name = self._change_flavor_name(name, name_suffix, flavor_data) + if change_name_if_used: + name = self._change_flavor_name(name, name_suffix, flavor_data) - ram, vcpus, extra_specs, extended = self._get_flavor_details( - flavor_data - ) - if extended: - self._process_extended_config_of_flavor(extended, extra_specs) - - # Create flavor - - new_flavor = self.nova.flavors.create( - name=name, - ram=ram, - vcpus=vcpus, - disk=flavor_data.get("disk", 0), - ephemeral=flavor_data.get("ephemeral", 0), - swap=flavor_data.get("swap", 0), - is_public=flavor_data.get("is_public", True), - ) + ram, vcpus, extra_specs, extended = self._get_flavor_details( + flavor_data + ) + if extended: + self._process_extended_config_of_flavor(extended, extra_specs) - # Add metadata - if extra_specs: - new_flavor.set_keys(extra_specs) + # Create flavor - return new_flavor.id + new_flavor = self.nova.flavors.create( + name=name, + ram=ram, + vcpus=vcpus, + disk=flavor_data.get("disk", 0), + ephemeral=flavor_data.get("ephemeral", 0), + swap=flavor_data.get("swap", 0), + is_public=flavor_data.get("is_public", True), + ) - except nvExceptions.Conflict as e: + # Add metadata + if extra_specs: 
+ new_flavor.set_keys(extra_specs) - if change_name_if_used and retry < max_retries: - continue + return new_flavor.id - self._format_exception(e) + except nvExceptions.Conflict as e: + if change_name_if_used and retry < max_retries: + continue - except ( - ksExceptions.ClientException, - nvExceptions.ClientException, - ConnectionError, - KeyError, - ) as e: - self._format_exception(e) + self._format_exception(e) + @catch_any_exception def delete_flavor(self, flavor_id): """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id""" try: self._reload_connection() self.nova.flavors.delete(flavor_id) - return flavor_id - # except nvExceptions.BadRequest as e: - except ( - nvExceptions.NotFound, - ksExceptions.ClientException, - nvExceptions.ClientException, - ConnectionError, - ) as e: - self._format_exception(e) + + except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e: + # If flavor is not found, it does not raise. + self.logger.warning( + f"Error deleting flavor: {flavor_id} is not found, {str(e.message)}" + ) def new_image(self, image_dict): """ @@ -1643,12 +1728,6 @@ class vimconnector(vimconn.VimConnector): self.glance.images.update(new_image.id, **metadata_to_load) return new_image.id - except ( - nvExceptions.Conflict, - ksExceptions.ClientException, - nvExceptions.ClientException, - ) as e: - self._format_exception(e) except ( HTTPException, gl1Exceptions.HTTPException, @@ -1664,7 +1743,10 @@ class vimconnector(vimconn.VimConnector): "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]), http_code=vimconn.HTTP_Bad_Request, ) + except Exception as e: + self._format_exception(e) + @catch_any_exception def delete_image(self, image_id): """Deletes a tenant image from openstack VIM. 
Returns the old id""" try: @@ -1672,36 +1754,25 @@ class vimconnector(vimconn.VimConnector): self.glance.images.delete(image_id) return image_id - except ( - nvExceptions.NotFound, - ksExceptions.ClientException, - nvExceptions.ClientException, - gl1Exceptions.CommunicationError, - gl1Exceptions.HTTPNotFound, - ConnectionError, - ) as e: # TODO remove - self._format_exception(e) + except gl1Exceptions.NotFound as e: + # If image is not found, it does not raise. + self.logger.warning( + f"Error deleting image: {image_id} is not found, {str(e)}" + ) + @catch_any_exception def get_image_id_from_path(self, path): """Get the image id from image path in the VIM database. Returns the image_id""" - try: - self._reload_connection() - images = self.glance.images.list() + self._reload_connection() + images = self.glance.images.list() - for image in images: - if image.metadata.get("location") == path: - return image.id + for image in images: + if image.metadata.get("location") == path: + return image.id - raise vimconn.VimConnNotFoundException( - "image with location '{}' not found".format(path) - ) - except ( - ksExceptions.ClientException, - nvExceptions.ClientException, - gl1Exceptions.CommunicationError, - ConnectionError, - ) as e: - self._format_exception(e) + raise vimconn.VimConnNotFoundException( + "image with location '{}' not found".format(path) + ) def get_image_list(self, filter_dict={}): """Obtain tenant images from VIM @@ -1714,7 +1785,6 @@ class vimconnector(vimconn.VimConnector): List can be empty """ self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict)) - try: self._reload_connection() # filter_dict_os = filter_dict.copy() @@ -1741,6 +1811,7 @@ class vimconnector(vimconn.VimConnector): pass return filtered_list + except ( ksExceptions.ClientException, nvExceptions.ClientException, @@ -1804,6 +1875,10 @@ class vimconnector(vimconn.VimConnector): self.availability_zone = vim_availability_zones else: self.availability_zone = 
self._get_openstack_availablity_zones() + if "storage_availability_zone" in self.config: + self.storage_availability_zone = self.config.get( + "storage_availability_zone" + ) def _get_vm_availability_zone( self, availability_zone_index, availability_zone_list @@ -1876,7 +1951,6 @@ class vimconnector(vimconn.VimConnector): # For VF elif net["type"] == "VF" or net["type"] == "SR-IOV": - port_dict["binding:vnic_type"] = "direct" # VIO specific Changes @@ -1918,8 +1992,14 @@ class vimconnector(vimconn.VimConnector): if net.get("mac_address"): port_dict["mac_address"] = net["mac_address"] - if net.get("ip_address"): - port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}] + ip_dual_list = [] + if ip_list := net.get("ip_address"): + if not isinstance(ip_list, list): + ip_list = [ip_list] + for ip in ip_list: + ip_dict = {"ip_address": ip} + ip_dual_list.append(ip_dict) + port_dict["fixed_ips"] = ip_dual_list # TODO add "subnet_id": def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict: @@ -1936,7 +2016,7 @@ class vimconnector(vimconn.VimConnector): """ new_port = self.neutron.create_port({"port": port_dict}) created_items["port:" + str(new_port["port"]["id"])] = True - net["mac_adress"] = new_port["port"]["mac_address"] + net["mac_address"] = new_port["port"]["mac_address"] net["vim_id"] = new_port["port"]["id"] return new_port @@ -2037,7 +2117,7 @@ class vimconnector(vimconn.VimConnector): def _prepare_persistent_root_volumes( self, name: str, - vm_av_zone: list, + storage_av_zone: list, disk: dict, base_disk_index: int, block_device_mapping: dict, @@ -2048,7 +2128,7 @@ class vimconnector(vimconn.VimConnector): Args: name (str): Name of VM instance - vm_av_zone (list): List of availability zones + storage_av_zone (list): Storage of availability zones disk (dict): Disk details base_disk_index (int): Disk index block_device_mapping (dict): Block device details @@ -2062,12 +2142,9 @@ class vimconnector(vimconn.VimConnector): # Disk may 
include only vim_volume_id or only vim_id." # Use existing persistent root volume finding with volume_id or vim_id key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id" - if disk.get(key_id): - block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id] existing_vim_volumes.append({"id": disk[key_id]}) - else: # Create persistent root volume volume = self.cinder.volumes.create( @@ -2075,7 +2152,7 @@ class vimconnector(vimconn.VimConnector): name=name + "vd" + chr(base_disk_index), imageRef=disk["image_id"], # Make sure volume is in the same AZ as the VM to be attached to - availability_zone=vm_av_zone, + availability_zone=storage_av_zone, ) boot_volume_id = volume.id self.update_block_device_mapping( @@ -2112,17 +2189,69 @@ class vimconnector(vimconn.VimConnector): "Created volume is not valid, does not have id attribute." ) + block_device_mapping["vd" + chr(base_disk_index)] = volume.id + if disk.get("multiattach"): # multiattach volumes do not belong to VDUs + return volume_txt = "volume:" + str(volume.id) if disk.get("keep"): volume_txt += ":keep" created_items[volume_txt] = True - block_device_mapping["vd" + chr(base_disk_index)] = volume.id + + @catch_any_exception + def new_shared_volumes(self, shared_volume_data) -> (str, str): + availability_zone = ( + self.storage_availability_zone + if self.storage_availability_zone + else self.vm_av_zone + ) + volume = self.cinder.volumes.create( + size=shared_volume_data["size"], + name=shared_volume_data["name"], + volume_type="multiattach", + availability_zone=availability_zone, + ) + return volume.name, volume.id + + def _prepare_shared_volumes( + self, + name: str, + disk: dict, + base_disk_index: int, + block_device_mapping: dict, + existing_vim_volumes: list, + created_items: dict, + ): + volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()} + if volumes.get(disk["name"]): + sv_id = volumes[disk["name"]] + max_retries = 3 + vol_status = "" + # If this is not the first 
VM to attach the volume, volume status may be "reserved" for a short time + while max_retries: + max_retries -= 1 + volume = self.cinder.volumes.get(sv_id) + vol_status = volume.status + if volume.status not in ("in-use", "available"): + time.sleep(5) + continue + self.update_block_device_mapping( + volume=volume, + block_device_mapping=block_device_mapping, + base_disk_index=base_disk_index, + disk=disk, + created_items=created_items, + ) + return + raise vimconn.VimConnException( + "Shared volume is not prepared, status is: {}".format(vol_status), + http_code=vimconn.HTTP_Internal_Server_Error, + ) def _prepare_non_root_persistent_volumes( self, name: str, disk: dict, - vm_av_zone: list, + storage_av_zone: list, block_device_mapping: dict, base_disk_index: int, existing_vim_volumes: list, @@ -2133,7 +2262,7 @@ class vimconnector(vimconn.VimConnector): Args: name (str): Name of VM instance disk (dict): Disk details - vm_av_zone (list): List of availability zones + storage_av_zone (list): Storage of availability zones block_device_mapping (dict): Block device details base_disk_index (int): Disk index existing_vim_volumes (list): Existing disk details @@ -2142,20 +2271,17 @@ class vimconnector(vimconn.VimConnector): # Non-root persistent volumes # Disk may include only vim_volume_id or only vim_id." 
key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id" - if disk.get(key_id): - # Use existing persistent volume block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id] existing_vim_volumes.append({"id": disk[key_id]}) - else: - # Create persistent volume + volume_name = f"{name}vd{chr(base_disk_index)}" volume = self.cinder.volumes.create( size=disk["size"], - name=name + "vd" + chr(base_disk_index), + name=volume_name, # Make sure volume is in the same AZ as the VM to be attached to - availability_zone=vm_av_zone, + availability_zone=storage_av_zone, ) self.update_block_device_mapping( volume=volume, @@ -2178,7 +2304,6 @@ class vimconnector(vimconn.VimConnector): elapsed_time (int): Time spent while waiting """ - while elapsed_time < volume_timeout: for created_item in created_items: v, volume_id = ( @@ -2186,7 +2311,13 @@ class vimconnector(vimconn.VimConnector): created_item.split(":")[1], ) if v == "volume": - if self.cinder.volumes.get(volume_id).status != "available": + volume = self.cinder.volumes.get(volume_id) + if ( + volume.volume_type == "multiattach" + and volume.status == "in-use" + ): + return elapsed_time + elif volume.status != "available": break else: # All ready: break from while @@ -2213,7 +2344,10 @@ class vimconnector(vimconn.VimConnector): while elapsed_time < volume_timeout: for volume in existing_vim_volumes: - if self.cinder.volumes.get(volume["id"]).status != "available": + v = self.cinder.volumes.get(volume["id"]) + if v.volume_type == "multiattach" and v.status == "in-use": + return elapsed_time + elif v.status != "available": break else: # all ready: break from while break @@ -2228,7 +2362,7 @@ class vimconnector(vimconn.VimConnector): name: str, existing_vim_volumes: list, created_items: dict, - vm_av_zone: list, + storage_av_zone: list, block_device_mapping: dict, disk_list: list = None, ) -> None: @@ -2238,7 +2372,7 @@ class vimconnector(vimconn.VimConnector): name (str): Name of Instance 
existing_vim_volumes (list): List of existing volumes created_items (dict): All created items belongs to VM - vm_av_zone (list): VM availability zone + storage_av_zone (list): Storage availability zone block_device_mapping (dict): Block devices to be attached to VM disk_list (list): List of disks @@ -2247,14 +2381,22 @@ class vimconnector(vimconn.VimConnector): base_disk_index = ord("b") boot_volume_id = None elapsed_time = 0 - for disk in disk_list: if "image_id" in disk: # Root persistent volume base_disk_index = ord("a") boot_volume_id = self._prepare_persistent_root_volumes( name=name, - vm_av_zone=vm_av_zone, + storage_av_zone=storage_av_zone, + disk=disk, + base_disk_index=base_disk_index, + block_device_mapping=block_device_mapping, + existing_vim_volumes=existing_vim_volumes, + created_items=created_items, + ) + elif disk.get("multiattach"): + self._prepare_shared_volumes( + name=name, disk=disk, base_disk_index=base_disk_index, block_device_mapping=block_device_mapping, @@ -2266,7 +2408,7 @@ class vimconnector(vimconn.VimConnector): self._prepare_non_root_persistent_volumes( name=name, disk=disk, - vm_av_zone=vm_av_zone, + storage_av_zone=storage_av_zone, block_device_mapping=block_device_mapping, base_disk_index=base_disk_index, existing_vim_volumes=existing_vim_volumes, @@ -2475,7 +2617,6 @@ class vimconnector(vimconn.VimConnector): # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry # several times while not assigned: - free_floating_ip = self._get_free_floating_ip( server, floating_network ) @@ -2572,7 +2713,6 @@ class vimconnector(vimconn.VimConnector): self.neutron.update_port(port[0], port_update) except Exception: - raise vimconn.VimConnException( "It was not possible to disable port security for port {}".format( port[0] @@ -2650,20 +2790,19 @@ class vimconnector(vimconn.VimConnector): flavor_id, str(net_list), ) + server = None + created_items = {} + net_list_vim = [] + # list of external networks to be 
connected to instance, later on used to create floating_ip + external_network = [] + # List of ports with port-security disabled + no_secured_ports = [] + block_device_mapping = {} + existing_vim_volumes = [] + server_group_id = None + scheduller_hints = {} try: - server = None - created_items = {} - net_list_vim = [] - # list of external networks to be connected to instance, later on used to create floating_ip - external_network = [] - # List of ports with port-security disabled - no_secured_ports = [] - block_device_mapping = {} - existing_vim_volumes = [] - server_group_id = None - scheduller_hints = {} - # Check the Openstack Connection self._reload_connection() @@ -2681,17 +2820,23 @@ class vimconnector(vimconn.VimConnector): config_drive, userdata = self._create_user_data(cloud_config) # Get availability Zone - vm_av_zone = self._get_vm_availability_zone( + self.vm_av_zone = self._get_vm_availability_zone( availability_zone_index, availability_zone_list ) + storage_av_zone = ( + self.storage_availability_zone + if self.storage_availability_zone + else self.vm_av_zone + ) + if disk_list: # Prepare disks self._prepare_disk_for_vminstance( name=name, existing_vim_volumes=existing_vim_volumes, created_items=created_items, - vm_av_zone=vm_av_zone, + storage_av_zone=storage_av_zone, block_device_mapping=block_device_mapping, disk_list=disk_list, ) @@ -2710,7 +2855,7 @@ class vimconnector(vimconn.VimConnector): flavor_id, net_list_vim, self.config.get("security_groups"), - vm_av_zone, + self.vm_av_zone, self.config.get("keypair"), userdata, config_drive, @@ -2718,7 +2863,6 @@ class vimconnector(vimconn.VimConnector): server_group_id, ) ) - # Create VM server = self.nova.servers.create( name=name, @@ -2727,7 +2871,7 @@ class vimconnector(vimconn.VimConnector): nics=net_list_vim, security_groups=self.config.get("security_groups"), # TODO remove security_groups in future versions. 
Already at neutron port - availability_zone=vm_av_zone, + availability_zone=self.vm_av_zone, key_name=self.config.get("keypair"), userdata=userdata, config_drive=config_drive, @@ -2782,21 +2926,9 @@ class vimconnector(vimconn.VimConnector): def get_vminstance(self, vm_id): """Returns the VM instance information from VIM""" - # self.logger.debug("Getting VM from VIM") - try: - self._reload_connection() - server = self.nova.servers.find(id=vm_id) - # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema) - - return server.to_dict() - except ( - ksExceptions.ClientException, - nvExceptions.ClientException, - nvExceptions.NotFound, - ConnectionError, - ) as e: - self._format_exception(e) + return self._find_nova_server(vm_id) + @catch_any_exception def get_vminstance_console(self, vm_id, console_type="vnc"): """ Get a console for the virtual machine @@ -2812,66 +2944,56 @@ class vimconnector(vimconn.VimConnector): suffix: extra text, e.g. the http path and query string """ self.logger.debug("Getting VM CONSOLE from VIM") + self._reload_connection() + server = self.nova.servers.find(id=vm_id) - try: - self._reload_connection() - server = self.nova.servers.find(id=vm_id) + if console_type is None or console_type == "novnc": + console_dict = server.get_vnc_console("novnc") + elif console_type == "xvpvnc": + console_dict = server.get_vnc_console(console_type) + elif console_type == "rdp-html5": + console_dict = server.get_rdp_console(console_type) + elif console_type == "spice-html5": + console_dict = server.get_spice_console(console_type) + else: + raise vimconn.VimConnException( + "console type '{}' not allowed".format(console_type), + http_code=vimconn.HTTP_Bad_Request, + ) - if console_type is None or console_type == "novnc": - console_dict = server.get_vnc_console("novnc") - elif console_type == "xvpvnc": - console_dict = server.get_vnc_console(console_type) - elif console_type == "rdp-html5": - console_dict = 
server.get_rdp_console(console_type) - elif console_type == "spice-html5": - console_dict = server.get_spice_console(console_type) - else: - raise vimconn.VimConnException( - "console type '{}' not allowed".format(console_type), - http_code=vimconn.HTTP_Bad_Request, - ) + console_dict1 = console_dict.get("console") - console_dict1 = console_dict.get("console") + if console_dict1: + console_url = console_dict1.get("url") - if console_dict1: - console_url = console_dict1.get("url") + if console_url: + # parse console_url + protocol_index = console_url.find("//") + suffix_index = ( + console_url[protocol_index + 2 :].find("/") + protocol_index + 2 + ) + port_index = ( + console_url[protocol_index + 2 : suffix_index].find(":") + + protocol_index + + 2 + ) - if console_url: - # parse console_url - protocol_index = console_url.find("//") - suffix_index = ( - console_url[protocol_index + 2 :].find("/") + protocol_index + 2 - ) - port_index = ( - console_url[protocol_index + 2 : suffix_index].find(":") - + protocol_index - + 2 + if protocol_index < 0 or port_index < 0 or suffix_index < 0: + return ( + -vimconn.HTTP_Internal_Server_Error, + "Unexpected response from VIM", ) - if protocol_index < 0 or port_index < 0 or suffix_index < 0: - return ( - -vimconn.HTTP_Internal_Server_Error, - "Unexpected response from VIM", - ) + console_dict = { + "protocol": console_url[0:protocol_index], + "server": console_url[protocol_index + 2 : port_index], + "port": console_url[port_index:suffix_index], + "suffix": console_url[suffix_index + 1 :], + } + protocol_index += 2 - console_dict = { - "protocol": console_url[0:protocol_index], - "server": console_url[protocol_index + 2 : port_index], - "port": console_url[port_index:suffix_index], - "suffix": console_url[suffix_index + 1 :], - } - protocol_index += 2 - - return console_dict - raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM") - except ( - nvExceptions.NotFound, - ksExceptions.ClientException, - 
nvExceptions.ClientException, - nvExceptions.BadRequest, - ConnectionError, - ) as e: - self._format_exception(e) + return console_dict + raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM") def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None: """Neutron delete ports by id. @@ -2879,16 +3001,44 @@ class vimconnector(vimconn.VimConnector): k_id (str): Port id in the VIM """ try: + self.neutron.delete_port(k_id) + + except (neExceptions.ConnectionFailed, ConnectionError) as e: + self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e)) + # If there is connection error, raise. + self._format_exception(e) + except Exception as e: + self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e)) + + def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool: + """Cinder delete volume by id. + Args: + shared_volume_vim_id (str): ID of shared volume in VIM + """ + elapsed_time = 0 + try: + while elapsed_time < server_timeout: + vol_status = self.cinder.volumes.get(shared_volume_vim_id).status + if vol_status == "available": + self.cinder.volumes.delete(shared_volume_vim_id) + return True - port_dict = self.neutron.list_ports() - existing_ports = [port["id"] for port in port_dict["ports"] if port_dict] + time.sleep(5) + elapsed_time += 5 - if k_id in existing_ports: - self.neutron.delete_port(k_id) + if elapsed_time >= server_timeout: + raise vimconn.VimConnException( + "Timeout waiting for volume " + + shared_volume_vim_id + + " to be available", + http_code=vimconn.HTTP_Request_Timeout, + ) except Exception as e: - - self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e)) + self.logger.error( + "Error deleting volume: {}: {}".format(type(e).__name__, e) + ) + self._format_exception(e) def _delete_volumes_by_id_wth_cinder( self, k: str, k_id: str, volumes_to_hold: list, created_items: dict @@ -2902,7 +3052,7 @@ class vimconnector(vimconn.VimConnector): """ try: if k_id in 
volumes_to_hold: - return + return False if self.cinder.volumes.get(k_id).status != "available": return True @@ -2911,6 +3061,11 @@ class vimconnector(vimconn.VimConnector): self.cinder.volumes.delete(k_id) created_items[k] = None + except (cExceptions.ConnectionError, ConnectionError) as e: + self.logger.error( + "Error deleting volume: {}: {}".format(type(e).__name__, e) + ) + self._format_exception(e) except Exception as e: self.logger.error( "Error deleting volume: {}: {}".format(type(e).__name__, e) @@ -2927,6 +3082,11 @@ class vimconnector(vimconn.VimConnector): self.neutron.delete_floatingip(k_id) created_items[k] = None + except (neExceptions.ConnectionFailed, ConnectionError) as e: + self.logger.error( + "Error deleting floating ip: {}: {}".format(type(e).__name__, e) + ) + self._format_exception(e) except Exception as e: self.logger.error( "Error deleting floating ip: {}: {}".format(type(e).__name__, e) @@ -2952,6 +3112,11 @@ class vimconnector(vimconn.VimConnector): if k_item == "port": self._delete_ports_by_id_wth_neutron(k_id) + except (neExceptions.ConnectionFailed, ConnectionError) as e: + self.logger.error( + "Error deleting port: {}: {}".format(type(e).__name__, e) + ) + self._format_exception(e) except Exception as e: self.logger.error( "Error deleting port: {}: {}".format(type(e).__name__, e) @@ -2967,9 +3132,7 @@ class vimconnector(vimconn.VimConnector): try: k_item, k_id = self._get_item_name_id(k) - if k_item == "volume": - unavailable_vol = self._delete_volumes_by_id_wth_cinder( k, k_id, volumes_to_hold, created_items ) @@ -2978,9 +3141,18 @@ class vimconnector(vimconn.VimConnector): keep_waiting = True elif k_item == "floating_ip": - self._delete_floating_ip_by_id(k, k_id, created_items) + except ( + cExceptions.ConnectionError, + neExceptions.ConnectionFailed, + ConnectionError, + AttributeError, + TypeError, + ) as e: + self.logger.error("Error deleting {}: {}".format(k, e)) + self._format_exception(e) + except Exception as e: 
self.logger.error("Error deleting {}: {}".format(k, e)) @@ -3002,6 +3174,7 @@ class vimconnector(vimconn.VimConnector): if len(key.split(":")) == 2 } + @catch_any_exception def delete_vminstance( self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None ) -> None: @@ -3046,14 +3219,9 @@ class vimconnector(vimconn.VimConnector): if keep_waiting: time.sleep(1) elapsed_time += 1 - - except ( - nvExceptions.NotFound, - ksExceptions.ClientException, - nvExceptions.ClientException, - ConnectionError, - ) as e: - self._format_exception(e) + except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e: + # If VM does not exist, it does not raise + self.logger.warning(f"Error deleting VM: {vm_id} is not found, {str(e)}") def refresh_vms_status(self, vm_list): """Get the status of the virtual machines and their interfaces/ports @@ -3085,7 +3253,6 @@ class vimconnector(vimconn.VimConnector): self.logger.debug( "refresh_vms status: Getting tenant VM instance information from VIM" ) - for vm_id in vm_list: vm = {} @@ -3198,121 +3365,129 @@ class vimconnector(vimconn.VimConnector): return vm_dict + @catch_any_exception def action_vminstance(self, vm_id, action_dict, created_items={}): """Send and action over a VM instance from VIM - Returns None or the console dict if the action was successfully sent to the VIM""" + Returns None or the console dict if the action was successfully sent to the VIM + """ self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict)) - - try: - self._reload_connection() - server = self.nova.servers.find(id=vm_id) - - if "start" in action_dict: - if action_dict["start"] == "rebuild": - server.rebuild() - else: - if server.status == "PAUSED": - server.unpause() - elif server.status == "SUSPENDED": - server.resume() - elif server.status == "SHUTOFF": - server.start() - else: - self.logger.debug( - "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state" - ) - raise vimconn.VimConnException( - "Cannot 'start' instance while 
it is in active state", - http_code=vimconn.HTTP_Bad_Request, + self._reload_connection() + server = self.nova.servers.find(id=vm_id) + if "start" in action_dict: + if action_dict["start"] == "rebuild": + server.rebuild() + vm_state = self.__wait_for_vm(vm_id, "ACTIVE") + if not vm_state: + raise nvExceptions.BadRequest( + 409, + message="Cannot 'REBUILD' vm_state is in ERROR", + ) + else: + if server.status == "PAUSED": + server.unpause() + elif server.status == "SUSPENDED": + server.resume() + elif server.status == "SHUTOFF": + server.start() + vm_state = self.__wait_for_vm(vm_id, "ACTIVE") + if not vm_state: + raise nvExceptions.BadRequest( + 409, + message="Cannot 'START' vm_state is in ERROR", ) - - elif "pause" in action_dict: - server.pause() - elif "resume" in action_dict: - server.resume() - elif "shutoff" in action_dict or "shutdown" in action_dict: - self.logger.debug("server status %s", server.status) - if server.status == "ACTIVE": - server.stop() else: - self.logger.debug("ERROR: VM is not in Active state") - raise vimconn.VimConnException( - "VM is not in active state, stop operation is not allowed", - http_code=vimconn.HTTP_Bad_Request, + self.logger.debug( + "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state" ) - elif "forceOff" in action_dict: - server.stop() # TODO - elif "terminate" in action_dict: - server.delete() - elif "createImage" in action_dict: - server.create_image() - # "path":path_schema, - # "description":description_schema, - # "name":name_schema, - # "metadata":metadata_schema, - # "imageRef": id_schema, - # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] }, - elif "rebuild" in action_dict: - server.rebuild(server.image["id"]) - elif "reboot" in action_dict: - server.reboot() # reboot_type="SOFT" - elif "console" in action_dict: - console_type = action_dict["console"] - - if console_type is None or console_type == "novnc": - console_dict = server.get_vnc_console("novnc") - elif console_type == "xvpvnc": - console_dict = 
server.get_vnc_console(console_type) - elif console_type == "rdp-html5": - console_dict = server.get_rdp_console(console_type) - elif console_type == "spice-html5": - console_dict = server.get_spice_console(console_type) - else: raise vimconn.VimConnException( - "console type '{}' not allowed".format(console_type), + "Cannot 'start' instance while it is in active state", http_code=vimconn.HTTP_Bad_Request, ) - - try: - console_url = console_dict["console"]["url"] - # parse console_url - protocol_index = console_url.find("//") - suffix_index = ( - console_url[protocol_index + 2 :].find("/") + protocol_index + 2 - ) - port_index = ( - console_url[protocol_index + 2 : suffix_index].find(":") - + protocol_index - + 2 + elif "pause" in action_dict: + server.pause() + elif "resume" in action_dict: + server.resume() + elif "shutoff" in action_dict or "shutdown" in action_dict: + self.logger.debug("server status %s", server.status) + if server.status == "ACTIVE": + server.stop() + vm_state = self.__wait_for_vm(vm_id, "SHUTOFF") + if not vm_state: + raise nvExceptions.BadRequest( + 409, + message="Cannot 'STOP' vm_state is in ERROR", ) + else: + self.logger.debug("ERROR: VM is not in Active state") + raise vimconn.VimConnException( + "VM is not in active state, stop operation is not allowed", + http_code=vimconn.HTTP_Bad_Request, + ) + elif "forceOff" in action_dict: + server.stop() # TODO + elif "terminate" in action_dict: + server.delete() + elif "createImage" in action_dict: + server.create_image() + # "path":path_schema, + # "description":description_schema, + # "name":name_schema, + # "metadata":metadata_schema, + # "imageRef": id_schema, + # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] }, + elif "rebuild" in action_dict: + server.rebuild(server.image["id"]) + elif "reboot" in action_dict: + server.reboot() # reboot_type="SOFT" + elif "console" in action_dict: + console_type = action_dict["console"] - if protocol_index < 0 or port_index < 0 or suffix_index < 
0: - raise vimconn.VimConnException( - "Unexpected response from VIM " + str(console_dict) - ) + if console_type is None or console_type == "novnc": + console_dict = server.get_vnc_console("novnc") + elif console_type == "xvpvnc": + console_dict = server.get_vnc_console(console_type) + elif console_type == "rdp-html5": + console_dict = server.get_rdp_console(console_type) + elif console_type == "spice-html5": + console_dict = server.get_spice_console(console_type) + else: + raise vimconn.VimConnException( + "console type '{}' not allowed".format(console_type), + http_code=vimconn.HTTP_Bad_Request, + ) - console_dict2 = { - "protocol": console_url[0:protocol_index], - "server": console_url[protocol_index + 2 : port_index], - "port": int(console_url[port_index + 1 : suffix_index]), - "suffix": console_url[suffix_index + 1 :], - } + try: + console_url = console_dict["console"]["url"] + # parse console_url + protocol_index = console_url.find("//") + suffix_index = ( + console_url[protocol_index + 2 :].find("/") + protocol_index + 2 + ) + port_index = ( + console_url[protocol_index + 2 : suffix_index].find(":") + + protocol_index + + 2 + ) - return console_dict2 - except Exception: + if protocol_index < 0 or port_index < 0 or suffix_index < 0: raise vimconn.VimConnException( "Unexpected response from VIM " + str(console_dict) ) - return None - except ( - ksExceptions.ClientException, - nvExceptions.ClientException, - nvExceptions.NotFound, - ConnectionError, - ) as e: - self._format_exception(e) - # TODO insert exception vimconn.HTTP_Unauthorized + console_dict2 = { + "protocol": console_url[0:protocol_index], + "server": console_url[protocol_index + 2 : port_index], + "port": int(console_url[port_index + 1 : suffix_index]), + "suffix": console_url[suffix_index + 1 :], + } + + return console_dict2 + except Exception: + raise vimconn.VimConnException( + "Unexpected response from VIM " + str(console_dict) + ) + + return None # ###### VIO Specific Changes ######### def 
_generate_vlanID(self): @@ -3444,45 +3619,6 @@ class vimconnector(vimconn.VimConnector): ) ) - def delete_user(self, user_id): - """Delete a user from openstack VIM - Returns the user identifier""" - if self.debug: - print("osconnector: Deleting a user from VIM") - - try: - self._reload_connection() - self.keystone.users.delete(user_id) - - return 1, user_id - except ksExceptions.ConnectionError as e: - error_value = -vimconn.HTTP_Bad_Request - error_text = ( - type(e).__name__ - + ": " - + (str(e) if len(e.args) == 0 else str(e.args[0])) - ) - except ksExceptions.NotFound as e: - error_value = -vimconn.HTTP_Not_Found - error_text = ( - type(e).__name__ - + ": " - + (str(e) if len(e.args) == 0 else str(e.args[0])) - ) - except ksExceptions.ClientException as e: # TODO remove - error_value = -vimconn.HTTP_Bad_Request - error_text = ( - type(e).__name__ - + ": " - + (str(e) if len(e.args) == 0 else str(e.args[0])) - ) - - # TODO insert exception vimconn.HTTP_Unauthorized - # if reaching here is because an exception - self.logger.debug("delete_tenant " + error_text) - - return error_value, error_text - def get_hosts_info(self): """Get the information of deployed hosts Returns the hosts content""" @@ -3580,6 +3716,12 @@ class vimconnector(vimconn.VimConnector): classification_dict = definition classification_dict["name"] = name + + self.logger.info( + "Adding a new (Traffic) Classification to VIM, named {} and {}.".format( + name, classification_dict + ) + ) new_class = self.neutron.create_sfc_flow_classifier( {"flow_classifier": classification_dict} ) @@ -3685,6 +3827,7 @@ class vimconnector(vimconn.VimConnector): "egress": egress_ports[0], "service_function_parameters": {"correlation": correlation}, } + self.logger.info("Adding a new SFI to VIM, {}.".format(sfi_dict)) new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict}) return new_sfi["port_pair"]["id"] @@ -3768,12 +3911,10 @@ class vimconnector(vimconn.VimConnector): def new_sf(self, name, sfis, 
sfc_encap=True): self.logger.debug("Adding a new Service Function to VIM, named '%s'", name) + new_sf = None + try: - new_sf = None self._reload_connection() - # correlation = None - # if sfc_encap: - # correlation = "nsh" for instance in sfis: sfi = self.get_sfi(instance) @@ -3785,6 +3926,8 @@ class vimconnector(vimconn.VimConnector): ) sf_dict = {"name": name, "port_pairs": sfis} + + self.logger.info("Adding a new SF to VIM, {}.".format(sf_dict)) new_sf = self.neutron.create_sfc_port_pair_group( {"port_pair_group": sf_dict} ) @@ -3798,9 +3941,8 @@ class vimconnector(vimconn.VimConnector): ) as e: if new_sf: try: - self.neutron.delete_sfc_port_pair_group( - new_sf["port_pair_group"]["id"] - ) + new_sf_id = new_sf.get("port_pair_group").get("id") + self.neutron.delete_sfc_port_pair_group(new_sf_id) except Exception: self.logger.error( "Creation of Service Function failed, with " @@ -3872,8 +4014,9 @@ class vimconnector(vimconn.VimConnector): def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None): self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name) + new_sfp = None + try: - new_sfp = None self._reload_connection() # In networking-sfc the MPLS encapsulation is legacy # should be used when no full SFC Encapsulation is intended @@ -3892,6 +4035,7 @@ class vimconnector(vimconn.VimConnector): if spi: sfp_dict["chain_id"] = spi + self.logger.info("Adding a new SFP to VIM, {}.".format(sfp_dict)) new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict}) return new_sfp["port_chain"]["id"] @@ -3903,7 +4047,8 @@ class vimconnector(vimconn.VimConnector): ) as e: if new_sfp: try: - self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"]) + new_sfp_id = new_sfp.get("port_chain").get("id") + self.neutron.delete_sfc_port_chain(new_sfp_id) except Exception: self.logger.error( "Creation of Service Function Path failed, with " @@ -4169,6 +4314,7 @@ class vimconnector(vimconn.VimConnector): return classification_dict + 
@catch_any_exception def new_affinity_group(self, affinity_group_data): """Adds a server group to VIM affinity_group_data contains a dictionary with information, keys: @@ -4177,70 +4323,50 @@ class vimconnector(vimconn.VimConnector): scope: Only nfvi-node allowed Returns the server group identifier""" self.logger.debug("Adding Server Group '%s'", str(affinity_group_data)) + name = affinity_group_data["name"] + policy = affinity_group_data["type"] + self._reload_connection() + new_server_group = self.nova.server_groups.create(name, policy) + return new_server_group.id - try: - name = affinity_group_data["name"] - policy = affinity_group_data["type"] - - self._reload_connection() - new_server_group = self.nova.server_groups.create(name, policy) - - return new_server_group.id - except ( - ksExceptions.ClientException, - nvExceptions.ClientException, - ConnectionError, - KeyError, - ) as e: - self._format_exception(e) - + @catch_any_exception def get_affinity_group(self, affinity_group_id): """Obtain server group details from the VIM. Returns the server group detais as a dict""" self.logger.debug("Getting flavor '%s'", affinity_group_id) - try: - self._reload_connection() - server_group = self.nova.server_groups.find(id=affinity_group_id) - - return server_group.to_dict() - except ( - nvExceptions.NotFound, - nvExceptions.ClientException, - ksExceptions.ClientException, - ConnectionError, - ) as e: - self._format_exception(e) + self._reload_connection() + server_group = self.nova.server_groups.find(id=affinity_group_id) + return server_group.to_dict() + @catch_any_exception def delete_affinity_group(self, affinity_group_id): """Deletes a server group from the VIM. 
Returns the old affinity_group_id""" self.logger.debug("Getting server group '%s'", affinity_group_id) - try: - self._reload_connection() - self.nova.server_groups.delete(affinity_group_id) - - return affinity_group_id - except ( - nvExceptions.NotFound, - ksExceptions.ClientException, - nvExceptions.ClientException, - ConnectionError, - ) as e: - self._format_exception(e) + self._reload_connection() + self.nova.server_groups.delete(affinity_group_id) + return affinity_group_id - def get_vdu_state(self, vm_id): - """ - Getting the state of a vdu - param: - vm_id: ID of an instance + @catch_any_exception + def get_vdu_state(self, vm_id, host_is_required=False) -> list: + """Getting the state of a VDU. + Args: + vm_id (str): ID of an instance + host_is_required (Boolean): If the VIM account is non-admin, host info does not appear in server_dict + and if this is set to True, it raises KeyError. + Returns: + vdu_data (list): VDU details including state, flavor, host_info, AZ """ self.logger.debug("Getting the status of VM") self.logger.debug("VIM VM ID %s", vm_id) self._reload_connection() - server = self.nova.servers.find(id=vm_id) - server_dict = server.to_dict() + server_dict = self._find_nova_server(vm_id) + srv_attr = "OS-EXT-SRV-ATTR:host" + host_info = ( + server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr) + ) vdu_data = [ server_dict["status"], server_dict["flavor"]["id"], - server_dict["OS-EXT-SRV-ATTR:host"], + host_info, server_dict["OS-EXT-AZ:availability_zone"], ] self.logger.debug("vdu_data %s", vdu_data) @@ -4302,6 +4428,7 @@ class vimconnector(vimconn.VimConnector): az_check["zone_check"] = True return az_check + @catch_any_exception def migrate_instance(self, vm_id, compute_host=None): """ Migrate a vdu @@ -4311,80 +4438,76 @@ class vimconnector(vimconn.VimConnector): """ self._reload_connection() vm_state = False - instance_state = self.get_vdu_state(vm_id) + instance_state = self.get_vdu_state(vm_id, host_is_required=True) 
server_flavor_id = instance_state[1] server_hypervisor_name = instance_state[2] server_availability_zone = instance_state[3] - try: - server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict() - server_flavor_details = [ - server_flavor["ram"], - server_flavor["disk"], - server_flavor["vcpus"], - ] - if compute_host == server_hypervisor_name: - raise vimconn.VimConnException( - "Unable to migrate instance '{}' to the same host '{}'".format( - vm_id, compute_host - ), - http_code=vimconn.HTTP_Bad_Request, - ) - az_status = self.check_availability_zone( - server_availability_zone, - server_flavor_details, - server_hypervisor_name, - compute_host, + server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict() + server_flavor_details = [ + server_flavor["ram"], + server_flavor["disk"], + server_flavor["vcpus"], + ] + if compute_host == server_hypervisor_name: + raise vimconn.VimConnException( + "Unable to migrate instance '{}' to the same host '{}'".format( + vm_id, compute_host + ), + http_code=vimconn.HTTP_Bad_Request, ) - availability_zone_check = az_status["zone_check"] - available_compute_id = az_status.get("compute_availability") + az_status = self.check_availability_zone( + server_availability_zone, + server_flavor_details, + server_hypervisor_name, + compute_host, + ) + availability_zone_check = az_status["zone_check"] + available_compute_id = az_status.get("compute_availability") - if availability_zone_check is False: - raise vimconn.VimConnException( - "Unable to migrate instance '{}' to a different availability zone".format( - vm_id - ), - http_code=vimconn.HTTP_Bad_Request, - ) - if available_compute_id is not None: - self.nova.servers.live_migrate( - server=vm_id, - host=available_compute_id, - block_migration=True, - disk_over_commit=False, - ) - state = "MIGRATING" - changed_compute_host = "" - if state == "MIGRATING": - vm_state = self.__wait_for_vm(vm_id, "ACTIVE") - changed_compute_host = self.get_vdu_state(vm_id)[2] - if vm_state 
and changed_compute_host == available_compute_id: - self.logger.debug( - "Instance '{}' migrated to the new compute host '{}'".format( - vm_id, changed_compute_host - ) - ) - return state, available_compute_id - else: - raise vimconn.VimConnException( - "Migration Failed. Instance '{}' not moved to the new host {}".format( - vm_id, available_compute_id - ), - http_code=vimconn.HTTP_Bad_Request, + if availability_zone_check is False: + raise vimconn.VimConnException( + "Unable to migrate instance '{}' to a different availability zone".format( + vm_id + ), + http_code=vimconn.HTTP_Bad_Request, + ) + if available_compute_id is not None: + # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25 + self.nova.servers.live_migrate( + server=vm_id, + host=available_compute_id, + block_migration=True, + ) + state = "MIGRATING" + changed_compute_host = "" + if state == "MIGRATING": + vm_state = self.__wait_for_vm(vm_id, "ACTIVE") + changed_compute_host = self.get_vdu_state(vm_id, host_is_required=True)[ + 2 + ] + if vm_state and changed_compute_host == available_compute_id: + self.logger.debug( + "Instance '{}' migrated to the new compute host '{}'".format( + vm_id, changed_compute_host ) + ) + return state, available_compute_id else: raise vimconn.VimConnException( - "Compute '{}' not available or does not have enough resources to migrate the instance".format( - available_compute_id + "Migration Failed. 
Instance '{}' not moved to the new host {}".format( + vm_id, available_compute_id ), http_code=vimconn.HTTP_Bad_Request, ) - except ( - nvExceptions.BadRequest, - nvExceptions.ClientException, - nvExceptions.NotFound, - ) as e: - self._format_exception(e) + else: + raise vimconn.VimConnException( + "Compute '{}' not available or does not have enough resources to migrate the instance".format( + available_compute_id + ), + http_code=vimconn.HTTP_Bad_Request, + ) + @catch_any_exception def resize_instance(self, vm_id, new_flavor_id): """ For resizing the vm based on the given @@ -4399,37 +4522,30 @@ class vimconnector(vimconn.VimConnector): instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id) old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"] new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"] - try: - if instance_status == "ACTIVE" or instance_status == "SHUTOFF": - if old_flavor_disk > new_flavor_disk: + if instance_status == "ACTIVE" or instance_status == "SHUTOFF": + if old_flavor_disk > new_flavor_disk: + raise nvExceptions.BadRequest( + 400, + message="Server disk resize failed. Resize to lower disk flavor is not allowed", + ) + else: + self.nova.servers.resize(server=vm_id, flavor=new_flavor_id) + vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE") + if vm_state: + instance_resized_status = self.confirm_resize(vm_id) + return instance_resized_status + else: raise nvExceptions.BadRequest( - 400, - message="Server disk resize failed. 
Resize to lower disk flavor is not allowed", + 409, + message="Cannot 'resize' vm_state is in ERROR", ) - else: - self.nova.servers.resize(server=vm_id, flavor=new_flavor_id) - vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE") - if vm_state: - instance_resized_status = self.confirm_resize(vm_id) - return instance_resized_status - else: - raise nvExceptions.BadRequest( - 409, - message="Cannot 'resize' vm_state is in ERROR", - ) - else: - self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state") - raise nvExceptions.BadRequest( - 409, - message="Cannot 'resize' instance while it is in vm_state resized", - ) - except ( - nvExceptions.BadRequest, - nvExceptions.ClientException, - nvExceptions.NotFound, - ) as e: - self._format_exception(e) + else: + self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state") + raise nvExceptions.BadRequest( + 409, + message="Cannot 'resize' instance while it is in vm_state resized", + ) def confirm_resize(self, vm_id): """ @@ -4443,3 +4559,23 @@ class vimconnector(vimconn.VimConnector): self.__wait_for_vm(vm_id, "ACTIVE") instance_status = self.get_vdu_state(vm_id)[0] return instance_status + + def get_monitoring_data(self): + try: + self.logger.debug("Getting servers and ports data from Openstack VIMs.") + self._reload_connection() + all_servers = self.nova.servers.list(detailed=True) + try: + for server in all_servers: + if server.flavor.get("original_name"): + server.flavor["id"] = self.nova.flavors.find( + name=server.flavor["original_name"] + ).id + except nClient.exceptions.NotFound as e: + self.logger.warning(str(e.message)) + all_ports = self.neutron.list_ports() + return all_servers, all_ports + except Exception as e: + raise vimconn.VimConnException( + f"Exception in monitoring while getting VMs and ports status: {str(e)}" + )