Disable the check of the release notes
diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
index 621324a..54d0e88 100644
--- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
+++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
@@ -41,6 +41,7 @@ import time
 from typing import Dict, List, Optional, Tuple
 
 from cinderclient import client as cClient
+import cinderclient.exceptions as cExceptions
 from glanceclient import client as glClient
 import glanceclient.exc as gl1Exceptions
 from keystoneauth1 import session
@@ -85,6 +86,16 @@ volume_timeout = 1800
 server_timeout = 1800
 
 
+def catch_any_exception(func):
+    def format_exception(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except Exception as e:
+            vimconnector._format_exception(e)
+
+    return format_exception
+
+
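The `catch_any_exception` decorator funnels any unhandled client error through `vimconnector._format_exception`, so decorated methods can drop their per-method try/except boilerplate. A minimal standalone sketch of the mechanics (the `RuntimeError` stand-in replaces the real vimconn exception mapping):

```python
# Minimal standalone sketch of the decorator mechanics; the RuntimeError
# stand-in replaces the real vimconnector._format_exception mapping.
def catch_any_exception(func):
    def format_exception(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            raise RuntimeError(f"{type(e).__name__}: {e}")  # stand-in

    return format_exception


@catch_any_exception
def delete_flavor(flavor_id):
    raise KeyError("ram")  # simulate a client library error


try:
    delete_flavor("f1")
except RuntimeError as e:
    print(e)  # -> KeyError: 'ram'
```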
 class SafeDumper(yaml.SafeDumper):
     def represent_data(self, data):
         # Openstack APIs use custom subclasses of dict and YAML safe dumper
@@ -175,6 +186,8 @@ class vimconnector(vimconn.VimConnector):
 
         self.persistent_info = persistent_info
         self.availability_zone = persistent_info.get("availability_zone", None)
+        self.storage_availability_zone = None
+        self.vm_av_zone = None
         self.session = persistent_info.get("session", {"reload_client": True})
         self.my_tenant_id = self.session.get("my_tenant_id")
         self.nova = self.session.get("nova")
@@ -339,7 +352,7 @@ class vimconnector(vimconn.VimConnector):
             version = self.config.get("microversion")
 
             if not version:
-                version = "2.1"
+                version = "2.60"
 
             # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
             # Titanium cloud and StarlingX
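The default compute API microversion is raised from 2.1 to 2.60; 2.60 is the first microversion that allows attaching a multiattach volume to more than one server, which the shared-volume support added later in this patch depends on. Deployments can still pin a different value through the VIM config; an illustrative sketch of the resolution logic above:

```python
# Illustrative: how the effective compute microversion is resolved above.
vim_config = {}                                      # per-VIM "config" dictionary
version = vim_config.get("microversion") or "2.60"   # 2.60 adds volume multiattach
```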
@@ -525,7 +538,8 @@ class vimconnector(vimconn.VimConnector):
         # Types. Also, abstract vimconnector should call the validation
         # method before the implemented VIM connectors are called.
 
-    def _format_exception(self, exception):
+    @staticmethod
+    def _format_exception(exception):
         """Transform a keystone, nova, neutron  exception into a vimconn exception discovering the cause"""
         message_error = str(exception)
         tip = ""
@@ -535,8 +549,10 @@ class vimconnector(vimconn.VimConnector):
             (
                 neExceptions.NetworkNotFoundClient,
                 nvExceptions.NotFound,
+                nvExceptions.ResourceNotFound,
                 ksExceptions.NotFound,
                 gl1Exceptions.HTTPNotFound,
+                cExceptions.NotFound,
             ),
         ):
             raise vimconn.VimConnNotFoundException(
@@ -551,6 +567,7 @@ class vimconnector(vimconn.VimConnector):
                 ConnectionError,
                 ksExceptions.ConnectionError,
                 neExceptions.ConnectionFailed,
+                cExceptions.ConnectionError,
             ),
         ):
             if type(exception).__name__ == "SSLError":
@@ -565,17 +582,26 @@ class vimconnector(vimconn.VimConnector):
                 KeyError,
                 nvExceptions.BadRequest,
                 ksExceptions.BadRequest,
+                gl1Exceptions.BadRequest,
+                cExceptions.BadRequest,
             ),
         ):
+            if "OS-EXT-SRV-ATTR:host" in message_error:
+                # str(KeyError) wraps the missing key in quotes, so match by substring
+                tip = " (If the user does not have admin credentials, this attribute will be missing)"
+                raise vimconn.VimConnInsufficientCredentials(
+                    type(exception).__name__ + ": " + message_error + tip
+                )
             raise vimconn.VimConnException(
                 type(exception).__name__ + ": " + message_error
             )
+
         elif isinstance(
             exception,
             (
                 nvExceptions.ClientException,
                 ksExceptions.ClientException,
                 neExceptions.NeutronException,
+                cExceptions.ClientException,
             ),
         ):
             raise vimconn.VimConnUnexpectedResponse(
@@ -588,9 +614,10 @@ class vimconnector(vimconn.VimConnector):
         elif isinstance(exception, vimconn.VimConnException):
             raise exception
         else:  # ()
-            self.logger.error("General Exception " + message_error, exc_info=True)
+            logger = logging.getLogger("ro.vim.openstack")
+            logger.error("General Exception " + message_error, exc_info=True)
 
-            raise vimconn.VimConnConnectionException(
+            raise vimconn.VimConnException(
                 type(exception).__name__ + ": " + message_error
             )
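Since `_format_exception` is now a staticmethod (logging through a module-level logger instead of `self.logger`), the decorator above can call it without an instance. A quick sketch of the translation it performs, using only built-in exceptions so it runs standalone (assumes osm_ro_plugin and this connector module are importable):

```python
# Hedged sketch: exception translation done by the static _format_exception.
from osm_ro_plugin import vimconn
from osm_rovim_openstack.vimconn_openstack import vimconnector

try:
    vimconnector._format_exception(ConnectionError("VIM unreachable"))
except vimconn.VimConnConnectionException as e:
    print(e)  # connectivity errors map to VimConnConnectionException

try:
    vimconnector._format_exception(KeyError("ram"))
except vimconn.VimConnException as e:
    print(e)  # generic bad-request/KeyError maps to VimConnException
```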
 
@@ -628,6 +655,32 @@ class vimconnector(vimconn.VimConnector):
                         "Not found security group {} for this tenant".format(sg)
                     )
 
+    def _find_nova_server(self, vm_id):
+        """
+        Returns the VM instance from Openstack, completed with its flavor ID.
+        Callers should use this instead of nova.servers.find directly: with microversion>=2.47
+        the server body carries the flavor's original_name but not its ID.
+        """
+        try:
+            self._reload_connection()
+            server = self.nova.servers.find(id=vm_id)
+            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+            server_dict = server.to_dict()
+            try:
+                if server_dict["flavor"].get("original_name"):
+                    server_dict["flavor"]["id"] = self.nova.flavors.find(
+                        name=server_dict["flavor"]["original_name"]
+                    ).id
+            except nvExceptions.NotFound as e:
+                self.logger.warning(str(e))
+            return server_dict
+        except (
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
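`_find_nova_server` exists because, from compute microversion 2.47 on, the server body embeds the flavor description instead of linking it, so `to_dict()` exposes `original_name` but no flavor `id`. Roughly the shape being compensated for (illustrative payload, not captured output):

```python
# Illustrative server.to_dict() fragment under microversion >= 2.47:
server_dict = {
    "id": "3fd4...",
    "flavor": {  # embedded description; no "id" key anymore
        "original_name": "m1.small",
        "vcpus": 1,
        "ram": 2048,
        "disk": 20,
    },
}
# _find_nova_server back-fills the ID so existing callers keep working:
# server_dict["flavor"]["id"] = nova.flavors.find(name="m1.small").id
```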
     def check_vim_connectivity(self):
         # just get network list to check connectivity and credentials
         self.get_network_list(filter_dict={})
@@ -641,7 +694,6 @@ class vimconnector(vimconn.VimConnector):
         Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
         """
         self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
-
         try:
             self._reload_connection()
 
@@ -671,7 +723,6 @@ class vimconnector(vimconn.VimConnector):
     def new_tenant(self, tenant_name, tenant_description):
         """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
         self.logger.debug("Adding a new tenant name: %s", tenant_name)
-
         try:
             self._reload_connection()
 
@@ -697,7 +748,6 @@ class vimconnector(vimconn.VimConnector):
     def delete_tenant(self, tenant_id):
         """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
-
         try:
             self._reload_connection()
 
@@ -707,6 +757,7 @@ class vimconnector(vimconn.VimConnector):
                 self.keystone.tenants.delete(tenant_id)
 
             return tenant_id
+
         except (
             ksExceptions.ConnectionError,
             ksExceptions.ClientException,
@@ -796,7 +847,7 @@ class vimconnector(vimconn.VimConnector):
                         "dataplane_physical_net"
                     )
 
-                    # if it is non empty list, use the first value. If it is a string use the value directly
+                    # if it is a non-empty list, use the first value; if it is a string, use the value directly
                     if (
                         isinstance(provider_physical_network, (tuple, list))
                         and provider_physical_network
@@ -812,17 +863,17 @@ class vimconnector(vimconn.VimConnector):
                     )
 
                 if not self.config.get("multisegment_support"):
-                    network_dict[
-                        "provider:physical_network"
-                    ] = provider_physical_network
+                    network_dict["provider:physical_network"] = (
+                        provider_physical_network
+                    )
 
                     if (
                         provider_network_profile
                         and "network-type" in provider_network_profile
                     ):
-                        network_dict[
-                            "provider:network_type"
-                        ] = provider_network_profile["network-type"]
+                        network_dict["provider:network_type"] = (
+                            provider_network_profile["network-type"]
+                        )
                     else:
                         network_dict["provider:network_type"] = self.config.get(
                             "dataplane_network_type", "vlan"
@@ -889,7 +940,7 @@ class vimconnector(vimconn.VimConnector):
 
             if not ip_profile.get("subnet_address"):
                 # Fake subnet is required
-                subnet_rand = random.randint(0, 255)
+                subnet_rand = random.SystemRandom().randint(0, 255)
                 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
 
             if "ip_version" not in ip_profile:
@@ -934,6 +985,15 @@ class vimconnector(vimconn.VimConnector):
                 ip_str = str(netaddr.IPAddress(ip_int))
                 subnet["allocation_pools"][0]["end"] = ip_str
 
+            if (
+                ip_profile.get("ipv6_address_mode")
+                and ip_profile["ip_version"] != "IPv4"
+            ):
+                subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
+                # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
+                # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
+                subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
+
             # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
             self.neutron.create_subnet({"subnet": subnet})
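For IPv6 subnets the profile's `ipv6_address_mode` is now propagated, with `ipv6_ra_mode` mirrored to the same value, a valid combination for the common SLAAC and DHCPv6 cases per the Neutron guide linked above. An illustrative subnet body (names, IDs and addresses are examples):

```python
# Illustrative neutron subnet body built by this method for an IPv6 profile.
subnet = {
    "name": "vnf-net-ipv6",
    "network_id": "a1b2...",
    "ip_version": 6,
    "cidr": "2001:db8:0:1::/64",
    "ipv6_address_mode": "slaac",  # from ip_profile["ipv6_address_mode"]
    "ipv6_ra_mode": "slaac",       # mirrored, per the combination table
}
# self.neutron.create_subnet({"subnet": subnet})
```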
 
@@ -966,6 +1026,14 @@ class vimconnector(vimconn.VimConnector):
 
                     if k_item == "l2gwconn":
                         self.neutron.delete_l2_gateway_connection(k_id)
+
+                except (neExceptions.ConnectionFailed, ConnectionError) as e2:
+                    self.logger.error(
+                        "Error deleting l2 gateway connection: {}: {}".format(
+                            type(e2).__name__, e2
+                        )
+                    )
+                    self._format_exception(e2)
                 except Exception as e2:
                     self.logger.error(
                         "Error deleting l2 gateway connection: {}: {}".format(
@@ -990,7 +1058,6 @@ class vimconnector(vimconn.VimConnector):
         Returns the network list of dictionaries
         """
         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
-
         try:
             self._reload_connection()
             filter_dict_os = filter_dict.copy()
@@ -1050,6 +1117,7 @@ class vimconnector(vimconn.VimConnector):
 
         return net
 
+    @catch_any_exception
     def delete_network(self, net_id, created_items=None):
         """
         Removes a tenant network from VIM and its associated elements
@@ -1073,6 +1141,14 @@ class vimconnector(vimconn.VimConnector):
                     k_item, _, k_id = k.partition(":")
                     if k_item == "l2gwconn":
                         self.neutron.delete_l2_gateway_connection(k_id)
+
+                except (neExceptions.ConnectionFailed, ConnectionError) as e:
+                    self.logger.error(
+                        "Error deleting l2 gateway connection: {}: {}".format(
+                            type(e).__name__, e
+                        )
+                    )
+                    self._format_exception(e)
                 except Exception as e:
                     self.logger.error(
                         "Error deleting l2 gateway connection: {}: {}".format(
@@ -1085,21 +1161,22 @@ class vimconnector(vimconn.VimConnector):
             for p in ports["ports"]:
                 try:
                     self.neutron.delete_port(p["id"])
+
+                except (neExceptions.ConnectionFailed, ConnectionError) as e:
+                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))
+                    # If there is a connection error, abort and raise; other errors are only logged.
+                    self._format_exception(e)
                 except Exception as e:
                     self.logger.error("Error deleting port %s: %s", p["id"], str(e))
 
             self.neutron.delete_network(net_id)
 
             return net_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NetworkNotFoundClient,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        except (neExceptions.NetworkNotFoundClient, neExceptions.NotFound) as e:
+            # If the network to be deleted is not found, do not raise; just log a warning.
+            self.logger.warning(
+                f"Error deleting network: {net_id} is not found, {str(e)}"
+            )
 
     def refresh_nets_status(self, net_list):
         """Get the status of the networks
@@ -1152,13 +1229,11 @@ class vimconnector(vimconn.VimConnector):
     def get_flavor(self, flavor_id):
         """Obtain flavor details from the  VIM. Returns the flavor dict details"""
         self.logger.debug("Getting flavor '%s'", flavor_id)
-
         try:
             self._reload_connection()
             flavor = self.nova.flavors.find(id=flavor_id)
-            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-
             return flavor.to_dict()
+
         except (
             nvExceptions.NotFound,
             nvExceptions.ClientException,
@@ -1230,6 +1305,7 @@ class vimconnector(vimconn.VimConnector):
             )
         except (
             nvExceptions.NotFound,
+            nvExceptions.BadRequest,
             nvExceptions.ClientException,
             ksExceptions.ClientException,
             ConnectionError,
@@ -1500,6 +1576,7 @@ class vimconnector(vimconn.VimConnector):
             flavor_data.get("extended"),
         )
 
+    @catch_any_exception
     def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
         """Adds a tenant flavor to openstack VIM.
         if change_name_if_used is True, it will change name in case of conflict,
@@ -1517,70 +1594,58 @@ class vimconnector(vimconn.VimConnector):
         retry = 0
         max_retries = 3
         name_suffix = 0
+        name = flavor_data["name"]
+        while retry < max_retries:
+            retry += 1
+            try:
+                self._reload_connection()
 
-        try:
-            name = flavor_data["name"]
-            while retry < max_retries:
-                retry += 1
-                try:
-                    self._reload_connection()
+                if change_name_if_used:
+                    name = self._change_flavor_name(name, name_suffix, flavor_data)
 
-                    if change_name_if_used:
-                        name = self._change_flavor_name(name, name_suffix, flavor_data)
+                ram, vcpus, extra_specs, extended = self._get_flavor_details(
+                    flavor_data
+                )
+                if extended:
+                    self._process_extended_config_of_flavor(extended, extra_specs)
 
-                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
-                        flavor_data
-                    )
-                    if extended:
-                        self._process_extended_config_of_flavor(extended, extra_specs)
-
-                    # Create flavor
-
-                    new_flavor = self.nova.flavors.create(
-                        name=name,
-                        ram=ram,
-                        vcpus=vcpus,
-                        disk=flavor_data.get("disk", 0),
-                        ephemeral=flavor_data.get("ephemeral", 0),
-                        swap=flavor_data.get("swap", 0),
-                        is_public=flavor_data.get("is_public", True),
-                    )
+                # Create flavor
 
-                    # Add metadata
-                    if extra_specs:
-                        new_flavor.set_keys(extra_specs)
+                new_flavor = self.nova.flavors.create(
+                    name=name,
+                    ram=ram,
+                    vcpus=vcpus,
+                    disk=flavor_data.get("disk", 0),
+                    ephemeral=flavor_data.get("ephemeral", 0),
+                    swap=flavor_data.get("swap", 0),
+                    is_public=flavor_data.get("is_public", True),
+                )
 
-                    return new_flavor.id
+                # Add metadata
+                if extra_specs:
+                    new_flavor.set_keys(extra_specs)
 
-                except nvExceptions.Conflict as e:
-                    if change_name_if_used and retry < max_retries:
-                        continue
+                return new_flavor.id
 
-                    self._format_exception(e)
+            except nvExceptions.Conflict as e:
+                if change_name_if_used and retry < max_retries:
+                    continue
 
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-            KeyError,
-        ) as e:
-            self._format_exception(e)
+                self._format_exception(e)
 
+    @catch_any_exception
     def delete_flavor(self, flavor_id):
         """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
         try:
             self._reload_connection()
             self.nova.flavors.delete(flavor_id)
-
             return flavor_id
-        # except nvExceptions.BadRequest as e:
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+
+        except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
+            # If the flavor is not found, do not raise; just log a warning.
+            self.logger.warning(
+                f"Error deleting flavor: {flavor_id} is not found, {str(e.message)}"
+            )
 
     def new_image(self, image_dict):
         """
@@ -1663,12 +1728,6 @@ class vimconnector(vimconn.VimConnector):
                 self.glance.images.update(new_image.id, **metadata_to_load)
 
                 return new_image.id
-            except (
-                nvExceptions.Conflict,
-                ksExceptions.ClientException,
-                nvExceptions.ClientException,
-            ) as e:
-                self._format_exception(e)
             except (
                 HTTPException,
                 gl1Exceptions.HTTPException,
@@ -1684,7 +1743,10 @@ class vimconnector(vimconn.VimConnector):
                     "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                     http_code=vimconn.HTTP_Bad_Request,
                 )
+            except Exception as e:
+                self._format_exception(e)
 
+    @catch_any_exception
     def delete_image(self, image_id):
         """Deletes a tenant image from openstack VIM. Returns the old id"""
         try:
@@ -1692,36 +1754,25 @@ class vimconnector(vimconn.VimConnector):
             self.glance.images.delete(image_id)
 
             return image_id
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            gl1Exceptions.CommunicationError,
-            gl1Exceptions.HTTPNotFound,
-            ConnectionError,
-        ) as e:  # TODO remove
-            self._format_exception(e)
+        except gl1Exceptions.NotFound as e:
+            # If the image is not found, do not raise; just log a warning.
+            self.logger.warning(
+                f"Error deleting image: {image_id} is not found, {str(e)}"
+            )
 
+    @catch_any_exception
     def get_image_id_from_path(self, path):
         """Get the image id from image path in the VIM database. Returns the image_id"""
-        try:
-            self._reload_connection()
-            images = self.glance.images.list()
+        self._reload_connection()
+        images = self.glance.images.list()
 
-            for image in images:
-                if image.metadata.get("location") == path:
-                    return image.id
+        for image in images:
+            if image.metadata.get("location") == path:
+                return image.id
 
-            raise vimconn.VimConnNotFoundException(
-                "image with location '{}' not found".format(path)
-            )
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            gl1Exceptions.CommunicationError,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        raise vimconn.VimConnNotFoundException(
+            "image with location '{}' not found".format(path)
+        )
 
     def get_image_list(self, filter_dict={}):
         """Obtain tenant images from VIM
@@ -1734,7 +1785,6 @@ class vimconnector(vimconn.VimConnector):
             List can be empty
         """
         self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
-
         try:
             self._reload_connection()
             # filter_dict_os = filter_dict.copy()
@@ -1761,6 +1811,7 @@ class vimconnector(vimconn.VimConnector):
                     pass
 
             return filtered_list
+
         except (
             ksExceptions.ClientException,
             nvExceptions.ClientException,
@@ -1824,6 +1875,10 @@ class vimconnector(vimconn.VimConnector):
                 self.availability_zone = vim_availability_zones
         else:
             self.availability_zone = self._get_openstack_availablity_zones()
+        if "storage_availability_zone" in self.config:
+            self.storage_availability_zone = self.config.get(
+                "storage_availability_zone"
+            )
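The new `storage_availability_zone` config key lets operators place Cinder volumes in a different AZ from the VMs; when unset, the volume-creation paths below fall back to the VM availability zone as before. Illustrative config fragment (zone names are examples):

```python
# Illustrative per-VIM config: separate compute and storage AZs.
vim_config = {
    "availability_zone": ["az-compute-1"],
    "storage_availability_zone": "az-storage-1",
}
```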
 
     def _get_vm_availability_zone(
         self, availability_zone_index, availability_zone_list
@@ -1937,8 +1992,14 @@ class vimconnector(vimconn.VimConnector):
         if net.get("mac_address"):
             port_dict["mac_address"] = net["mac_address"]
 
-        if net.get("ip_address"):
-            port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
+        ip_dual_list = []
+        if ip_list := net.get("ip_address"):
+            if not isinstance(ip_list, list):
+                ip_list = [ip_list]
+            for ip in ip_list:
+                ip_dict = {"ip_address": ip}
+                ip_dual_list.append(ip_dict)
+            port_dict["fixed_ips"] = ip_dual_list
             # TODO add "subnet_id": <subnet_id>
 
     def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
@@ -1955,7 +2016,7 @@ class vimconnector(vimconn.VimConnector):
         """
         new_port = self.neutron.create_port({"port": port_dict})
         created_items["port:" + str(new_port["port"]["id"])] = True
-        net["mac_adress"] = new_port["port"]["mac_address"]
+        net["mac_address"] = new_port["port"]["mac_address"]
         net["vim_id"] = new_port["port"]["id"]
 
         return new_port
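`net["ip_address"]` may now be a list (e.g. one IPv4 plus one IPv6 address for a dual-stack port), each entry becoming one `fixed_ips` element; the hunk also fixes the long-standing `mac_adress` key typo. Roughly the resulting port body (addresses and IDs are illustrative):

```python
# Illustrative: fixed_ips built for a dual-stack "net" entry.
net = {"ip_address": ["10.0.0.10", "2001:db8::10"]}
port_dict = {"network_id": "a1b2...", "admin_state_up": True}

ip_list = net["ip_address"]
if not isinstance(ip_list, list):
    ip_list = [ip_list]
port_dict["fixed_ips"] = [{"ip_address": ip} for ip in ip_list]
# -> [{"ip_address": "10.0.0.10"}, {"ip_address": "2001:db8::10"}]
```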
@@ -2056,7 +2117,7 @@ class vimconnector(vimconn.VimConnector):
     def _prepare_persistent_root_volumes(
         self,
         name: str,
-        vm_av_zone: list,
+        storage_av_zone: list,
         disk: dict,
         base_disk_index: int,
         block_device_mapping: dict,
@@ -2067,7 +2128,7 @@ class vimconnector(vimconn.VimConnector):
 
         Args:
             name    (str):                      Name of VM instance
-            vm_av_zone  (list):                 List of availability zones
+            storage_av_zone  (list):            Storage availability zone
             disk    (dict):                     Disk details
             base_disk_index (int):              Disk index
             block_device_mapping    (dict):     Block device details
@@ -2081,11 +2142,9 @@ class vimconnector(vimconn.VimConnector):
         # Disk may include only vim_volume_id or only vim_id."
         # Use existing persistent root volume finding with volume_id or vim_id
         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
-
         if disk.get(key_id):
             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
             existing_vim_volumes.append({"id": disk[key_id]})
-
         else:
             # Create persistent root volume
             volume = self.cinder.volumes.create(
@@ -2093,7 +2152,7 @@ class vimconnector(vimconn.VimConnector):
                 name=name + "vd" + chr(base_disk_index),
                 imageRef=disk["image_id"],
                 # Make sure volume is in the same AZ as the VM to be attached to
-                availability_zone=vm_av_zone,
+                availability_zone=storage_av_zone,
             )
             boot_volume_id = volume.id
             self.update_block_device_mapping(
@@ -2130,17 +2189,69 @@ class vimconnector(vimconn.VimConnector):
                 "Created volume is not valid, does not have id attribute."
             )
 
+        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+        if disk.get("multiattach"):  # multiattach volumes do not belong to VDUs
+            return
         volume_txt = "volume:" + str(volume.id)
         if disk.get("keep"):
             volume_txt += ":keep"
         created_items[volume_txt] = True
-        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+
+    @catch_any_exception
+    def new_shared_volumes(self, shared_volume_data) -> Tuple[str, str]:
+        availability_zone = (
+            self.storage_availability_zone
+            if self.storage_availability_zone
+            else self.vm_av_zone
+        )
+        volume = self.cinder.volumes.create(
+            size=shared_volume_data["size"],
+            name=shared_volume_data["name"],
+            volume_type="multiattach",
+            availability_zone=availability_zone,
+        )
+        return volume.name, volume.id
+
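`new_shared_volumes` creates a multiattach Cinder volume that several VDUs can attach concurrently; note it assumes the VIM provides a Cinder volume type named `multiattach` (one whose extra specs enable multiattach). A hedged usage sketch, where `conn` is an initialized vimconnector instance:

```python
# Hedged usage sketch; "conn" is an initialized vimconnector instance and the
# VIM is assumed to provide a Cinder volume type named "multiattach".
name, vol_id = conn.new_shared_volumes({"name": "shared-data", "size": 10})
# VDU disks then reference it with {"multiattach": True, "name": "shared-data"},
# which _prepare_shared_volumes resolves to the existing volume ID.
conn.delete_shared_volumes(vol_id)  # waits until "available", then deletes
```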
+    def _prepare_shared_volumes(
+        self,
+        name: str,
+        disk: dict,
+        base_disk_index: int,
+        block_device_mapping: dict,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ):
+        volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
+        if volumes.get(disk["name"]):
+            sv_id = volumes[disk["name"]]
+            max_retries = 3
+            vol_status = ""
+            # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
+            while max_retries:
+                max_retries -= 1
+                volume = self.cinder.volumes.get(sv_id)
+                vol_status = volume.status
+                if volume.status not in ("in-use", "available"):
+                    time.sleep(5)
+                    continue
+                self.update_block_device_mapping(
+                    volume=volume,
+                    block_device_mapping=block_device_mapping,
+                    base_disk_index=base_disk_index,
+                    disk=disk,
+                    created_items=created_items,
+                )
+                return
+            raise vimconn.VimConnException(
+                "Shared volume is not prepared, status is: {}".format(vol_status),
+                http_code=vimconn.HTTP_Internal_Server_Error,
+            )
 
     def _prepare_non_root_persistent_volumes(
         self,
         name: str,
         disk: dict,
-        vm_av_zone: list,
+        storage_av_zone: list,
         block_device_mapping: dict,
         base_disk_index: int,
         existing_vim_volumes: list,
@@ -2151,7 +2262,7 @@ class vimconnector(vimconn.VimConnector):
         Args:
             name    (str):                      Name of VM instance
             disk    (dict):                     Disk details
-            vm_av_zone  (list):                 List of availability zones
+            storage_av_zone  (list):            Storage availability zone
             block_device_mapping    (dict):     Block device details
             base_disk_index (int):              Disk index
             existing_vim_volumes    (list):     Existing disk details
@@ -2160,19 +2271,17 @@ class vimconnector(vimconn.VimConnector):
         # Non-root persistent volumes
         # Disk may include only vim_volume_id or only vim_id."
         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
-
         if disk.get(key_id):
             # Use existing persistent volume
             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
             existing_vim_volumes.append({"id": disk[key_id]})
-
         else:
-            # Create persistent volume
+            volume_name = f"{name}vd{chr(base_disk_index)}"
             volume = self.cinder.volumes.create(
                 size=disk["size"],
-                name=name + "vd" + chr(base_disk_index),
+                name=volume_name,
                 # Make sure volume is in the same AZ as the VM to be attached to
-                availability_zone=vm_av_zone,
+                availability_zone=storage_av_zone,
             )
             self.update_block_device_mapping(
                 volume=volume,
@@ -2195,7 +2304,6 @@ class vimconnector(vimconn.VimConnector):
             elapsed_time    (int):          Time spent while waiting
 
         """
-
         while elapsed_time < volume_timeout:
             for created_item in created_items:
                 v, volume_id = (
@@ -2203,7 +2311,13 @@ class vimconnector(vimconn.VimConnector):
                     created_item.split(":")[1],
                 )
                 if v == "volume":
-                    if self.cinder.volumes.get(volume_id).status != "available":
+                    volume = self.cinder.volumes.get(volume_id)
+                    if (
+                        volume.volume_type == "multiattach"
+                        and volume.status == "in-use"
+                    ):
+                        return elapsed_time
+                    elif volume.status != "available":
                         break
             else:
                 # All ready: break from while
@@ -2230,7 +2344,10 @@ class vimconnector(vimconn.VimConnector):
 
         while elapsed_time < volume_timeout:
             for volume in existing_vim_volumes:
-                if self.cinder.volumes.get(volume["id"]).status != "available":
+                v = self.cinder.volumes.get(volume["id"])
+                if v.volume_type == "multiattach" and v.status == "in-use":
+                    return elapsed_time
+                elif v.status != "available":
                     break
             else:  # all ready: break from while
                 break
@@ -2245,7 +2362,7 @@ class vimconnector(vimconn.VimConnector):
         name: str,
         existing_vim_volumes: list,
         created_items: dict,
-        vm_av_zone: list,
+        storage_av_zone: list,
         block_device_mapping: dict,
         disk_list: list = None,
     ) -> None:
@@ -2255,7 +2372,7 @@ class vimconnector(vimconn.VimConnector):
             name    (str):                      Name of Instance
             existing_vim_volumes    (list):     List of existing volumes
             created_items   (dict):             All created items belongs to VM
-            vm_av_zone  (list):                 VM availability zone
+            storage_av_zone  (list):            Storage availability zone
             block_device_mapping (dict):        Block devices to be attached to VM
             disk_list   (list):                 List of disks
 
@@ -2264,14 +2381,22 @@ class vimconnector(vimconn.VimConnector):
         base_disk_index = ord("b")
         boot_volume_id = None
         elapsed_time = 0
-
         for disk in disk_list:
             if "image_id" in disk:
                 # Root persistent volume
                 base_disk_index = ord("a")
                 boot_volume_id = self._prepare_persistent_root_volumes(
                     name=name,
-                    vm_av_zone=vm_av_zone,
+                    storage_av_zone=storage_av_zone,
+                    disk=disk,
+                    base_disk_index=base_disk_index,
+                    block_device_mapping=block_device_mapping,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
+            elif disk.get("multiattach"):
+                self._prepare_shared_volumes(
+                    name=name,
                     disk=disk,
                     base_disk_index=base_disk_index,
                     block_device_mapping=block_device_mapping,
@@ -2283,7 +2408,7 @@ class vimconnector(vimconn.VimConnector):
                 self._prepare_non_root_persistent_volumes(
                     name=name,
                     disk=disk,
-                    vm_av_zone=vm_av_zone,
+                    storage_av_zone=storage_av_zone,
                     block_device_mapping=block_device_mapping,
                     base_disk_index=base_disk_index,
                     existing_vim_volumes=existing_vim_volumes,
@@ -2665,20 +2790,19 @@ class vimconnector(vimconn.VimConnector):
             flavor_id,
             str(net_list),
         )
+        server = None
+        created_items = {}
+        net_list_vim = []
+        # list of external networks to be connected to instance, later on used to create floating_ip
+        external_network = []
+        # List of ports with port-security disabled
+        no_secured_ports = []
+        block_device_mapping = {}
+        existing_vim_volumes = []
+        server_group_id = None
+        scheduller_hints = {}
 
         try:
-            server = None
-            created_items = {}
-            net_list_vim = []
-            # list of external networks to be connected to instance, later on used to create floating_ip
-            external_network = []
-            # List of ports with port-security disabled
-            no_secured_ports = []
-            block_device_mapping = {}
-            existing_vim_volumes = []
-            server_group_id = None
-            scheduller_hints = {}
-
             # Check the Openstack Connection
             self._reload_connection()
 
@@ -2696,17 +2820,23 @@ class vimconnector(vimconn.VimConnector):
             config_drive, userdata = self._create_user_data(cloud_config)
 
             # Get availability Zone
-            vm_av_zone = self._get_vm_availability_zone(
+            self.vm_av_zone = self._get_vm_availability_zone(
                 availability_zone_index, availability_zone_list
             )
 
+            storage_av_zone = (
+                self.storage_availability_zone
+                if self.storage_availability_zone
+                else self.vm_av_zone
+            )
+
             if disk_list:
                 # Prepare disks
                 self._prepare_disk_for_vminstance(
                     name=name,
                     existing_vim_volumes=existing_vim_volumes,
                     created_items=created_items,
-                    vm_av_zone=vm_av_zone,
+                    storage_av_zone=storage_av_zone,
                     block_device_mapping=block_device_mapping,
                     disk_list=disk_list,
                 )
@@ -2725,7 +2855,7 @@ class vimconnector(vimconn.VimConnector):
                     flavor_id,
                     net_list_vim,
                     self.config.get("security_groups"),
-                    vm_av_zone,
+                    self.vm_av_zone,
                     self.config.get("keypair"),
                     userdata,
                     config_drive,
@@ -2733,7 +2863,6 @@ class vimconnector(vimconn.VimConnector):
                     server_group_id,
                 )
             )
-
             # Create VM
             server = self.nova.servers.create(
                 name=name,
@@ -2742,7 +2871,7 @@ class vimconnector(vimconn.VimConnector):
                 nics=net_list_vim,
                 security_groups=self.config.get("security_groups"),
                 # TODO remove security_groups in future versions. Already at neutron port
-                availability_zone=vm_av_zone,
+                availability_zone=self.vm_av_zone,
                 key_name=self.config.get("keypair"),
                 userdata=userdata,
                 config_drive=config_drive,
@@ -2797,21 +2926,9 @@ class vimconnector(vimconn.VimConnector):
 
     def get_vminstance(self, vm_id):
         """Returns the VM instance information from VIM"""
-        # self.logger.debug("Getting VM from VIM")
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-
-            return server.to_dict()
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        return self._find_nova_server(vm_id)
 
+    @catch_any_exception
     def get_vminstance_console(self, vm_id, console_type="vnc"):
         """
         Get a console for the virtual machine
@@ -2827,66 +2944,56 @@ class vimconnector(vimconn.VimConnector):
                 suffix:   extra text, e.g. the http path and query string
         """
         self.logger.debug("Getting VM CONSOLE from VIM")
+        self._reload_connection()
+        server = self.nova.servers.find(id=vm_id)
 
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
+        if console_type is None or console_type == "novnc":
+            console_dict = server.get_vnc_console("novnc")
+        elif console_type == "xvpvnc":
+            console_dict = server.get_vnc_console(console_type)
+        elif console_type == "rdp-html5":
+            console_dict = server.get_rdp_console(console_type)
+        elif console_type == "spice-html5":
+            console_dict = server.get_spice_console(console_type)
+        else:
+            raise vimconn.VimConnException(
+                "console type '{}' not allowed".format(console_type),
+                http_code=vimconn.HTTP_Bad_Request,
+            )
 
-            if console_type is None or console_type == "novnc":
-                console_dict = server.get_vnc_console("novnc")
-            elif console_type == "xvpvnc":
-                console_dict = server.get_vnc_console(console_type)
-            elif console_type == "rdp-html5":
-                console_dict = server.get_rdp_console(console_type)
-            elif console_type == "spice-html5":
-                console_dict = server.get_spice_console(console_type)
-            else:
-                raise vimconn.VimConnException(
-                    "console type '{}' not allowed".format(console_type),
-                    http_code=vimconn.HTTP_Bad_Request,
-                )
+        console_dict1 = console_dict.get("console")
 
-            console_dict1 = console_dict.get("console")
+        if console_dict1:
+            console_url = console_dict1.get("url")
 
-            if console_dict1:
-                console_url = console_dict1.get("url")
+            if console_url:
+                # parse console_url
+                protocol_index = console_url.find("//")
+                suffix_index = (
+                    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+                )
+                port_index = (
+                    console_url[protocol_index + 2 : suffix_index].find(":")
+                    + protocol_index
+                    + 2
+                )
 
-                if console_url:
-                    # parse console_url
-                    protocol_index = console_url.find("//")
-                    suffix_index = (
-                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
-                    )
-                    port_index = (
-                        console_url[protocol_index + 2 : suffix_index].find(":")
-                        + protocol_index
-                        + 2
+                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
+                    return (
+                        -vimconn.HTTP_Internal_Server_Error,
+                        "Unexpected response from VIM",
                     )
 
-                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
-                        return (
-                            -vimconn.HTTP_Internal_Server_Error,
-                            "Unexpected response from VIM",
-                        )
-
-                    console_dict = {
-                        "protocol": console_url[0:protocol_index],
-                        "server": console_url[protocol_index + 2 : port_index],
-                        "port": console_url[port_index:suffix_index],
-                        "suffix": console_url[suffix_index + 1 :],
-                    }
-                    protocol_index += 2
+                console_dict = {
+                    "protocol": console_url[0:protocol_index],
+                    "server": console_url[protocol_index + 2 : port_index],
+                    "port": console_url[port_index:suffix_index],
+                    "suffix": console_url[suffix_index + 1 :],
+                }
+                protocol_index += 2
 
-                    return console_dict
-            raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.BadRequest,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+                return console_dict
+        raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
 
     def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
         """Neutron delete ports by id.
@@ -2894,15 +3001,45 @@ class vimconnector(vimconn.VimConnector):
             k_id    (str):      Port id in the VIM
         """
         try:
-            port_dict = self.neutron.list_ports()
-            existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
-
-            if k_id in existing_ports:
-                self.neutron.delete_port(k_id)
+            self.neutron.delete_port(k_id)
 
+        except (neExceptions.ConnectionFailed, ConnectionError) as e:
+            self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+            # If there is a connection error, abort and raise; other errors are only logged.
+            self._format_exception(e)
         except Exception as e:
             self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
 
+    def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
+        """Cinder delete volume by id.
+        Args:
+            shared_volume_vim_id    (str):                  ID of shared volume in VIM
+        """
+        elapsed_time = 0
+        try:
+            while elapsed_time < server_timeout:
+                vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
+                if vol_status == "available":
+                    self.cinder.volumes.delete(shared_volume_vim_id)
+                    return True
+
+                time.sleep(5)
+                elapsed_time += 5
+
+            if elapsed_time >= server_timeout:
+                raise vimconn.VimConnException(
+                    "Timeout waiting for volume "
+                    + shared_volume_vim_id
+                    + " to be available",
+                    http_code=vimconn.HTTP_Request_Timeout,
+                )
+
+        except Exception as e:
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
+            )
+            self._format_exception(e)
+
     def _delete_volumes_by_id_wth_cinder(
         self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
     ) -> bool:
@@ -2915,7 +3052,7 @@ class vimconnector(vimconn.VimConnector):
         """
         try:
             if k_id in volumes_to_hold:
-                return
+                return False
 
             if self.cinder.volumes.get(k_id).status != "available":
                 return True
@@ -2924,6 +3061,11 @@ class vimconnector(vimconn.VimConnector):
                 self.cinder.volumes.delete(k_id)
                 created_items[k] = None
 
+        except (cExceptions.ConnectionError, ConnectionError) as e:
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
+            )
+            self._format_exception(e)
         except Exception as e:
             self.logger.error(
                 "Error deleting volume: {}: {}".format(type(e).__name__, e)
@@ -2940,6 +3082,11 @@ class vimconnector(vimconn.VimConnector):
             self.neutron.delete_floatingip(k_id)
             created_items[k] = None
 
+        except (neExceptions.ConnectionFailed, ConnectionError) as e:
+            self.logger.error(
+                "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
+            )
+            self._format_exception(e)
         except Exception as e:
             self.logger.error(
                 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
@@ -2965,6 +3112,11 @@ class vimconnector(vimconn.VimConnector):
                 if k_item == "port":
                     self._delete_ports_by_id_wth_neutron(k_id)
 
+            except (neExceptions.ConnectionFailed, ConnectionError) as e:
+                self.logger.error(
+                    "Error deleting port: {}: {}".format(type(e).__name__, e)
+                )
+                self._format_exception(e)
             except Exception as e:
                 self.logger.error(
                     "Error deleting port: {}: {}".format(type(e).__name__, e)
@@ -2980,7 +3132,6 @@ class vimconnector(vimconn.VimConnector):
 
             try:
                 k_item, k_id = self._get_item_name_id(k)
-
                 if k_item == "volume":
                     unavailable_vol = self._delete_volumes_by_id_wth_cinder(
                         k, k_id, volumes_to_hold, created_items
@@ -2992,8 +3143,18 @@ class vimconnector(vimconn.VimConnector):
                 elif k_item == "floating_ip":
                     self._delete_floating_ip_by_id(k, k_id, created_items)
 
-            except Exception as e:
-                self.logger.error("Error deleting {}: {}".format(k, e))
+            except (
+                cExceptions.ConnectionError,
+                neExceptions.ConnectionFailed,
+                ConnectionError,
+                AttributeError,
+                TypeError,
+            ) as e:
+                self.logger.error("Error deleting {}: {}".format(k, e))
+                self._format_exception(e)
+
+            except Exception as e:
+                self.logger.error("Error deleting {}: {}".format(k, e))
 
         return keep_waiting
 
@@ -3013,6 +3174,7 @@ class vimconnector(vimconn.VimConnector):
             if len(key.split(":")) == 2
         }
 
+    @catch_any_exception
     def delete_vminstance(
         self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
     ) -> None:
@@ -3057,14 +3219,9 @@ class vimconnector(vimconn.VimConnector):
                 if keep_waiting:
                     time.sleep(1)
                     elapsed_time += 1
-
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
+            # If the VM does not exist, do not raise; just log a warning.
+            self.logger.warning(f"Error deleting VM: {vm_id} is not found, {str(e)}")
 
     def refresh_vms_status(self, vm_list):
         """Get the status of the virtual machines and their interfaces/ports
@@ -3096,7 +3253,6 @@ class vimconnector(vimconn.VimConnector):
         self.logger.debug(
             "refresh_vms status: Getting tenant VM instance information from VIM"
         )
-
         for vm_id in vm_list:
             vm = {}
 
@@ -3209,122 +3365,111 @@ class vimconnector(vimconn.VimConnector):
 
         return vm_dict
 
+    @catch_any_exception
     def action_vminstance(self, vm_id, action_dict, created_items={}):
         """Send and action over a VM instance from VIM
         Returns None or the console dict if the action was successfully sent to the VIM
         """
         self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
-
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-
-            if "start" in action_dict:
-                if action_dict["start"] == "rebuild":
-                    server.rebuild()
-                else:
-                    if server.status == "PAUSED":
-                        server.unpause()
-                    elif server.status == "SUSPENDED":
-                        server.resume()
-                    elif server.status == "SHUTOFF":
-                        server.start()
-                    else:
-                        self.logger.debug(
-                            "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
-                        )
-                        raise vimconn.VimConnException(
-                            "Cannot 'start' instance while it is in active state",
-                            http_code=vimconn.HTTP_Bad_Request,
-                        )
-
-            elif "pause" in action_dict:
-                server.pause()
-            elif "resume" in action_dict:
-                server.resume()
-            elif "shutoff" in action_dict or "shutdown" in action_dict:
-                self.logger.debug("server status %s", server.status)
-                if server.status == "ACTIVE":
-                    server.stop()
+        self._reload_connection()
+        server = self.nova.servers.find(id=vm_id)
+        if "start" in action_dict:
+            if action_dict["start"] == "rebuild":
+                server.rebuild()
+            else:
+                if server.status == "PAUSED":
+                    server.unpause()
+                elif server.status == "SUSPENDED":
+                    server.resume()
+                elif server.status == "SHUTOFF":
+                    server.start()
                 else:
-                    self.logger.debug("ERROR: VM is not in Active state")
-                    raise vimconn.VimConnException(
-                        "VM is not in active state, stop operation is not allowed",
-                        http_code=vimconn.HTTP_Bad_Request,
+                    self.logger.debug(
+                        "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                     )
-            elif "forceOff" in action_dict:
-                server.stop()  # TODO
-            elif "terminate" in action_dict:
-                server.delete()
-            elif "createImage" in action_dict:
-                server.create_image()
-                # "path":path_schema,
-                # "description":description_schema,
-                # "name":name_schema,
-                # "metadata":metadata_schema,
-                # "imageRef": id_schema,
-                # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
-            elif "rebuild" in action_dict:
-                server.rebuild(server.image["id"])
-            elif "reboot" in action_dict:
-                server.reboot()  # reboot_type="SOFT"
-            elif "console" in action_dict:
-                console_type = action_dict["console"]
-
-                if console_type is None or console_type == "novnc":
-                    console_dict = server.get_vnc_console("novnc")
-                elif console_type == "xvpvnc":
-                    console_dict = server.get_vnc_console(console_type)
-                elif console_type == "rdp-html5":
-                    console_dict = server.get_rdp_console(console_type)
-                elif console_type == "spice-html5":
-                    console_dict = server.get_spice_console(console_type)
-                else:
                     raise vimconn.VimConnException(
-                        "console type '{}' not allowed".format(console_type),
+                        "Cannot 'start' instance while it is in active state",
                         http_code=vimconn.HTTP_Bad_Request,
                     )
+        elif "pause" in action_dict:
+            server.pause()
+        elif "resume" in action_dict:
+            server.resume()
+        elif "shutoff" in action_dict or "shutdown" in action_dict:
+            self.logger.debug("server status %s", server.status)
+            if server.status == "ACTIVE":
+                server.stop()
+            else:
+                self.logger.debug("ERROR: VM is not in Active state")
+                raise vimconn.VimConnException(
+                    "VM is not in active state, stop operation is not allowed",
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
+        elif "forceOff" in action_dict:
+            server.stop()  # TODO
+        elif "terminate" in action_dict:
+            server.delete()
+        elif "createImage" in action_dict:
+            server.create_image()
+            # "path":path_schema,
+            # "description":description_schema,
+            # "name":name_schema,
+            # "metadata":metadata_schema,
+            # "imageRef": id_schema,
+            # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
+        elif "rebuild" in action_dict:
+            server.rebuild(server.image["id"])
+        elif "reboot" in action_dict:
+            server.reboot()  # reboot_type="SOFT"
+        elif "console" in action_dict:
+            console_type = action_dict["console"]
 
-                try:
-                    console_url = console_dict["console"]["url"]
-                    # parse console_url
-                    protocol_index = console_url.find("//")
-                    suffix_index = (
-                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
-                    )
-                    port_index = (
-                        console_url[protocol_index + 2 : suffix_index].find(":")
-                        + protocol_index
-                        + 2
-                    )
-
-                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
-                        raise vimconn.VimConnException(
-                            "Unexpected response from VIM " + str(console_dict)
-                        )
+            if console_type is None or console_type == "novnc":
+                console_dict = server.get_vnc_console("novnc")
+            elif console_type == "xvpvnc":
+                console_dict = server.get_vnc_console(console_type)
+            elif console_type == "rdp-html5":
+                console_dict = server.get_rdp_console(console_type)
+            elif console_type == "spice-html5":
+                console_dict = server.get_spice_console(console_type)
+            else:
+                raise vimconn.VimConnException(
+                    "console type '{}' not allowed".format(console_type),
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
 
-                    console_dict2 = {
-                        "protocol": console_url[0:protocol_index],
-                        "server": console_url[protocol_index + 2 : port_index],
-                        "port": int(console_url[port_index + 1 : suffix_index]),
-                        "suffix": console_url[suffix_index + 1 :],
-                    }
+            try:
+                console_url = console_dict["console"]["url"]
+                # parse console_url
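+                # e.g. a hypothetical "http://10.0.0.1:6080/vnc_auto.html?token=t"
+                # parses to protocol "http:", server "10.0.0.1", port 6080 and
+                # suffix "vnc_auto.html?token=t" via the index arithmetic below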
+                protocol_index = console_url.find("//")
+                suffix_index = (
+                    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+                )
+                port_index = (
+                    console_url[protocol_index + 2 : suffix_index].find(":")
+                    + protocol_index
+                    + 2
+                )
 
-                    return console_dict2
-                except Exception:
+                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                     raise vimconn.VimConnException(
                         "Unexpected response from VIM " + str(console_dict)
                     )
 
-            return None
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-        # TODO insert exception vimconn.HTTP_Unauthorized
+                console_dict2 = {
+                    "protocol": console_url[0:protocol_index],
+                    "server": console_url[protocol_index + 2 : port_index],
+                    "port": int(console_url[port_index + 1 : suffix_index]),
+                    "suffix": console_url[suffix_index + 1 :],
+                }
+
+                return console_dict2
+            except Exception:
+                raise vimconn.VimConnException(
+                    "Unexpected response from VIM " + str(console_dict)
+                )
+
+        return None
 
     # ###### VIO Specific Changes #########
     def _generate_vlanID(self):
@@ -3529,78 +3674,681 @@ class vimconnector(vimconn.VimConnector):
 
         return error_value, error_text
 
-    def new_affinity_group(self, affinity_group_data):
-        """Adds a server group to VIM
-            affinity_group_data contains a dictionary with information, keys:
-                name: name in VIM for the server group
-                type: affinity or anti-affinity
-                scope: Only nfvi-node allowed
-        Returns the server group identifier"""
-        self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
+    def new_classification(self, name, ctype, definition):
+        self.logger.debug(
+            "Adding a new (Traffic) Classification to VIM, named %s", name
+        )
 
         try:
-            name = affinity_group_data["name"]
-            policy = affinity_group_data["type"]
+            self._reload_connection()
 
+            if ctype not in supportedClassificationTypes:
+                raise vimconn.VimConnNotSupportedException(
+                    "OpenStack VIM connector does not support provided "
+                    "Classification Type {}, supported ones are: {}".format(
+                        ctype, supportedClassificationTypes
+                    )
+                )
+
+            if not self._validate_classification(ctype, definition):
+                raise vimconn.VimConnException(
+                    "Incorrect Classification definition for the type specified."
+                )
+
+            classification_dict = definition
+            classification_dict["name"] = name
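+            # Hypothetical example: for ctype "legacy_flow_classifier" the
+            # definition could be {"protocol": "tcp", "source_port_range_min": 80,
+            # "source_port_range_max": 80, "source_ip_prefix": "10.0.0.0/24"},
+            # so the payload sent to Neutron is that dict plus the "name" key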
+
+            self.logger.info(
+                "Adding a new (Traffic) Classification to VIM, named %s: %s.",
+                name,
+                classification_dict,
+            )
+            new_class = self.neutron.create_sfc_flow_classifier(
+                {"flow_classifier": classification_dict}
+            )
+
+            return new_class["flow_classifier"]["id"]
+        except (
+            neExceptions.ConnectionFailed,
+            ksExceptions.ClientException,
+            neExceptions.NeutronException,
+            ConnectionError,
+        ) as e:
+            self.logger.error("Creation of Classification failed.")
+            self._format_exception(e)
+
+    def get_classification(self, class_id):
+        self.logger.debug(" Getting Classification %s from VIM", class_id)
+        filter_dict = {"id": class_id}
+        class_list = self.get_classification_list(filter_dict)
+
+        if len(class_list) == 0:
+            raise vimconn.VimConnNotFoundException(
+                "Classification '{}' not found".format(class_id)
+            )
+        elif len(class_list) > 1:
+            raise vimconn.VimConnConflictException(
+                "Found more than one Classification with this criteria"
+            )
+
+        classification = class_list[0]
+
+        return classification
+
+    def get_classification_list(self, filter_dict={}):
+        self.logger.debug(
+            "Getting Classifications from VIM filter: '%s'", str(filter_dict)
+        )
+
+        try:
+            filter_dict_os = filter_dict.copy()
             self._reload_connection()
-            new_server_group = self.nova.server_groups.create(name, policy)
 
-            return new_server_group.id
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+            classification_dict = self.neutron.list_sfc_flow_classifiers(
+                **filter_dict_os
+            )
+            classification_list = classification_dict["flow_classifiers"]
+            self.__classification_os2mano(classification_list)
+
+            return classification_list
         except (
+            neExceptions.ConnectionFailed,
             ksExceptions.ClientException,
-            nvExceptions.ClientException,
+            neExceptions.NeutronException,
             ConnectionError,
-            KeyError,
         ) as e:
             self._format_exception(e)
 
-    def get_affinity_group(self, affinity_group_id):
-        """Obtain server group details from the VIM. Returns the server group detais as a dict"""
-        self.logger.debug("Getting flavor '%s'", affinity_group_id)
+    def delete_classification(self, class_id):
+        self.logger.debug("Deleting Classification '%s' from VIM", class_id)
+
         try:
             self._reload_connection()
-            server_group = self.nova.server_groups.find(id=affinity_group_id)
+            self.neutron.delete_sfc_flow_classifier(class_id)
 
-            return server_group.to_dict()
+            return class_id
         except (
-            nvExceptions.NotFound,
-            nvExceptions.ClientException,
+            neExceptions.ConnectionFailed,
+            neExceptions.NeutronException,
             ksExceptions.ClientException,
             ConnectionError,
         ) as e:
             self._format_exception(e)
 
-    def delete_affinity_group(self, affinity_group_id):
-        """Deletes a server group from the VIM. Returns the old affinity_group_id"""
-        self.logger.debug("Getting server group '%s'", affinity_group_id)
+    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
+        self.logger.debug(
+            "Adding a new Service Function Instance to VIM, named '%s'", name
+        )
+
         try:
+            new_sfi = None
             self._reload_connection()
-            self.nova.server_groups.delete(affinity_group_id)
+            correlation = None
+
+            if sfc_encap:
+                correlation = "nsh"
+
+            if len(ingress_ports) != 1:
+                raise vimconn.VimConnNotSupportedException(
+                    "OpenStack VIM connector can only have 1 ingress port per SFI"
+                )
+
+            if len(egress_ports) != 1:
+                raise vimconn.VimConnNotSupportedException(
+                    "OpenStack VIM connector can only have 1 egress port per SFI"
+                )
+
+            sfi_dict = {
+                "name": name,
+                "ingress": ingress_ports[0],
+                "egress": egress_ports[0],
+                "service_function_parameters": {"correlation": correlation},
+            }
+            self.logger.info("Adding a new SFI to VIM, {}.".format(sfi_dict))
+            new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
 
-            return affinity_group_id
+            return new_sfi["port_pair"]["id"]
         except (
-            nvExceptions.NotFound,
+            neExceptions.ConnectionFailed,
             ksExceptions.ClientException,
-            nvExceptions.ClientException,
+            neExceptions.NeutronException,
+            ConnectionError,
+        ) as e:
+            if new_sfi:
+                try:
+                    self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
+                except Exception:
+                    self.logger.error(
+                        "Creation of Service Function Instance failed, with "
+                        "subsequent deletion failure as well."
+                    )
+
+            self._format_exception(e)
+
+    def get_sfi(self, sfi_id):
+        self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
+        filter_dict = {"id": sfi_id}
+        sfi_list = self.get_sfi_list(filter_dict)
+
+        if len(sfi_list) == 0:
+            raise vimconn.VimConnNotFoundException(
+                "Service Function Instance '{}' not found".format(sfi_id)
+            )
+        elif len(sfi_list) > 1:
+            raise vimconn.VimConnConflictException(
+                "Found more than one Service Function Instance with this criteria"
+            )
+
+        sfi = sfi_list[0]
+
+        return sfi
+
+    def get_sfi_list(self, filter_dict={}):
+        self.logger.debug(
+            "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
+        )
+
+        try:
+            self._reload_connection()
+            filter_dict_os = filter_dict.copy()
+
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+            sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
+            sfi_list = sfi_dict["port_pairs"]
+            self.__sfi_os2mano(sfi_list)
+
+            return sfi_list
+        except (
+            neExceptions.ConnectionFailed,
+            ksExceptions.ClientException,
+            neExceptions.NeutronException,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
+    def delete_sfi(self, sfi_id):
+        self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
+
+        try:
+            self._reload_connection()
+            self.neutron.delete_sfc_port_pair(sfi_id)
+
+            return sfi_id
+        except (
+            neExceptions.ConnectionFailed,
+            neExceptions.NeutronException,
+            ksExceptions.ClientException,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
+    def new_sf(self, name, sfis, sfc_encap=True):
+        self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
+
+        new_sf = None
+
+        try:
+            self._reload_connection()
+
+            for instance in sfis:
+                sfi = self.get_sfi(instance)
+
+                if sfi.get("sfc_encap") != sfc_encap:
+                    raise vimconn.VimConnNotSupportedException(
+                        "OpenStack VIM connector requires all SFIs of the "
+                        "same SF to share the same SFC Encapsulation"
+                    )
+
+            sf_dict = {"name": name, "port_pairs": sfis}
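+            # In networking-sfc a port pair group bundles the port pairs (SFIs)
+            # that provide the same service function and can be load-balanced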
+
+            self.logger.info("Adding a new SF to VIM, {}.".format(sf_dict))
+            new_sf = self.neutron.create_sfc_port_pair_group(
+                {"port_pair_group": sf_dict}
+            )
+
+            return new_sf["port_pair_group"]["id"]
+        except (
+            neExceptions.ConnectionFailed,
+            ksExceptions.ClientException,
+            neExceptions.NeutronException,
+            ConnectionError,
+        ) as e:
+            if new_sf:
+                try:
+                    new_sf_id = new_sf.get("port_pair_group").get("id")
+                    self.neutron.delete_sfc_port_pair_group(new_sf_id)
+                except Exception:
+                    self.logger.error(
+                        "Creation of Service Function failed, with "
+                        "subsequent deletion failure as well."
+                    )
+
+            self._format_exception(e)
+
+    def get_sf(self, sf_id):
+        self.logger.debug("Getting Service Function %s from VIM", sf_id)
+        filter_dict = {"id": sf_id}
+        sf_list = self.get_sf_list(filter_dict)
+
+        if len(sf_list) == 0:
+            raise vimconn.VimConnNotFoundException(
+                "Service Function '{}' not found".format(sf_id)
+            )
+        elif len(sf_list) > 1:
+            raise vimconn.VimConnConflictException(
+                "Found more than one Service Function with this criteria"
+            )
+
+        sf = sf_list[0]
+
+        return sf
+
+    def get_sf_list(self, filter_dict={}):
+        self.logger.debug(
+            "Getting Service Function from VIM filter: '%s'", str(filter_dict)
+        )
+
+        try:
+            self._reload_connection()
+            filter_dict_os = filter_dict.copy()
+
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+            sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
+            sf_list = sf_dict["port_pair_groups"]
+            self.__sf_os2mano(sf_list)
+
+            return sf_list
+        except (
+            neExceptions.ConnectionFailed,
+            ksExceptions.ClientException,
+            neExceptions.NeutronException,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
+    def delete_sf(self, sf_id):
+        self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
+
+        try:
+            self._reload_connection()
+            self.neutron.delete_sfc_port_pair_group(sf_id)
+
+            return sf_id
+        except (
+            neExceptions.ConnectionFailed,
+            neExceptions.NeutronException,
+            ksExceptions.ClientException,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
+    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
+        self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
+
+        new_sfp = None
+
+        try:
+            self._reload_connection()
+            # In networking-sfc the MPLS encapsulation is legacy
+            # should be used when no full SFC Encapsulation is intended
+            correlation = "mpls"
+
+            if sfc_encap:
+                correlation = "nsh"
+
+            sfp_dict = {
+                "name": name,
+                "flow_classifiers": classifications,
+                "port_pair_groups": sfs,
+                "chain_parameters": {"correlation": correlation},
+            }
+
+            if spi:
+                sfp_dict["chain_id"] = spi
+
+            self.logger.info("Adding a new SFP to VIM, {}.".format(sfp_dict))
+            new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
+
+            return new_sfp["port_chain"]["id"]
+        except (
+            neExceptions.ConnectionFailed,
+            ksExceptions.ClientException,
+            neExceptions.NeutronException,
+            ConnectionError,
+        ) as e:
+            if new_sfp:
+                try:
+                    new_sfp_id = new_sfp.get("port_chain").get("id")
+                    self.neutron.delete_sfc_port_chain(new_sfp_id)
+                except Exception:
+                    self.logger.error(
+                        "Creation of Service Function Path failed, with "
+                        "subsequent deletion failure as well."
+                    )
+
+            self._format_exception(e)
+
+    def get_sfp(self, sfp_id):
+        self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
+
+        filter_dict = {"id": sfp_id}
+        sfp_list = self.get_sfp_list(filter_dict)
+
+        if len(sfp_list) == 0:
+            raise vimconn.VimConnNotFoundException(
+                "Service Function Path '{}' not found".format(sfp_id)
+            )
+        elif len(sfp_list) > 1:
+            raise vimconn.VimConnConflictException(
+                "Found more than one Service Function Path with this criteria"
+            )
+
+        sfp = sfp_list[0]
+
+        return sfp
+
+    def get_sfp_list(self, filter_dict={}):
+        self.logger.debug(
+            "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
+        )
+
+        try:
+            self._reload_connection()
+            filter_dict_os = filter_dict.copy()
+
+            if self.api_version3 and "tenant_id" in filter_dict_os:
+                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+            sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
+            sfp_list = sfp_dict["port_chains"]
+            self.__sfp_os2mano(sfp_list)
+
+            return sfp_list
+        except (
+            neExceptions.ConnectionFailed,
+            ksExceptions.ClientException,
+            neExceptions.NeutronException,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
+    def delete_sfp(self, sfp_id):
+        self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
+
+        try:
+            self._reload_connection()
+            self.neutron.delete_sfc_port_chain(sfp_id)
+
+            return sfp_id
+        except (
+            neExceptions.ConnectionFailed,
+            neExceptions.NeutronException,
+            ksExceptions.ClientException,
             ConnectionError,
         ) as e:
             self._format_exception(e)
 
-    def get_vdu_state(self, vm_id):
+    def refresh_sfps_status(self, sfp_list):
+        """Get the status of the service function path
+        Params: the list of sfp identifiers
+        Returns a dictionary with:
+            vm_id:          #VIM id of this service function path
+                status:     #Mandatory. Text with one of:
+                            #  DELETED (not found at vim)
+                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                            #  OTHER (Vim reported other status not understood)
+                            #  ERROR (VIM indicates an ERROR status)
+                            #  ACTIVE,
+                            #  CREATING (on building process)
+                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
         """
-        Getting the state of a vdu
-        param:
-            vm_id: ID of an instance
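+        # Example return value (illustrative):
+        #     {"<sfp-uuid>": {"status": "ACTIVE", "vim_info": "<yaml dump>"}}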
+        sfp_dict = {}
+        self.logger.debug(
+            "refresh_sfps status: Getting tenant SFP information from VIM"
+        )
+
+        for sfp_id in sfp_list:
+            sfp = {}
+
+            try:
+                sfp_vim = self.get_sfp(sfp_id)
+
+                if sfp_vim["spi"]:
+                    sfp["status"] = vmStatus2manoFormat["ACTIVE"]
+                else:
+                    sfp["status"] = "OTHER"
+                    sfp["error_msg"] = "VIM status reported " + sfp["status"]
+
+                sfp["vim_info"] = self.serialize(sfp_vim)
+
+                if sfp_vim.get("fault"):
+                    sfp["error_msg"] = str(sfp_vim["fault"])
+            except vimconn.VimConnNotFoundException as e:
+                self.logger.error("Exception getting sfp status: %s", str(e))
+                sfp["status"] = "DELETED"
+                sfp["error_msg"] = str(e)
+            except vimconn.VimConnException as e:
+                self.logger.error("Exception getting sfp status: %s", str(e))
+                sfp["status"] = "VIM_ERROR"
+                sfp["error_msg"] = str(e)
+
+            sfp_dict[sfp_id] = sfp
+
+        return sfp_dict
+
+    def refresh_sfis_status(self, sfi_list):
+        """Get the status of the service function instances
+        Params: the list of sfi identifiers
+        Returns a dictionary with:
+            sfi_id:         #VIM id of this service function instance
+                status:     #Mandatory. Text with one of:
+                            #  DELETED (not found at vim)
+                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                            #  OTHER (Vim reported other status not understood)
+                            #  ERROR (VIM indicates an ERROR status)
+                            #  ACTIVE,
+                            #  CREATING (on building process)
+                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+        """
+        sfi_dict = {}
+        self.logger.debug(
+            "refresh_sfis status: Getting tenant sfi information from VIM"
+        )
+
+        for sfi_id in sfi_list:
+            sfi = {}
+
+            try:
+                sfi_vim = self.get_sfi(sfi_id)
+
+                if sfi_vim:
+                    sfi["status"] = vmStatus2manoFormat["ACTIVE"]
+                else:
+                    sfi["status"] = "OTHER"
+                    sfi["error_msg"] = "VIM status reported " + sfi["status"]
+
+                sfi["vim_info"] = self.serialize(sfi_vim)
+
+                if sfi_vim.get("fault"):
+                    sfi["error_msg"] = str(sfi_vim["fault"])
+            except vimconn.VimConnNotFoundException as e:
+                self.logger.error("Exception getting sfi status: %s", str(e))
+                sfi["status"] = "DELETED"
+                sfi["error_msg"] = str(e)
+            except vimconn.VimConnException as e:
+                self.logger.error("Exception getting sfi status: %s", str(e))
+                sfi["status"] = "VIM_ERROR"
+                sfi["error_msg"] = str(e)
+
+            sfi_dict[sfi_id] = sfi
+
+        return sfi_dict
+
+    def refresh_sfs_status(self, sf_list):
+        """Get the status of the service functions
+        Params: the list of sf identifiers
+        Returns a dictionary with:
+            sf_id:          #VIM id of this service function
+                status:     #Mandatory. Text with one of:
+                            #  DELETED (not found at vim)
+                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                            #  OTHER (Vim reported other status not understood)
+                            #  ERROR (VIM indicates an ERROR status)
+                            #  ACTIVE,
+                            #  CREATING (on building process)
+                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+        """
+        sf_dict = {}
+        self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
+
+        for sf_id in sf_list:
+            sf = {}
+
+            try:
+                sf_vim = self.get_sf(sf_id)
+
+                if sf_vim:
+                    sf["status"] = vmStatus2manoFormat["ACTIVE"]
+                else:
+                    sf["status"] = "OTHER"
+                    sf["error_msg"] = "VIM status reported " + sf_vim["status"]
+
+                sf["vim_info"] = self.serialize(sf_vim)
+
+                if sf_vim.get("fault"):
+                    sf["error_msg"] = str(sf_vim["fault"])
+            except vimconn.VimConnNotFoundException as e:
+                self.logger.error("Exception getting sf status: %s", str(e))
+                sf["status"] = "DELETED"
+                sf["error_msg"] = str(e)
+            except vimconn.VimConnException as e:
+                self.logger.error("Exception getting sf status: %s", str(e))
+                sf["status"] = "VIM_ERROR"
+                sf["error_msg"] = str(e)
+
+            sf_dict[sf_id] = sf
+
+        return sf_dict
+
+    def refresh_classifications_status(self, classification_list):
+        """Get the status of the classifications
+        Params: the list of classification identifiers
+        Returns a dictionary with:
+            classification_id:  #VIM id of this classifier
+                status:     #Mandatory. Text with one of:
+                            #  DELETED (not found at vim)
+                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                            #  OTHER (Vim reported other status not understood)
+                            #  ERROR (VIM indicates an ERROR status)
+                            #  ACTIVE,
+                            #  CREATING (on building process)
+                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+        """
+        classification_dict = {}
+        self.logger.debug(
+            "refresh_classifications status: Getting tenant classification information from VIM"
+        )
+
+        for classification_id in classification_list:
+            classification = {}
+
+            try:
+                classification_vim = self.get_classification(classification_id)
+
+                if classification_vim:
+                    classification["status"] = vmStatus2manoFormat["ACTIVE"]
+                else:
+                    classification["status"] = "OTHER"
+                    classification["error_msg"] = (
+                        "VIM status reported " + classification["status"]
+                    )
+
+                classification["vim_info"] = self.serialize(classification_vim)
+
+                if classification_vim.get("fault"):
+                    classification["error_msg"] = str(classification_vim["fault"])
+            except vimconn.VimConnNotFoundException as e:
+                self.logger.error("Exception getting classification status: %s", str(e))
+                classification["status"] = "DELETED"
+                classification["error_msg"] = str(e)
+            except vimconn.VimConnException as e:
+                self.logger.error("Exception getting classification status: %s", str(e))
+                classification["status"] = "VIM_ERROR"
+                classification["error_msg"] = str(e)
+
+            classification_dict[classification_id] = classification
+
+        return classification_dict
+
+    @catch_any_exception
+    def new_affinity_group(self, affinity_group_data):
+        """Adds a server group to VIM
+            affinity_group_data contains a dictionary with information, keys:
+                name: name in VIM for the server group
+                type: affinity or anti-affinity
+                scope: Only nfvi-node allowed
+        Returns the server group identifier"""
+        self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
+        name = affinity_group_data["name"]
+        policy = affinity_group_data["type"]
+        self._reload_connection()
+        new_server_group = self.nova.server_groups.create(name, policy)
+        return new_server_group.id
+
+    @catch_any_exception
+    def get_affinity_group(self, affinity_group_id):
+        """Obtain server group details from the VIM. Returns the server group detais as a dict"""
+        self.logger.debug("Getting flavor '%s'", affinity_group_id)
+        self._reload_connection()
+        server_group = self.nova.server_groups.find(id=affinity_group_id)
+        return server_group.to_dict()
+
+    @catch_any_exception
+    def delete_affinity_group(self, affinity_group_id):
+        """Deletes a server group from the VIM. Returns the old affinity_group_id"""
+        self.logger.debug("Getting server group '%s'", affinity_group_id)
+        self._reload_connection()
+        self.nova.server_groups.delete(affinity_group_id)
+        return affinity_group_id
+
+    @catch_any_exception
+    def get_vdu_state(self, vm_id, host_is_required=False) -> list:
+        """Getting the state of a VDU.
+        Args:
+            vm_id   (str): ID of an instance
+            host_is_required    (Boolean): If True, a KeyError is raised when the host attribute is
+                                           missing from server_dict (as happens with non-admin VIM credentials).
+        Returns:
+            vdu_data    (list): VDU details including state, flavor, host_info, AZ
         """
         self.logger.debug("Getting the status of VM")
         self.logger.debug("VIM VM ID %s", vm_id)
         self._reload_connection()
-        server = self.nova.servers.find(id=vm_id)
-        server_dict = server.to_dict()
+        server_dict = self._find_nova_server(vm_id)
+        srv_attr = "OS-EXT-SRV-ATTR:host"
+        host_info = (
+            server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
+        )
         vdu_data = [
             server_dict["status"],
             server_dict["flavor"]["id"],
-            server_dict["OS-EXT-SRV-ATTR:host"],
+            host_info,
             server_dict["OS-EXT-AZ:availability_zone"],
         ]
         self.logger.debug("vdu_data %s", vdu_data)
@@ -3662,6 +4410,7 @@ class vimconnector(vimconn.VimConnector):
                         az_check["zone_check"] = True
         return az_check
 
+    @catch_any_exception
     def migrate_instance(self, vm_id, compute_host=None):
         """
         Migrate a vdu
@@ -3671,80 +4420,76 @@ class vimconnector(vimconn.VimConnector):
         """
         self._reload_connection()
         vm_state = False
-        instance_state = self.get_vdu_state(vm_id)
+        instance_state = self.get_vdu_state(vm_id, host_is_required=True)
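+        # instance_state layout (see get_vdu_state):
+        # [status, flavor_id, host, availability_zone]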
         server_flavor_id = instance_state[1]
         server_hypervisor_name = instance_state[2]
         server_availability_zone = instance_state[3]
-        try:
-            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
-            server_flavor_details = [
-                server_flavor["ram"],
-                server_flavor["disk"],
-                server_flavor["vcpus"],
-            ]
-            if compute_host == server_hypervisor_name:
-                raise vimconn.VimConnException(
-                    "Unable to migrate instance '{}' to the same host '{}'".format(
-                        vm_id, compute_host
-                    ),
-                    http_code=vimconn.HTTP_Bad_Request,
-                )
-            az_status = self.check_availability_zone(
-                server_availability_zone,
-                server_flavor_details,
-                server_hypervisor_name,
-                compute_host,
+        server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
+        server_flavor_details = [
+            server_flavor["ram"],
+            server_flavor["disk"],
+            server_flavor["vcpus"],
+        ]
+        if compute_host == server_hypervisor_name:
+            raise vimconn.VimConnException(
+                "Unable to migrate instance '{}' to the same host '{}'".format(
+                    vm_id, compute_host
+                ),
+                http_code=vimconn.HTTP_Bad_Request,
             )
-            availability_zone_check = az_status["zone_check"]
-            available_compute_id = az_status.get("compute_availability")
+        az_status = self.check_availability_zone(
+            server_availability_zone,
+            server_flavor_details,
+            server_hypervisor_name,
+            compute_host,
+        )
+        availability_zone_check = az_status["zone_check"]
+        available_compute_id = az_status.get("compute_availability")
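+        # az_status sketch (illustrative):
+        # {"zone_check": True, "compute_availability": "compute-1"}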
 
-            if availability_zone_check is False:
-                raise vimconn.VimConnException(
-                    "Unable to migrate instance '{}' to a different availability zone".format(
-                        vm_id
-                    ),
-                    http_code=vimconn.HTTP_Bad_Request,
-                )
-            if available_compute_id is not None:
-                self.nova.servers.live_migrate(
-                    server=vm_id,
-                    host=available_compute_id,
-                    block_migration=True,
-                    disk_over_commit=False,
-                )
-                state = "MIGRATING"
-                changed_compute_host = ""
-                if state == "MIGRATING":
-                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
-                    changed_compute_host = self.get_vdu_state(vm_id)[2]
-                if vm_state and changed_compute_host == available_compute_id:
-                    self.logger.debug(
-                        "Instance '{}' migrated to the new compute host '{}'".format(
-                            vm_id, changed_compute_host
-                        )
-                    )
-                    return state, available_compute_id
-                else:
-                    raise vimconn.VimConnException(
-                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
-                            vm_id, available_compute_id
-                        ),
-                        http_code=vimconn.HTTP_Bad_Request,
+        if availability_zone_check is False:
+            raise vimconn.VimConnException(
+                "Unable to migrate instance '{}' to a different availability zone".format(
+                    vm_id
+                ),
+                http_code=vimconn.HTTP_Bad_Request,
+            )
+        if available_compute_id is not None:
+            # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
+            self.nova.servers.live_migrate(
+                server=vm_id,
+                host=available_compute_id,
+                block_migration=True,
+            )
+            state = "MIGRATING"
+            changed_compute_host = ""
+            if state == "MIGRATING":
+                vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
+                changed_compute_host = self.get_vdu_state(vm_id, host_is_required=True)[
+                    2
+                ]
+            if vm_state and changed_compute_host == available_compute_id:
+                self.logger.debug(
+                    "Instance '{}' migrated to the new compute host '{}'".format(
+                        vm_id, changed_compute_host
                     )
+                )
+                return state, available_compute_id
             else:
                 raise vimconn.VimConnException(
-                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
-                        available_compute_id
+                    "Migration Failed. Instance '{}' not moved to the new host {}".format(
+                        vm_id, available_compute_id
                     ),
                     http_code=vimconn.HTTP_Bad_Request,
                 )
-        except (
-            nvExceptions.BadRequest,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-        ) as e:
-            self._format_exception(e)
+        else:
+            raise vimconn.VimConnException(
+                "Compute '{}' not available or does not have enough resources to migrate the instance".format(
+                    available_compute_id
+                ),
+                http_code=vimconn.HTTP_Bad_Request,
+            )
 
+    @catch_any_exception
     def resize_instance(self, vm_id, new_flavor_id):
         """
         For resizing the vm based on the given
@@ -3759,37 +4504,30 @@ class vimconnector(vimconn.VimConnector):
         instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
         old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
         new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
-        try:
-            if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
-                if old_flavor_disk > new_flavor_disk:
+        if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
+            if old_flavor_disk > new_flavor_disk:
+                raise nvExceptions.BadRequest(
+                    400,
+                    message="Server disk resize failed. Resize to lower disk flavor is not allowed",
+                )
+            else:
+                self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
+                vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
+                if vm_state:
+                    instance_resized_status = self.confirm_resize(vm_id)
+                    return instance_resized_status
+                else:
                     raise nvExceptions.BadRequest(
-                        400,
-                        message="Server disk resize failed. Resize to lower disk flavor is not allowed",
+                        409,
+                        message="Cannot 'resize' vm_state is in ERROR",
                     )
-                else:
-                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
-                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
-                    if vm_state:
-                        instance_resized_status = self.confirm_resize(vm_id)
-                        return instance_resized_status
-                    else:
-                        raise nvExceptions.BadRequest(
-                            409,
-                            message="Cannot 'resize' vm_state is in ERROR",
-                        )
 
-            else:
-                self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
-                raise nvExceptions.BadRequest(
-                    409,
-                    message="Cannot 'resize' instance while it is in vm_state resized",
-                )
-        except (
-            nvExceptions.BadRequest,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-        ) as e:
-            self._format_exception(e)
+        else:
+            self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
+            raise nvExceptions.BadRequest(
+                409,
+                message="Cannot 'resize' instance while it is in vm_state resized",
+            )
 
     def confirm_resize(self, vm_id):
         """
@@ -3809,13 +4547,17 @@ class vimconnector(vimconn.VimConnector):
             self.logger.debug("Getting servers and ports data from Openstack VIMs.")
             self._reload_connection()
             all_servers = self.nova.servers.list(detailed=True)
+            try:
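+                # With compute API microversion >= 2.47 the flavor embedded in a
+                # server carries "original_name" instead of an id; resolve the
+                # id back by name so callers still get a flavor id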
+                for server in all_servers:
+                    if server.flavor.get("original_name"):
+                        server.flavor["id"] = self.nova.flavors.find(
+                            name=server.flavor["original_name"]
+                        ).id
+            except nvExceptions.NotFound as e:
+                self.logger.warning(str(e))
             all_ports = self.neutron.list_ports()
             return all_servers, all_ports
-        except (
-            vimconn.VimConnException,
-            vimconn.VimConnNotFoundException,
-            vimconn.VimConnConnectionException,
-        ) as e:
+        except Exception as e:
             raise vimconn.VimConnException(
                 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
             )