Update pip requirements to pass stage2 and stage3 in all modules
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
index b18cf78..bca9479 100644 (file)
@@ -38,9 +38,10 @@ from pprint import pformat
 import random
 import re
 import time
 import random
 import re
 import time
-from typing import Dict, Optional, Tuple
+from typing import Dict, List, Optional, Tuple
 
 from cinderclient import client as cClient
 
 from cinderclient import client as cClient
+import cinderclient.exceptions as cExceptions
 from glanceclient import client as glClient
 import glanceclient.exc as gl1Exceptions
 from keystoneauth1 import session
 from glanceclient import client as glClient
 import glanceclient.exc as gl1Exceptions
 from keystoneauth1 import session
@@ -85,6 +86,16 @@ volume_timeout = 1800
 server_timeout = 1800
 
 
 server_timeout = 1800
 
 
+def catch_any_exception(func):
+    def format_exception(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except Exception as e:
+            vimconnector._format_exception(e)
+
+    return format_exception
+
+
 class SafeDumper(yaml.SafeDumper):
     def represent_data(self, data):
         # Openstack APIs use custom subclasses of dict and YAML safe dumper
 class SafeDumper(yaml.SafeDumper):
     def represent_data(self, data):
         # Openstack APIs use custom subclasses of dict and YAML safe dumper
@@ -339,7 +350,7 @@ class vimconnector(vimconn.VimConnector):
             version = self.config.get("microversion")
 
             if not version:
             version = self.config.get("microversion")
 
             if not version:
-                version = "2.1"
+                version = "2.60"
 
             # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
             # Titanium cloud and StarlingX
 
             # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
             # Titanium cloud and StarlingX
@@ -355,12 +366,21 @@ class vimconnector(vimconn.VimConnector):
                 endpoint_type=self.endpoint_type,
                 region_name=region_name,
             )
                 endpoint_type=self.endpoint_type,
                 region_name=region_name,
             )
-            self.cinder = self.session["cinder"] = cClient.Client(
-                2,
-                session=sess,
-                endpoint_type=self.endpoint_type,
-                region_name=region_name,
-            )
+
+            if sess.get_all_version_data(service_type="volumev2"):
+                self.cinder = self.session["cinder"] = cClient.Client(
+                    2,
+                    session=sess,
+                    endpoint_type=self.endpoint_type,
+                    region_name=region_name,
+                )
+            else:
+                self.cinder = self.session["cinder"] = cClient.Client(
+                    3,
+                    session=sess,
+                    endpoint_type=self.endpoint_type,
+                    region_name=region_name,
+                )
 
             try:
                 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
 
             try:
                 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
@@ -516,7 +536,8 @@ class vimconnector(vimconn.VimConnector):
         # Types. Also, abstract vimconnector should call the validation
         # method before the implemented VIM connectors are called.
 
         # Types. Also, abstract vimconnector should call the validation
         # method before the implemented VIM connectors are called.
 
-    def _format_exception(self, exception):
+    @staticmethod
+    def _format_exception(exception):
         """Transform a keystone, nova, neutron  exception into a vimconn exception discovering the cause"""
         message_error = str(exception)
         tip = ""
         """Transform a keystone, nova, neutron  exception into a vimconn exception discovering the cause"""
         message_error = str(exception)
         tip = ""
@@ -526,8 +547,10 @@ class vimconnector(vimconn.VimConnector):
             (
                 neExceptions.NetworkNotFoundClient,
                 nvExceptions.NotFound,
             (
                 neExceptions.NetworkNotFoundClient,
                 nvExceptions.NotFound,
+                nvExceptions.ResourceNotFound,
                 ksExceptions.NotFound,
                 gl1Exceptions.HTTPNotFound,
                 ksExceptions.NotFound,
                 gl1Exceptions.HTTPNotFound,
+                cExceptions.NotFound,
             ),
         ):
             raise vimconn.VimConnNotFoundException(
             ),
         ):
             raise vimconn.VimConnNotFoundException(
@@ -542,6 +565,7 @@ class vimconnector(vimconn.VimConnector):
                 ConnectionError,
                 ksExceptions.ConnectionError,
                 neExceptions.ConnectionFailed,
                 ConnectionError,
                 ksExceptions.ConnectionError,
                 neExceptions.ConnectionFailed,
+                cExceptions.ConnectionError,
             ),
         ):
             if type(exception).__name__ == "SSLError":
             ),
         ):
             if type(exception).__name__ == "SSLError":
@@ -556,17 +580,26 @@ class vimconnector(vimconn.VimConnector):
                 KeyError,
                 nvExceptions.BadRequest,
                 ksExceptions.BadRequest,
                 KeyError,
                 nvExceptions.BadRequest,
                 ksExceptions.BadRequest,
+                gl1Exceptions.BadRequest,
+                cExceptions.BadRequest,
             ),
         ):
             ),
         ):
+            if message_error == "OS-EXT-SRV-ATTR:host":
+                tip = " (If the user does not have non-admin credentials, this attribute will be missing)"
+                raise vimconn.VimConnInsufficientCredentials(
+                    type(exception).__name__ + ": " + message_error + tip
+                )
             raise vimconn.VimConnException(
                 type(exception).__name__ + ": " + message_error
             )
             raise vimconn.VimConnException(
                 type(exception).__name__ + ": " + message_error
             )
+
         elif isinstance(
             exception,
             (
                 nvExceptions.ClientException,
                 ksExceptions.ClientException,
                 neExceptions.NeutronException,
         elif isinstance(
             exception,
             (
                 nvExceptions.ClientException,
                 ksExceptions.ClientException,
                 neExceptions.NeutronException,
+                cExceptions.ClientException,
             ),
         ):
             raise vimconn.VimConnUnexpectedResponse(
             ),
         ):
             raise vimconn.VimConnUnexpectedResponse(
@@ -579,9 +612,10 @@ class vimconnector(vimconn.VimConnector):
         elif isinstance(exception, vimconn.VimConnException):
             raise exception
         else:  # ()
         elif isinstance(exception, vimconn.VimConnException):
             raise exception
         else:  # ()
-            self.logger.error("General Exception " + message_error, exc_info=True)
+            logger = logging.getLogger("ro.vim.openstack")
+            logger.error("General Exception " + message_error, exc_info=True)
 
 
-            raise vimconn.VimConnConnectionException(
+            raise vimconn.VimConnException(
                 type(exception).__name__ + ": " + message_error
             )
 
                 type(exception).__name__ + ": " + message_error
             )
 
@@ -619,6 +653,32 @@ class vimconnector(vimconn.VimConnector):
                         "Not found security group {} for this tenant".format(sg)
                     )
 
                         "Not found security group {} for this tenant".format(sg)
                     )
 
+    def _find_nova_server(self, vm_id):
+        """
+        Returns the VM instance from Openstack and completes it with flavor ID
+        Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
+        """
+        try:
+            self._reload_connection()
+            server = self.nova.servers.find(id=vm_id)
+            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+            server_dict = server.to_dict()
+            try:
+                if server_dict["flavor"].get("original_name"):
+                    server_dict["flavor"]["id"] = self.nova.flavors.find(
+                        name=server_dict["flavor"]["original_name"]
+                    ).id
+            except nClient.exceptions.NotFound as e:
+                self.logger.warning(str(e.message))
+            return server_dict
+        except (
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
     def check_vim_connectivity(self):
         # just get network list to check connectivity and credentials
         self.get_network_list(filter_dict={})
     def check_vim_connectivity(self):
         # just get network list to check connectivity and credentials
         self.get_network_list(filter_dict={})
@@ -632,7 +692,6 @@ class vimconnector(vimconn.VimConnector):
         Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
         """
         self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
         Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
         """
         self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
-
         try:
             self._reload_connection()
 
         try:
             self._reload_connection()
 
@@ -662,7 +721,6 @@ class vimconnector(vimconn.VimConnector):
     def new_tenant(self, tenant_name, tenant_description):
         """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
         self.logger.debug("Adding a new tenant name: %s", tenant_name)
     def new_tenant(self, tenant_name, tenant_description):
         """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
         self.logger.debug("Adding a new tenant name: %s", tenant_name)
-
         try:
             self._reload_connection()
 
         try:
             self._reload_connection()
 
@@ -688,7 +746,6 @@ class vimconnector(vimconn.VimConnector):
     def delete_tenant(self, tenant_id):
         """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
     def delete_tenant(self, tenant_id):
         """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
-
         try:
             self._reload_connection()
 
         try:
             self._reload_connection()
 
@@ -698,6 +755,7 @@ class vimconnector(vimconn.VimConnector):
                 self.keystone.tenants.delete(tenant_id)
 
             return tenant_id
                 self.keystone.tenants.delete(tenant_id)
 
             return tenant_id
+
         except (
             ksExceptions.ConnectionError,
             ksExceptions.ClientException,
         except (
             ksExceptions.ConnectionError,
             ksExceptions.ClientException,
@@ -754,7 +812,7 @@ class vimconnector(vimconn.VimConnector):
             self._reload_connection()
             network_dict = {"name": net_name, "admin_state_up": True}
 
             self._reload_connection()
             network_dict = {"name": net_name, "admin_state_up": True}
 
-            if net_type in ("data", "ptp"):
+            if net_type in ("data", "ptp") or provider_network_profile:
                 provider_physical_network = None
 
                 if provider_network_profile and provider_network_profile.get(
                 provider_physical_network = None
 
                 if provider_network_profile and provider_network_profile.get(
@@ -787,7 +845,7 @@ class vimconnector(vimconn.VimConnector):
                         "dataplane_physical_net"
                     )
 
                         "dataplane_physical_net"
                     )
 
-                    # if it is non empty list, use the first value. If it is a string use the value directly
+                    # if it is non-empty list, use the first value. If it is a string use the value directly
                     if (
                         isinstance(provider_physical_network, (tuple, list))
                         and provider_physical_network
                     if (
                         isinstance(provider_physical_network, (tuple, list))
                         and provider_physical_network
@@ -880,7 +938,7 @@ class vimconnector(vimconn.VimConnector):
 
             if not ip_profile.get("subnet_address"):
                 # Fake subnet is required
 
             if not ip_profile.get("subnet_address"):
                 # Fake subnet is required
-                subnet_rand = random.randint(0, 255)
+                subnet_rand = random.SystemRandom().randint(0, 255)
                 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
 
             if "ip_version" not in ip_profile:
                 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
 
             if "ip_version" not in ip_profile:
@@ -925,6 +983,15 @@ class vimconnector(vimconn.VimConnector):
                 ip_str = str(netaddr.IPAddress(ip_int))
                 subnet["allocation_pools"][0]["end"] = ip_str
 
                 ip_str = str(netaddr.IPAddress(ip_int))
                 subnet["allocation_pools"][0]["end"] = ip_str
 
+            if (
+                ip_profile.get("ipv6_address_mode")
+                and ip_profile["ip_version"] != "IPv4"
+            ):
+                subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
+                # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
+                # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
+                subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
+
             # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
             self.neutron.create_subnet({"subnet": subnet})
 
             # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
             self.neutron.create_subnet({"subnet": subnet})
 
@@ -957,6 +1024,14 @@ class vimconnector(vimconn.VimConnector):
 
                     if k_item == "l2gwconn":
                         self.neutron.delete_l2_gateway_connection(k_id)
 
                     if k_item == "l2gwconn":
                         self.neutron.delete_l2_gateway_connection(k_id)
+
+                except (neExceptions.ConnectionFailed, ConnectionError) as e2:
+                    self.logger.error(
+                        "Error deleting l2 gateway connection: {}: {}".format(
+                            type(e2).__name__, e2
+                        )
+                    )
+                    self._format_exception(e2)
                 except Exception as e2:
                     self.logger.error(
                         "Error deleting l2 gateway connection: {}: {}".format(
                 except Exception as e2:
                     self.logger.error(
                         "Error deleting l2 gateway connection: {}: {}".format(
@@ -981,7 +1056,6 @@ class vimconnector(vimconn.VimConnector):
         Returns the network list of dictionaries
         """
         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
         Returns the network list of dictionaries
         """
         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
-
         try:
             self._reload_connection()
             filter_dict_os = filter_dict.copy()
         try:
             self._reload_connection()
             filter_dict_os = filter_dict.copy()
@@ -1041,6 +1115,7 @@ class vimconnector(vimconn.VimConnector):
 
         return net
 
 
         return net
 
+    @catch_any_exception
     def delete_network(self, net_id, created_items=None):
         """
         Removes a tenant network from VIM and its associated elements
     def delete_network(self, net_id, created_items=None):
         """
         Removes a tenant network from VIM and its associated elements
@@ -1064,6 +1139,14 @@ class vimconnector(vimconn.VimConnector):
                     k_item, _, k_id = k.partition(":")
                     if k_item == "l2gwconn":
                         self.neutron.delete_l2_gateway_connection(k_id)
                     k_item, _, k_id = k.partition(":")
                     if k_item == "l2gwconn":
                         self.neutron.delete_l2_gateway_connection(k_id)
+
+                except (neExceptions.ConnectionFailed, ConnectionError) as e:
+                    self.logger.error(
+                        "Error deleting l2 gateway connection: {}: {}".format(
+                            type(e).__name__, e
+                        )
+                    )
+                    self._format_exception(e)
                 except Exception as e:
                     self.logger.error(
                         "Error deleting l2 gateway connection: {}: {}".format(
                 except Exception as e:
                     self.logger.error(
                         "Error deleting l2 gateway connection: {}: {}".format(
@@ -1076,21 +1159,22 @@ class vimconnector(vimconn.VimConnector):
             for p in ports["ports"]:
                 try:
                     self.neutron.delete_port(p["id"])
             for p in ports["ports"]:
                 try:
                     self.neutron.delete_port(p["id"])
+
+                except (neExceptions.ConnectionFailed, ConnectionError) as e:
+                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))
+                    # If there is connection error, it raises.
+                    self._format_exception(e)
                 except Exception as e:
                     self.logger.error("Error deleting port %s: %s", p["id"], str(e))
 
             self.neutron.delete_network(net_id)
 
             return net_id
                 except Exception as e:
                     self.logger.error("Error deleting port %s: %s", p["id"], str(e))
 
             self.neutron.delete_network(net_id)
 
             return net_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NetworkNotFoundClient,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        except (neExceptions.NetworkNotFoundClient, neExceptions.NotFound) as e:
+            # If network to be deleted is not found, it does not raise.
+            self.logger.warning(
+                f"Error deleting network: {net_id} is not found, {str(e)}"
+            )
 
     def refresh_nets_status(self, net_list):
         """Get the status of the networks
 
     def refresh_nets_status(self, net_list):
         """Get the status of the networks
@@ -1143,13 +1227,11 @@ class vimconnector(vimconn.VimConnector):
     def get_flavor(self, flavor_id):
         """Obtain flavor details from the  VIM. Returns the flavor dict details"""
         self.logger.debug("Getting flavor '%s'", flavor_id)
     def get_flavor(self, flavor_id):
         """Obtain flavor details from the  VIM. Returns the flavor dict details"""
         self.logger.debug("Getting flavor '%s'", flavor_id)
-
         try:
             self._reload_connection()
             flavor = self.nova.flavors.find(id=flavor_id)
         try:
             self._reload_connection()
             flavor = self.nova.flavors.find(id=flavor_id)
-            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-
             return flavor.to_dict()
             return flavor.to_dict()
+
         except (
             nvExceptions.NotFound,
             nvExceptions.ClientException,
         except (
             nvExceptions.NotFound,
             nvExceptions.ClientException,
@@ -1221,17 +1303,21 @@ class vimconnector(vimconn.VimConnector):
             )
         except (
             nvExceptions.NotFound,
             )
         except (
             nvExceptions.NotFound,
+            nvExceptions.BadRequest,
             nvExceptions.ClientException,
             ksExceptions.ClientException,
             ConnectionError,
         ) as e:
             self._format_exception(e)
 
             nvExceptions.ClientException,
             ksExceptions.ClientException,
             ConnectionError,
         ) as e:
             self._format_exception(e)
 
-    def process_resource_quota(self, quota, prefix, extra_specs):
-        """
-        :param prefix:
-        :param extra_specs:
-        :return:
+    @staticmethod
+    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
+        """Process resource quota and fill up extra_specs.
+        Args:
+            quota       (dict):         Keeping the quota of resources
+            prefix      (str)           Prefix
+            extra_specs (dict)          Dict to be filled to be used during flavor creation
+
         """
         if "limit" in quota:
             extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
         """
         if "limit" in quota:
             extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
@@ -1243,200 +1329,321 @@ class vimconnector(vimconn.VimConnector):
             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
             extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
 
             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
             extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
 
-    def new_flavor(self, flavor_data, change_name_if_used=True):
-        """Adds a tenant flavor to openstack VIM
-        if change_name_if_used is True, it will change name in case of conflict, because it is not supported name
-         repetition
-        Returns the flavor identifier
+    @staticmethod
+    def process_numa_memory(
+        numa: dict, node_id: Optional[int], extra_specs: dict
+    ) -> None:
+        """Set the memory in extra_specs.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            node_id     (int):          ID of numa node
+            extra_specs (dict):         To be filled.
+
+        """
+        if not numa.get("memory"):
+            return
+        memory_mb = numa["memory"] * 1024
+        memory = "hw:numa_mem.{}".format(node_id)
+        extra_specs[memory] = int(memory_mb)
+
+    @staticmethod
+    def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
+        """Set the cpu in extra_specs.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            node_id     (int):          ID of numa node
+            extra_specs (dict):         To be filled.
+
+        """
+        if not numa.get("vcpu"):
+            return
+        vcpu = numa["vcpu"]
+        cpu = "hw:numa_cpus.{}".format(node_id)
+        vcpu = ",".join(map(str, vcpu))
+        extra_specs[cpu] = vcpu
+
+    @staticmethod
+    def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has paired-threads.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        Returns:
+            threads       (int)           Number of virtual cpus
+
+        """
+        if not numa.get("paired-threads"):
+            return
+
+        # cpu_thread_policy "require" implies that compute node must have an SMT architecture
+        threads = numa["paired-threads"] * 2
+        extra_specs["hw:cpu_thread_policy"] = "require"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return threads
+
+    @staticmethod
+    def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has cores.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        Returns:
+            cores       (int)           Number of virtual cpus
+
+        """
+        # cpu_thread_policy "isolate" implies that the host must not have an SMT
+        # architecture, or a non-SMT architecture will be emulated
+        if not numa.get("cores"):
+            return
+        cores = numa["cores"]
+        extra_specs["hw:cpu_thread_policy"] = "isolate"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return cores
+
+    @staticmethod
+    def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has threads.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        Returns:
+            threads       (int)           Number of virtual cpus
+
+        """
+        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
+        if not numa.get("threads"):
+            return
+        threads = numa["threads"]
+        extra_specs["hw:cpu_thread_policy"] = "prefer"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return threads
+
+    def _process_numa_parameters_of_flavor(
+        self, numas: List, extra_specs: Dict
+    ) -> None:
+        """Process numa parameters and fill up extra_specs.
+
+        Args:
+            numas   (list):             List of dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        """
+        numa_nodes = len(numas)
+        extra_specs["hw:numa_nodes"] = str(numa_nodes)
+        cpu_cores, cpu_threads = 0, 0
+
+        if self.vim_type == "VIO":
+            self.process_vio_numa_nodes(numa_nodes, extra_specs)
+
+        for numa in numas:
+            if "id" in numa:
+                node_id = numa["id"]
+                # overwrite ram and vcpus
+                # check if key "memory" is present in numa else use ram value at flavor
+                self.process_numa_memory(numa, node_id, extra_specs)
+                self.process_numa_vcpu(numa, node_id, extra_specs)
+
+            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
+            extra_specs["hw:cpu_sockets"] = str(numa_nodes)
+
+            if "paired-threads" in numa:
+                threads = self.process_numa_paired_threads(numa, extra_specs)
+                cpu_threads += threads
+
+            elif "cores" in numa:
+                cores = self.process_numa_cores(numa, extra_specs)
+                cpu_cores += cores
+
+            elif "threads" in numa:
+                threads = self.process_numa_threads(numa, extra_specs)
+                cpu_threads += threads
+
+        if cpu_cores:
+            extra_specs["hw:cpu_cores"] = str(cpu_cores)
+        if cpu_threads:
+            extra_specs["hw:cpu_threads"] = str(cpu_threads)
+
+    @staticmethod
+    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
+        """According to number of numa nodes, updates the extra_specs for VIO.
+
+        Args:
+
+            numa_nodes      (int):         Number of numa nodes
+            extra_specs     (dict):        Extra specs dict to be updated
+
+        """
+        # If there are several numas, we do not define specific affinity.
+        extra_specs["vmware:latency_sensitivity_level"] = "high"
+
+    def _change_flavor_name(
+        self, name: str, name_suffix: int, flavor_data: dict
+    ) -> str:
+        """Change the flavor name if the name already exists.
+
+        Args:
+            name    (str):          Flavor name to be checked
+            name_suffix (int):      Suffix to be appended to name
+            flavor_data (dict):     Flavor dict
+
+        Returns:
+            name    (str):          New flavor name to be used
+
+        """
+        # Get used names
+        fl = self.nova.flavors.list()
+        fl_names = [f.name for f in fl]
+
+        while name in fl_names:
+            name_suffix += 1
+            name = flavor_data["name"] + "-" + str(name_suffix)
+
+        return name
+
+    def _process_extended_config_of_flavor(
+        self, extended: dict, extra_specs: dict
+    ) -> None:
+        """Process the extended dict to fill up extra_specs.
+        Args:
+
+            extended                    (dict):         Keeping the extra specification of flavor
+            extra_specs                 (dict)          Dict to be filled to be used during flavor creation
+
+        """
+        quotas = {
+            "cpu-quota": "cpu",
+            "mem-quota": "memory",
+            "vif-quota": "vif",
+            "disk-io-quota": "disk_io",
+        }
+
+        page_sizes = {
+            "LARGE": "large",
+            "SMALL": "small",
+            "SIZE_2MB": "2MB",
+            "SIZE_1GB": "1GB",
+            "PREFER_LARGE": "any",
+        }
+
+        policies = {
+            "cpu-pinning-policy": "hw:cpu_policy",
+            "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
+            "mem-policy": "hw:numa_mempolicy",
+        }
+
+        numas = extended.get("numas")
+        if numas:
+            self._process_numa_parameters_of_flavor(numas, extra_specs)
+
+        for quota, item in quotas.items():
+            if quota in extended.keys():
+                self.process_resource_quota(extended.get(quota), item, extra_specs)
+
+        # Set the mempage size as specified in the descriptor
+        if extended.get("mempage-size"):
+            if extended["mempage-size"] in page_sizes.keys():
+                extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
+            else:
+                # Normally, validations in NBI should not allow to this condition.
+                self.logger.debug(
+                    "Invalid mempage-size %s. Will be ignored",
+                    extended.get("mempage-size"),
+                )
+
+        for policy, hw_policy in policies.items():
+            if extended.get(policy):
+                extra_specs[hw_policy] = extended[policy].lower()
+
+    @staticmethod
+    def _get_flavor_details(flavor_data: dict) -> Tuple:
+        """Returns the details of flavor
+        Args:
+            flavor_data     (dict):     Dictionary that includes required flavor details
+
+        Returns:
+            ram, vcpus, extra_specs, extended   (tuple):    Main items of required flavor
+
+        """
+        return (
+            flavor_data.get("ram", 64),
+            flavor_data.get("vcpus", 1),
+            {},
+            flavor_data.get("extended"),
+        )
+
+    @catch_any_exception
+    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
+        """Adds a tenant flavor to openstack VIM.
+        if change_name_if_used is True, it will change name in case of conflict,
+        because it is not supported name repetition.
+
+        Args:
+            flavor_data (dict):             Flavor details to be processed
+            change_name_if_used (bool):     Change name in case of conflict
+
+        Returns:
+             flavor_id  (str):     flavor identifier
+
         """
         self.logger.debug("Adding flavor '%s'", str(flavor_data))
         retry = 0
         max_retries = 3
         name_suffix = 0
         """
         self.logger.debug("Adding flavor '%s'", str(flavor_data))
         retry = 0
         max_retries = 3
         name_suffix = 0
+        name = flavor_data["name"]
+        while retry < max_retries:
+            retry += 1
+            try:
+                self._reload_connection()
 
 
-        try:
-            name = flavor_data["name"]
-            while retry < max_retries:
-                retry += 1
-                try:
-                    self._reload_connection()
+                if change_name_if_used:
+                    name = self._change_flavor_name(name, name_suffix, flavor_data)
 
 
-                    if change_name_if_used:
-                        # get used names
-                        fl_names = []
-                        fl = self.nova.flavors.list()
-
-                        for f in fl:
-                            fl_names.append(f.name)
-
-                        while name in fl_names:
-                            name_suffix += 1
-                            name = flavor_data["name"] + "-" + str(name_suffix)
-
-                    ram = flavor_data.get("ram", 64)
-                    vcpus = flavor_data.get("vcpus", 1)
-                    extra_specs = {}
-
-                    extended = flavor_data.get("extended")
-                    if extended:
-                        numas = extended.get("numas")
-
-                        if numas:
-                            numa_nodes = len(numas)
-
-                            extra_specs["hw:numa_nodes"] = str(numa_nodes)
-
-                            if self.vim_type == "VIO":
-                                extra_specs[
-                                    "vmware:extra_config"
-                                ] = '{"numa.nodeAffinity":"0"}'
-                                extra_specs["vmware:latency_sensitivity_level"] = "high"
-
-                            for numa in numas:
-                                if "id" in numa:
-                                    node_id = numa["id"]
-
-                                    if "memory" in numa:
-                                        memory_mb = numa["memory"] * 1024
-                                        memory = "hw:numa_mem.{}".format(node_id)
-                                        extra_specs[memory] = int(memory_mb)
-
-                                    if "vcpu" in numa:
-                                        vcpu = numa["vcpu"]
-                                        cpu = "hw:numa_cpus.{}".format(node_id)
-                                        vcpu = ",".join(map(str, vcpu))
-                                        extra_specs[cpu] = vcpu
-
-                                # overwrite ram and vcpus
-                                # check if key "memory" is present in numa else use ram value at flavor
-                                # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
-                                # implemented/virt-driver-cpu-thread-pinning.html
-                                extra_specs["hw:cpu_sockets"] = str(numa_nodes)
-
-                                if "paired-threads" in numa:
-                                    vcpus = numa["paired-threads"] * 2
-                                    # cpu_thread_policy "require" implies that the compute node must have an
-                                    # STM architecture
-                                    extra_specs["hw:cpu_thread_policy"] = "require"
-                                    extra_specs["hw:cpu_policy"] = "dedicated"
-                                elif "cores" in numa:
-                                    vcpus = numa["cores"]
-                                    # cpu_thread_policy "prefer" implies that the host must not have an SMT
-                                    # architecture, or a non-SMT architecture will be emulated
-                                    extra_specs["hw:cpu_thread_policy"] = "isolate"
-                                    extra_specs["hw:cpu_policy"] = "dedicated"
-                                elif "threads" in numa:
-                                    vcpus = numa["threads"]
-                                    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
-                                    # architecture
-                                    extra_specs["hw:cpu_thread_policy"] = "prefer"
-                                    extra_specs["hw:cpu_policy"] = "dedicated"
-                                # for interface in numa.get("interfaces",() ):
-                                #     if interface["dedicated"]=="yes":
-                                #         raise vimconn.VimConnException("Passthrough interfaces are not supported
-                                #         for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
-                                #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
-                                #      when a way to connect it is available
-                        elif extended.get("cpu-quota"):
-                            self.process_resource_quota(
-                                extended.get("cpu-quota"), "cpu", extra_specs
-                            )
+                ram, vcpus, extra_specs, extended = self._get_flavor_details(
+                    flavor_data
+                )
+                if extended:
+                    self._process_extended_config_of_flavor(extended, extra_specs)
 
 
-                        if extended.get("mem-quota"):
-                            self.process_resource_quota(
-                                extended.get("mem-quota"), "memory", extra_specs
-                            )
+                # Create flavor
 
 
-                        if extended.get("vif-quota"):
-                            self.process_resource_quota(
-                                extended.get("vif-quota"), "vif", extra_specs
-                            )
+                new_flavor = self.nova.flavors.create(
+                    name=name,
+                    ram=ram,
+                    vcpus=vcpus,
+                    disk=flavor_data.get("disk", 0),
+                    ephemeral=flavor_data.get("ephemeral", 0),
+                    swap=flavor_data.get("swap", 0),
+                    is_public=flavor_data.get("is_public", True),
+                )
 
 
-                        if extended.get("disk-io-quota"):
-                            self.process_resource_quota(
-                                extended.get("disk-io-quota"), "disk_io", extra_specs
-                            )
+                # Add metadata
+                if extra_specs:
+                    new_flavor.set_keys(extra_specs)
 
 
-                        # Set the mempage size as specified in the descriptor
-                        if extended.get("mempage-size"):
-                            if extended.get("mempage-size") == "LARGE":
-                                extra_specs["hw:mem_page_size"] = "large"
-                            elif extended.get("mempage-size") == "SMALL":
-                                extra_specs["hw:mem_page_size"] = "small"
-                            elif extended.get("mempage-size") == "SIZE_2MB":
-                                extra_specs["hw:mem_page_size"] = "2MB"
-                            elif extended.get("mempage-size") == "SIZE_1GB":
-                                extra_specs["hw:mem_page_size"] = "1GB"
-                            elif extended.get("mempage-size") == "PREFER_LARGE":
-                                extra_specs["hw:mem_page_size"] = "any"
-                            else:
-                                # The validations in NBI should make reaching here not possible.
-                                # If this message is shown, check validations
-                                self.logger.debug(
-                                    "Invalid mempage-size %s. Will be ignored",
-                                    extended.get("mempage-size"),
-                                )
-                        if extended.get("cpu-pinning-policy"):
-                            extra_specs["hw:cpu_policy"] = extended.get(
-                                "cpu-pinning-policy"
-                            ).lower()
-
-                        # Set the cpu thread pinning policy as specified in the descriptor
-                        if extended.get("cpu-thread-pinning-policy"):
-                            extra_specs["hw:cpu_thread_policy"] = extended.get(
-                                "cpu-thread-pinning-policy"
-                            ).lower()
-
-                        # Set the mem policy as specified in the descriptor
-                        if extended.get("mem-policy"):
-                            extra_specs["hw:numa_mempolicy"] = extended.get(
-                                "mem-policy"
-                            ).lower()
-
-                    # create flavor
-                    new_flavor = self.nova.flavors.create(
-                        name=name,
-                        ram=ram,
-                        vcpus=vcpus,
-                        disk=flavor_data.get("disk", 0),
-                        ephemeral=flavor_data.get("ephemeral", 0),
-                        swap=flavor_data.get("swap", 0),
-                        is_public=flavor_data.get("is_public", True),
-                    )
-                    # add metadata
-                    if extra_specs:
-                        new_flavor.set_keys(extra_specs)
+                return new_flavor.id
 
 
-                    return new_flavor.id
-                except nvExceptions.Conflict as e:
-                    if change_name_if_used and retry < max_retries:
-                        continue
+            except nvExceptions.Conflict as e:
+                if change_name_if_used and retry < max_retries:
+                    continue
 
 
-                    self._format_exception(e)
-        # except nvExceptions.BadRequest as e:
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-            KeyError,
-        ) as e:
-            self._format_exception(e)
+                self._format_exception(e)
 
 
+    @catch_any_exception
     def delete_flavor(self, flavor_id):
         """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
         try:
             self._reload_connection()
             self.nova.flavors.delete(flavor_id)
     def delete_flavor(self, flavor_id):
         """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
         try:
             self._reload_connection()
             self.nova.flavors.delete(flavor_id)
-
             return flavor_id
             return flavor_id
-        # except nvExceptions.BadRequest as e:
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+
+        except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
+            # If flavor is not found, it does not raise.
+            self.logger.warning(
+                f"Error deleting flavor: {flavor_id} is not found, {str(e)}"
+            )
 
     def new_image(self, image_dict):
         """
 
     def new_image(self, image_dict):
         """
@@ -1519,12 +1726,6 @@ class vimconnector(vimconn.VimConnector):
                 self.glance.images.update(new_image.id, **metadata_to_load)
 
                 return new_image.id
                 self.glance.images.update(new_image.id, **metadata_to_load)
 
                 return new_image.id
-            except (
-                nvExceptions.Conflict,
-                ksExceptions.ClientException,
-                nvExceptions.ClientException,
-            ) as e:
-                self._format_exception(e)
             except (
                 HTTPException,
                 gl1Exceptions.HTTPException,
             except (
                 HTTPException,
                 gl1Exceptions.HTTPException,
@@ -1540,7 +1741,10 @@ class vimconnector(vimconn.VimConnector):
                     "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                     http_code=vimconn.HTTP_Bad_Request,
                 )
                     "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                     http_code=vimconn.HTTP_Bad_Request,
                 )
+            except Exception as e:
+                self._format_exception(e)
 
 
+    @catch_any_exception
     def delete_image(self, image_id):
         """Deletes a tenant image from openstack VIM. Returns the old id"""
         try:
     def delete_image(self, image_id):
         """Deletes a tenant image from openstack VIM. Returns the old id"""
         try:
@@ -1548,36 +1752,25 @@ class vimconnector(vimconn.VimConnector):
             self.glance.images.delete(image_id)
 
             return image_id
             self.glance.images.delete(image_id)
 
             return image_id
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            gl1Exceptions.CommunicationError,
-            gl1Exceptions.HTTPNotFound,
-            ConnectionError,
-        ) as e:  # TODO remove
-            self._format_exception(e)
+        except gl1Exceptions.NotFound as e:
+            # If image is not found, it does not raise.
+            self.logger.warning(
+                f"Error deleting image: {image_id} is not found, {str(e)}"
+            )
 
 
+    @catch_any_exception
     def get_image_id_from_path(self, path):
         """Get the image id from image path in the VIM database. Returns the image_id"""
     def get_image_id_from_path(self, path):
         """Get the image id from image path in the VIM database. Returns the image_id"""
-        try:
-            self._reload_connection()
-            images = self.glance.images.list()
+        self._reload_connection()
+        images = self.glance.images.list()
 
 
-            for image in images:
-                if image.metadata.get("location") == path:
-                    return image.id
+        for image in images:
+            if image.metadata.get("location") == path:
+                return image.id
 
 
-            raise vimconn.VimConnNotFoundException(
-                "image with location '{}' not found".format(path)
-            )
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            gl1Exceptions.CommunicationError,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        raise vimconn.VimConnNotFoundException(
+            "image with location '{}' not found".format(path)
+        )
 
     def get_image_list(self, filter_dict={}):
         """Obtain tenant images from VIM
 
     def get_image_list(self, filter_dict={}):
         """Obtain tenant images from VIM
@@ -1590,7 +1783,6 @@ class vimconnector(vimconn.VimConnector):
             List can be empty
         """
         self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
             List can be empty
         """
         self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
-
         try:
             self._reload_connection()
             # filter_dict_os = filter_dict.copy()
         try:
             self._reload_connection()
             # filter_dict_os = filter_dict.copy()
@@ -1617,6 +1809,7 @@ class vimconnector(vimconn.VimConnector):
                     pass
 
             return filtered_list
                     pass
 
             return filtered_list
+
         except (
             ksExceptions.ClientException,
             nvExceptions.ClientException,
         except (
             ksExceptions.ClientException,
             nvExceptions.ClientException,
@@ -1752,7 +1945,6 @@ class vimconnector(vimconn.VimConnector):
 
         # For VF
         elif net["type"] == "VF" or net["type"] == "SR-IOV":
 
         # For VF
         elif net["type"] == "VF" or net["type"] == "SR-IOV":
-
             port_dict["binding:vnic_type"] = "direct"
 
             # VIO specific Changes
             port_dict["binding:vnic_type"] = "direct"
 
             # VIO specific Changes
@@ -1794,8 +1986,14 @@ class vimconnector(vimconn.VimConnector):
         if net.get("mac_address"):
             port_dict["mac_address"] = net["mac_address"]
 
         if net.get("mac_address"):
             port_dict["mac_address"] = net["mac_address"]
 
-        if net.get("ip_address"):
-            port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
+        ip_dual_list = []
+        if ip_list := net.get("ip_address"):
+            if not isinstance(ip_list, list):
+                ip_list = [ip_list]
+            for ip in ip_list:
+                ip_dict = {"ip_address": ip}
+                ip_dual_list.append(ip_dict)
+            port_dict["fixed_ips"] = ip_dual_list
             # TODO add "subnet_id": <subnet_id>
 
     def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
             # TODO add "subnet_id": <subnet_id>
 
     def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
@@ -1812,7 +2010,7 @@ class vimconnector(vimconn.VimConnector):
         """
         new_port = self.neutron.create_port({"port": port_dict})
         created_items["port:" + str(new_port["port"]["id"])] = True
         """
         new_port = self.neutron.create_port({"port": port_dict})
         created_items["port:" + str(new_port["port"]["id"])] = True
-        net["mac_adress"] = new_port["port"]["mac_address"]
+        net["mac_address"] = new_port["port"]["mac_address"]
         net["vim_id"] = new_port["port"]["id"]
 
         return new_port
         net["vim_id"] = new_port["port"]["id"]
 
         return new_port
@@ -1940,7 +2138,6 @@ class vimconnector(vimconn.VimConnector):
         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
 
         if disk.get(key_id):
         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
 
         if disk.get(key_id):
-
             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
             existing_vim_volumes.append({"id": disk[key_id]})
 
             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
             existing_vim_volumes.append({"id": disk[key_id]})
 
@@ -1954,22 +2151,103 @@ class vimconnector(vimconn.VimConnector):
                 availability_zone=vm_av_zone,
             )
             boot_volume_id = volume.id
                 availability_zone=vm_av_zone,
             )
             boot_volume_id = volume.id
-            created_items["volume:" + str(volume.id)] = True
-            block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+            self.update_block_device_mapping(
+                volume=volume,
+                block_device_mapping=block_device_mapping,
+                base_disk_index=base_disk_index,
+                disk=disk,
+                created_items=created_items,
+            )
 
             return boot_volume_id
 
 
             return boot_volume_id
 
-    def _prepare_non_root_persistent_volumes(
-        self,
-        name: str,
-        disk: dict,
-        vm_av_zone: list,
+    @staticmethod
+    def update_block_device_mapping(
+        volume: object,
         block_device_mapping: dict,
         base_disk_index: int,
         block_device_mapping: dict,
         base_disk_index: int,
-        existing_vim_volumes: list,
+        disk: dict,
         created_items: dict,
     ) -> None:
         created_items: dict,
     ) -> None:
-        """Prepare persistent volumes for new VM instance.
+        """Add volume information to block device mapping dict.
+        Args:
+            volume  (object):                   Created volume object
+            block_device_mapping    (dict):     Block device details
+            base_disk_index (int):              Disk index
+            disk    (dict):                     Disk details
+            created_items   (dict):             All created items belongs to VM
+        """
+        if not volume:
+            raise vimconn.VimConnException("Volume is empty.")
+
+        if not hasattr(volume, "id"):
+            raise vimconn.VimConnException(
+                "Created volume is not valid, does not have id attribute."
+            )
+
+        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+        if disk.get("multiattach"):  # multiattach volumes do not belong to VDUs
+            return
+        volume_txt = "volume:" + str(volume.id)
+        if disk.get("keep"):
+            volume_txt += ":keep"
+        created_items[volume_txt] = True
+
+    @catch_any_exception
+    def new_shared_volumes(self, shared_volume_data) -> Tuple[str, str]:
+        volume = self.cinder.volumes.create(
+            size=shared_volume_data["size"],
+            name=shared_volume_data["name"],
+            volume_type="multiattach",
+        )
+        return volume.name, volume.id
+
+    def _prepare_shared_volumes(
+        self,
+        name: str,
+        disk: dict,
+        base_disk_index: int,
+        block_device_mapping: dict,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ):
+        volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
+        if volumes.get(disk["name"]):
+            sv_id = volumes[disk["name"]]
+            max_retries = 3
+            vol_status = ""
+            # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
+            while max_retries:
+                max_retries -= 1
+                volume = self.cinder.volumes.get(sv_id)
+                vol_status = volume.status
+                if volume.status not in ("in-use", "available"):
+                    time.sleep(5)
+                    continue
+                self.update_block_device_mapping(
+                    volume=volume,
+                    block_device_mapping=block_device_mapping,
+                    base_disk_index=base_disk_index,
+                    disk=disk,
+                    created_items=created_items,
+                )
+                return
+            raise vimconn.VimConnException(
+                "Shared volume is not prepared, status is: {}".format(vol_status),
+                http_code=vimconn.HTTP_Internal_Server_Error,
+            )
+
+    def _prepare_non_root_persistent_volumes(
+        self,
+        name: str,
+        disk: dict,
+        vm_av_zone: list,
+        block_device_mapping: dict,
+        base_disk_index: int,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ) -> None:
+        """Prepare persistent volumes for new VM instance.
 
         Args:
             name    (str):                      Name of VM instance
 
         Args:
             name    (str):                      Name of VM instance
@@ -1983,23 +2261,25 @@ class vimconnector(vimconn.VimConnector):
         # Non-root persistent volumes
         # Disk may include only vim_volume_id or only vim_id."
         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
         # Non-root persistent volumes
         # Disk may include only vim_volume_id or only vim_id."
         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
-
         if disk.get(key_id):
         if disk.get(key_id):
-
             # Use existing persistent volume
             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
             existing_vim_volumes.append({"id": disk[key_id]})
             # Use existing persistent volume
             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
             existing_vim_volumes.append({"id": disk[key_id]})
-
         else:
         else:
-            # Create persistent volume
+            volume_name = f"{name}vd{chr(base_disk_index)}"
             volume = self.cinder.volumes.create(
                 size=disk["size"],
             volume = self.cinder.volumes.create(
                 size=disk["size"],
-                name=name + "vd" + chr(base_disk_index),
+                name=volume_name,
                 # Make sure volume is in the same AZ as the VM to be attached to
                 availability_zone=vm_av_zone,
             )
                 # Make sure volume is in the same AZ as the VM to be attached to
                 availability_zone=vm_av_zone,
             )
-            created_items["volume:" + str(volume.id)] = True
-            block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+            self.update_block_device_mapping(
+                volume=volume,
+                block_device_mapping=block_device_mapping,
+                base_disk_index=base_disk_index,
+                disk=disk,
+                created_items=created_items,
+            )
 
     def _wait_for_created_volumes_availability(
         self, elapsed_time: int, created_items: dict
 
     def _wait_for_created_volumes_availability(
         self, elapsed_time: int, created_items: dict
@@ -2014,12 +2294,20 @@ class vimconnector(vimconn.VimConnector):
             elapsed_time    (int):          Time spent while waiting
 
         """
             elapsed_time    (int):          Time spent while waiting
 
         """
-
         while elapsed_time < volume_timeout:
             for created_item in created_items:
         while elapsed_time < volume_timeout:
             for created_item in created_items:
-                v, _, volume_id = created_item.partition(":")
+                v, volume_id = (
+                    created_item.split(":")[0],
+                    created_item.split(":")[1],
+                )
                 if v == "volume":
                 if v == "volume":
-                    if self.cinder.volumes.get(volume_id).status != "available":
+                    volume = self.cinder.volumes.get(volume_id)
+                    if (
+                        volume.volume_type == "multiattach"
+                        and volume.status == "in-use"
+                    ):
+                        return elapsed_time
+                    elif volume.status != "available":
                         break
             else:
                 # All ready: break from while
                         break
             else:
                 # All ready: break from while
@@ -2046,7 +2334,10 @@ class vimconnector(vimconn.VimConnector):
 
         while elapsed_time < volume_timeout:
             for volume in existing_vim_volumes:
 
         while elapsed_time < volume_timeout:
             for volume in existing_vim_volumes:
-                if self.cinder.volumes.get(volume["id"]).status != "available":
+                v = self.cinder.volumes.get(volume["id"])
+                if v.volume_type == "multiattach" and v.status == "in-use":
+                    return elapsed_time
+                elif v.status != "available":
                     break
             else:  # all ready: break from while
                 break
                     break
             else:  # all ready: break from while
                 break
@@ -2080,7 +2371,6 @@ class vimconnector(vimconn.VimConnector):
         base_disk_index = ord("b")
         boot_volume_id = None
         elapsed_time = 0
         base_disk_index = ord("b")
         boot_volume_id = None
         elapsed_time = 0
-
         for disk in disk_list:
             if "image_id" in disk:
                 # Root persistent volume
         for disk in disk_list:
             if "image_id" in disk:
                 # Root persistent volume
@@ -2094,6 +2384,15 @@ class vimconnector(vimconn.VimConnector):
                     existing_vim_volumes=existing_vim_volumes,
                     created_items=created_items,
                 )
                     existing_vim_volumes=existing_vim_volumes,
                     created_items=created_items,
                 )
+            elif disk.get("multiattach"):
+                self._prepare_shared_volumes(
+                    name=name,
+                    disk=disk,
+                    base_disk_index=base_disk_index,
+                    block_device_mapping=block_device_mapping,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
             else:
                 # Non-root persistent volume
                 self._prepare_non_root_persistent_volumes(
             else:
                 # Non-root persistent volume
                 self._prepare_non_root_persistent_volumes(
@@ -2262,14 +2561,13 @@ class vimconnector(vimconn.VimConnector):
         return self.neutron.show_floatingip(free_floating_ip)
 
     def _get_free_floating_ip(
         return self.neutron.show_floatingip(free_floating_ip)
 
     def _get_free_floating_ip(
-        self, server: object, floating_network: dict, created_items: dict
+        self, server: object, floating_network: dict
     ) -> Optional[str]:
         """Get the free floating IP address.
 
         Args:
             server  (object):               Server Object
             floating_network    (dict):     Floating network details
     ) -> Optional[str]:
         """Get the free floating IP address.
 
         Args:
             server  (object):               Server Object
             floating_network    (dict):     Floating network details
-            created_items   (dict):         All created items belongs to new VM instance
 
         Returns:
             free_floating_ip    (str):      Free floating ip addr
 
         Returns:
             free_floating_ip    (str):      Free floating ip addr
@@ -2281,9 +2579,7 @@ class vimconnector(vimconn.VimConnector):
         # Randomize
         random.shuffle(floating_ips)
 
         # Randomize
         random.shuffle(floating_ips)
 
-        return self._find_floating_ip(
-            server, floating_ips, floating_network, created_items
-        )
+        return self._find_floating_ip(server, floating_ips, floating_network)
 
     def _prepare_external_network_for_vminstance(
         self,
 
     def _prepare_external_network_for_vminstance(
         self,
@@ -2311,9 +2607,8 @@ class vimconnector(vimconn.VimConnector):
                 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                 # several times
                 while not assigned:
                 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                 # several times
                 while not assigned:
-
                     free_floating_ip = self._get_free_floating_ip(
                     free_floating_ip = self._get_free_floating_ip(
-                        server, floating_network, created_items
+                        server, floating_network
                     )
 
                     if not free_floating_ip:
                     )
 
                     if not free_floating_ip:
@@ -2408,7 +2703,6 @@ class vimconnector(vimconn.VimConnector):
                 self.neutron.update_port(port[0], port_update)
 
             except Exception:
                 self.neutron.update_port(port[0], port_update)
 
             except Exception:
-
                 raise vimconn.VimConnException(
                     "It was not possible to disable port security for port {}".format(
                         port[0]
                 raise vimconn.VimConnException(
                     "It was not possible to disable port security for port {}".format(
                         port[0]
@@ -2478,6 +2772,7 @@ class vimconnector(vimconn.VimConnector):
             the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
             as not present.
             the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
             as not present.
+
         """
         self.logger.debug(
             "new_vminstance input: image='%s' flavor='%s' nics='%s'",
         """
         self.logger.debug(
             "new_vminstance input: image='%s' flavor='%s' nics='%s'",
@@ -2485,20 +2780,19 @@ class vimconnector(vimconn.VimConnector):
             flavor_id,
             str(net_list),
         )
             flavor_id,
             str(net_list),
         )
+        server = None
+        created_items = {}
+        net_list_vim = []
+        # list of external networks to be connected to instance, later on used to create floating_ip
+        external_network = []
+        # List of ports with port-security disabled
+        no_secured_ports = []
+        block_device_mapping = {}
+        existing_vim_volumes = []
+        server_group_id = None
+        scheduller_hints = {}
 
         try:
 
         try:
-            server = None
-            created_items = {}
-            net_list_vim = []
-            # list of external networks to be connected to instance, later on used to create floating_ip
-            external_network = []
-            # List of ports with port-security disabled
-            no_secured_ports = []
-            block_device_mapping = {}
-            existing_vim_volumes = []
-            server_group_id = None
-            scheduller_hints = {}
-
             # Check the Openstack Connection
             self._reload_connection()
 
             # Check the Openstack Connection
             self._reload_connection()
 
@@ -2553,7 +2847,6 @@ class vimconnector(vimconn.VimConnector):
                     server_group_id,
                 )
             )
                     server_group_id,
                 )
             )
-
             # Create VM
             server = self.nova.servers.create(
                 name=name,
             # Create VM
             server = self.nova.servers.create(
                 name=name,
@@ -2589,6 +2882,10 @@ class vimconnector(vimconn.VimConnector):
                 server_id = server.id
 
             try:
                 server_id = server.id
 
             try:
+                created_items = self.remove_keep_tag_from_persistent_volumes(
+                    created_items
+                )
+
                 self.delete_vminstance(server_id, created_items)
 
             except Exception as e2:
                 self.delete_vminstance(server_id, created_items)
 
             except Exception as e2:
@@ -2596,23 +2893,26 @@ class vimconnector(vimconn.VimConnector):
 
             self._format_exception(e)
 
 
             self._format_exception(e)
 
+    @staticmethod
+    def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
+        """Removes the keep flag from persistent volumes. So, those volumes could be removed.
+
+        Args:
+            created_items (dict):       All created items belongs to VM
+
+        Returns:
+            updated_created_items   (dict):     Dict which does not include keep flag for volumes.
+
+        """
+        return {
+            key.replace(":keep", ""): value for (key, value) in created_items.items()
+        }
+
     def get_vminstance(self, vm_id):
         """Returns the VM instance information from VIM"""
     def get_vminstance(self, vm_id):
         """Returns the VM instance information from VIM"""
-        # self.logger.debug("Getting VM from VIM")
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-
-            return server.to_dict()
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        return self._find_nova_server(vm_id)
 
 
+    @catch_any_exception
     def get_vminstance_console(self, vm_id, console_type="vnc"):
         """
         Get a console for the virtual machine
     def get_vminstance_console(self, vm_id, console_type="vnc"):
         """
         Get a console for the virtual machine
@@ -2628,66 +2928,56 @@ class vimconnector(vimconn.VimConnector):
                 suffix:   extra text, e.g. the http path and query string
         """
         self.logger.debug("Getting VM CONSOLE from VIM")
                 suffix:   extra text, e.g. the http path and query string
         """
         self.logger.debug("Getting VM CONSOLE from VIM")
+        self._reload_connection()
+        server = self.nova.servers.find(id=vm_id)
 
 
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
+        if console_type is None or console_type == "novnc":
+            console_dict = server.get_vnc_console("novnc")
+        elif console_type == "xvpvnc":
+            console_dict = server.get_vnc_console(console_type)
+        elif console_type == "rdp-html5":
+            console_dict = server.get_rdp_console(console_type)
+        elif console_type == "spice-html5":
+            console_dict = server.get_spice_console(console_type)
+        else:
+            raise vimconn.VimConnException(
+                "console type '{}' not allowed".format(console_type),
+                http_code=vimconn.HTTP_Bad_Request,
+            )
 
 
-            if console_type is None or console_type == "novnc":
-                console_dict = server.get_vnc_console("novnc")
-            elif console_type == "xvpvnc":
-                console_dict = server.get_vnc_console(console_type)
-            elif console_type == "rdp-html5":
-                console_dict = server.get_rdp_console(console_type)
-            elif console_type == "spice-html5":
-                console_dict = server.get_spice_console(console_type)
-            else:
-                raise vimconn.VimConnException(
-                    "console type '{}' not allowed".format(console_type),
-                    http_code=vimconn.HTTP_Bad_Request,
-                )
+        console_dict1 = console_dict.get("console")
 
 
-            console_dict1 = console_dict.get("console")
+        if console_dict1:
+            console_url = console_dict1.get("url")
 
 
-            if console_dict1:
-                console_url = console_dict1.get("url")
+            if console_url:
+                # parse console_url
+                protocol_index = console_url.find("//")
+                suffix_index = (
+                    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+                )
+                port_index = (
+                    console_url[protocol_index + 2 : suffix_index].find(":")
+                    + protocol_index
+                    + 2
+                )
 
 
-                if console_url:
-                    # parse console_url
-                    protocol_index = console_url.find("//")
-                    suffix_index = (
-                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
-                    )
-                    port_index = (
-                        console_url[protocol_index + 2 : suffix_index].find(":")
-                        + protocol_index
-                        + 2
+                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
+                    return (
+                        -vimconn.HTTP_Internal_Server_Error,
+                        "Unexpected response from VIM",
                     )
 
                     )
 
-                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
-                        return (
-                            -vimconn.HTTP_Internal_Server_Error,
-                            "Unexpected response from VIM",
-                        )
-
-                    console_dict = {
-                        "protocol": console_url[0:protocol_index],
-                        "server": console_url[protocol_index + 2 : port_index],
-                        "port": console_url[port_index:suffix_index],
-                        "suffix": console_url[suffix_index + 1 :],
-                    }
-                    protocol_index += 2
+                console_dict = {
+                    "protocol": console_url[0:protocol_index],
+                    "server": console_url[protocol_index + 2 : port_index],
+                    "port": console_url[port_index:suffix_index],
+                    "suffix": console_url[suffix_index + 1 :],
+                }
+                protocol_index += 2
 
 
-                    return console_dict
-            raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.BadRequest,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+                return console_dict
+        raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
 
     def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
         """Neutron delete ports by id.
 
     def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
         """Neutron delete ports by id.
@@ -2695,16 +2985,44 @@ class vimconnector(vimconn.VimConnector):
             k_id    (str):      Port id in the VIM
         """
         try:
             k_id    (str):      Port id in the VIM
         """
         try:
+            self.neutron.delete_port(k_id)
+
+        except (neExceptions.ConnectionFailed, ConnectionError) as e:
+            self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+            # If there is connection error, raise.
+            self._format_exception(e)
+        except Exception as e:
+            self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+
+    def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
+        """Cinder delete volume by id.
+        Args:
+            shared_volume_vim_id    (str):                  ID of shared volume in VIM
+        """
+        elapsed_time = 0
+        try:
+            while elapsed_time < server_timeout:
+                vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
+                if vol_status == "available":
+                    self.cinder.volumes.delete(shared_volume_vim_id)
+                    return True
 
 
-            port_dict = self.neutron.list_ports()
-            existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
+                time.sleep(5)
+                elapsed_time += 5
 
 
-            if k_id in existing_ports:
-                self.neutron.delete_port(k_id)
+            if elapsed_time >= server_timeout:
+                raise vimconn.VimConnException(
+                    "Timeout waiting for volume "
+                    + shared_volume_vim_id
+                    + " to be available",
+                    http_code=vimconn.HTTP_Request_Timeout,
+                )
 
         except Exception as e:
 
         except Exception as e:
-
-            self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
+            )
+            self._format_exception(e)
 
     def _delete_volumes_by_id_wth_cinder(
         self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
 
     def _delete_volumes_by_id_wth_cinder(
         self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
@@ -2718,7 +3036,7 @@ class vimconnector(vimconn.VimConnector):
         """
         try:
             if k_id in volumes_to_hold:
         """
         try:
             if k_id in volumes_to_hold:
-                return
+                return False
 
             if self.cinder.volumes.get(k_id).status != "available":
                 return True
 
             if self.cinder.volumes.get(k_id).status != "available":
                 return True
@@ -2727,6 +3045,11 @@ class vimconnector(vimconn.VimConnector):
                 self.cinder.volumes.delete(k_id)
                 created_items[k] = None
 
                 self.cinder.volumes.delete(k_id)
                 created_items[k] = None
 
+        except (cExceptions.ConnectionError, ConnectionError) as e:
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
+            )
+            self._format_exception(e)
         except Exception as e:
             self.logger.error(
                 "Error deleting volume: {}: {}".format(type(e).__name__, e)
         except Exception as e:
             self.logger.error(
                 "Error deleting volume: {}: {}".format(type(e).__name__, e)
@@ -2743,6 +3066,11 @@ class vimconnector(vimconn.VimConnector):
             self.neutron.delete_floatingip(k_id)
             created_items[k] = None
 
             self.neutron.delete_floatingip(k_id)
             created_items[k] = None
 
+        except (neExceptions.ConnectionFailed, ConnectionError) as e:
+            self.logger.error(
+                "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
+            )
+            self._format_exception(e)
         except Exception as e:
             self.logger.error(
                 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
         except Exception as e:
             self.logger.error(
                 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
@@ -2768,6 +3096,11 @@ class vimconnector(vimconn.VimConnector):
                 if k_item == "port":
                     self._delete_ports_by_id_wth_neutron(k_id)
 
                 if k_item == "port":
                     self._delete_ports_by_id_wth_neutron(k_id)
 
+            except (neExceptions.ConnectionFailed, ConnectionError) as e:
+                self.logger.error(
+                    "Error deleting port: {}: {}".format(type(e).__name__, e)
+                )
+                self._format_exception(e)
             except Exception as e:
                 self.logger.error(
                     "Error deleting port: {}: {}".format(type(e).__name__, e)
             except Exception as e:
                 self.logger.error(
                     "Error deleting port: {}: {}".format(type(e).__name__, e)
@@ -2783,9 +3116,7 @@ class vimconnector(vimconn.VimConnector):
 
             try:
                 k_item, k_id = self._get_item_name_id(k)
 
             try:
                 k_item, k_id = self._get_item_name_id(k)
-
                 if k_item == "volume":
                 if k_item == "volume":
-
                     unavailable_vol = self._delete_volumes_by_id_wth_cinder(
                         k, k_id, volumes_to_hold, created_items
                     )
                     unavailable_vol = self._delete_volumes_by_id_wth_cinder(
                         k, k_id, volumes_to_hold, created_items
                     )
@@ -2794,14 +3125,40 @@ class vimconnector(vimconn.VimConnector):
                         keep_waiting = True
 
                 elif k_item == "floating_ip":
                         keep_waiting = True
 
                 elif k_item == "floating_ip":
-
                     self._delete_floating_ip_by_id(k, k_id, created_items)
 
                     self._delete_floating_ip_by_id(k, k_id, created_items)
 
+            except (
+                cExceptions.ConnectionError,
+                neExceptions.ConnectionFailed,
+                ConnectionError,
+                AttributeError,
+                TypeError,
+            ) as e:
+                self.logger.error("Error deleting {}: {}".format(k, e))
+                self._format_exception(e)
+
             except Exception as e:
                 self.logger.error("Error deleting {}: {}".format(k, e))
 
         return keep_waiting
 
             except Exception as e:
                 self.logger.error("Error deleting {}: {}".format(k, e))
 
         return keep_waiting
 
+    @staticmethod
+    def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
+        """Remove the volumes which has key flag from created_items
+
+        Args:
+            created_items   (dict):         All created items belongs to VM
+
+        Returns:
+            created_items   (dict):         Persistent volumes eliminated created_items
+        """
+        return {
+            key: value
+            for (key, value) in created_items.items()
+            if len(key.split(":")) == 2
+        }
+
+    @catch_any_exception
     def delete_vminstance(
         self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
     ) -> None:
     def delete_vminstance(
         self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
     ) -> None:
@@ -2817,6 +3174,10 @@ class vimconnector(vimconn.VimConnector):
             volumes_to_hold = []
 
         try:
             volumes_to_hold = []
 
         try:
+            created_items = self._extract_items_wth_keep_flag_from_created_items(
+                created_items
+            )
+
             self._reload_connection()
 
             # Delete VM ports attached to the networks before the virtual machine
             self._reload_connection()
 
             # Delete VM ports attached to the networks before the virtual machine
@@ -2842,14 +3203,9 @@ class vimconnector(vimconn.VimConnector):
                 if keep_waiting:
                     time.sleep(1)
                     elapsed_time += 1
                 if keep_waiting:
                     time.sleep(1)
                     elapsed_time += 1
-
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
+            # If VM does not exist, it does not raise
+            self.logger.warning(f"Error deleting VM: {vm_id} is not found, {str(e)}")
 
     def refresh_vms_status(self, vm_list):
         """Get the status of the virtual machines and their interfaces/ports
 
     def refresh_vms_status(self, vm_list):
         """Get the status of the virtual machines and their interfaces/ports
@@ -2881,7 +3237,6 @@ class vimconnector(vimconn.VimConnector):
         self.logger.debug(
             "refresh_vms status: Getting tenant VM instance information from VIM"
         )
         self.logger.debug(
             "refresh_vms status: Getting tenant VM instance information from VIM"
         )
-
         for vm_id in vm_list:
             vm = {}
 
         for vm_id in vm_list:
             vm = {}
 
@@ -2994,121 +3349,111 @@ class vimconnector(vimconn.VimConnector):
 
         return vm_dict
 
 
         return vm_dict
 
+    @catch_any_exception
     def action_vminstance(self, vm_id, action_dict, created_items={}):
         """Send and action over a VM instance from VIM
     def action_vminstance(self, vm_id, action_dict, created_items={}):
         """Send and action over a VM instance from VIM
-        Returns None or the console dict if the action was successfully sent to the VIM"""
+        Returns None or the console dict if the action was successfully sent to the VIM
+        """
         self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
         self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
-
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-
-            if "start" in action_dict:
-                if action_dict["start"] == "rebuild":
-                    server.rebuild()
-                else:
-                    if server.status == "PAUSED":
-                        server.unpause()
-                    elif server.status == "SUSPENDED":
-                        server.resume()
-                    elif server.status == "SHUTOFF":
-                        server.start()
-                    else:
-                        self.logger.debug(
-                            "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
-                        )
-                        raise vimconn.VimConnException(
-                            "Cannot 'start' instance while it is in active state",
-                            http_code=vimconn.HTTP_Bad_Request,
-                        )
-
-            elif "pause" in action_dict:
-                server.pause()
-            elif "resume" in action_dict:
-                server.resume()
-            elif "shutoff" in action_dict or "shutdown" in action_dict:
-                self.logger.debug("server status %s", server.status)
-                if server.status == "ACTIVE":
-                    server.stop()
+        self._reload_connection()
+        server = self.nova.servers.find(id=vm_id)
+        if "start" in action_dict:
+            if action_dict["start"] == "rebuild":
+                server.rebuild()
+            else:
+                if server.status == "PAUSED":
+                    server.unpause()
+                elif server.status == "SUSPENDED":
+                    server.resume()
+                elif server.status == "SHUTOFF":
+                    server.start()
                 else:
                 else:
-                    self.logger.debug("ERROR: VM is not in Active state")
-                    raise vimconn.VimConnException(
-                        "VM is not in active state, stop operation is not allowed",
-                        http_code=vimconn.HTTP_Bad_Request,
+                    self.logger.debug(
+                        "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                     )
                     )
-            elif "forceOff" in action_dict:
-                server.stop()  # TODO
-            elif "terminate" in action_dict:
-                server.delete()
-            elif "createImage" in action_dict:
-                server.create_image()
-                # "path":path_schema,
-                # "description":description_schema,
-                # "name":name_schema,
-                # "metadata":metadata_schema,
-                # "imageRef": id_schema,
-                # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
-            elif "rebuild" in action_dict:
-                server.rebuild(server.image["id"])
-            elif "reboot" in action_dict:
-                server.reboot()  # reboot_type="SOFT"
-            elif "console" in action_dict:
-                console_type = action_dict["console"]
-
-                if console_type is None or console_type == "novnc":
-                    console_dict = server.get_vnc_console("novnc")
-                elif console_type == "xvpvnc":
-                    console_dict = server.get_vnc_console(console_type)
-                elif console_type == "rdp-html5":
-                    console_dict = server.get_rdp_console(console_type)
-                elif console_type == "spice-html5":
-                    console_dict = server.get_spice_console(console_type)
-                else:
                     raise vimconn.VimConnException(
                     raise vimconn.VimConnException(
-                        "console type '{}' not allowed".format(console_type),
+                        "Cannot 'start' instance while it is in active state",
                         http_code=vimconn.HTTP_Bad_Request,
                     )
                         http_code=vimconn.HTTP_Bad_Request,
                     )
+        elif "pause" in action_dict:
+            server.pause()
+        elif "resume" in action_dict:
+            server.resume()
+        elif "shutoff" in action_dict or "shutdown" in action_dict:
+            self.logger.debug("server status %s", server.status)
+            if server.status == "ACTIVE":
+                server.stop()
+            else:
+                self.logger.debug("ERROR: VM is not in Active state")
+                raise vimconn.VimConnException(
+                    "VM is not in active state, stop operation is not allowed",
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
+        elif "forceOff" in action_dict:
+            server.stop()  # TODO
+        elif "terminate" in action_dict:
+            server.delete()
+        elif "createImage" in action_dict:
+            server.create_image()
+            # "path":path_schema,
+            # "description":description_schema,
+            # "name":name_schema,
+            # "metadata":metadata_schema,
+            # "imageRef": id_schema,
+            # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
+        elif "rebuild" in action_dict:
+            server.rebuild(server.image["id"])
+        elif "reboot" in action_dict:
+            server.reboot()  # reboot_type="SOFT"
+        elif "console" in action_dict:
+            console_type = action_dict["console"]
 
 
-                try:
-                    console_url = console_dict["console"]["url"]
-                    # parse console_url
-                    protocol_index = console_url.find("//")
-                    suffix_index = (
-                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
-                    )
-                    port_index = (
-                        console_url[protocol_index + 2 : suffix_index].find(":")
-                        + protocol_index
-                        + 2
-                    )
-
-                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
-                        raise vimconn.VimConnException(
-                            "Unexpected response from VIM " + str(console_dict)
-                        )
+            if console_type is None or console_type == "novnc":
+                console_dict = server.get_vnc_console("novnc")
+            elif console_type == "xvpvnc":
+                console_dict = server.get_vnc_console(console_type)
+            elif console_type == "rdp-html5":
+                console_dict = server.get_rdp_console(console_type)
+            elif console_type == "spice-html5":
+                console_dict = server.get_spice_console(console_type)
+            else:
+                raise vimconn.VimConnException(
+                    "console type '{}' not allowed".format(console_type),
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
 
 
-                    console_dict2 = {
-                        "protocol": console_url[0:protocol_index],
-                        "server": console_url[protocol_index + 2 : port_index],
-                        "port": int(console_url[port_index + 1 : suffix_index]),
-                        "suffix": console_url[suffix_index + 1 :],
-                    }
+            try:
+                console_url = console_dict["console"]["url"]
+                # parse console_url
+                protocol_index = console_url.find("//")
+                suffix_index = (
+                    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+                )
+                port_index = (
+                    console_url[protocol_index + 2 : suffix_index].find(":")
+                    + protocol_index
+                    + 2
+                )
 
 
-                    return console_dict2
-                except Exception:
+                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                     raise vimconn.VimConnException(
                         "Unexpected response from VIM " + str(console_dict)
                     )
 
                     raise vimconn.VimConnException(
                         "Unexpected response from VIM " + str(console_dict)
                     )
 
-            return None
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-        # TODO insert exception vimconn.HTTP_Unauthorized
+                console_dict2 = {
+                    "protocol": console_url[0:protocol_index],
+                    "server": console_url[protocol_index + 2 : port_index],
+                    "port": int(console_url[port_index + 1 : suffix_index]),
+                    "suffix": console_url[suffix_index + 1 :],
+                }
+
+                return console_dict2
+            except Exception:
+                raise vimconn.VimConnException(
+                    "Unexpected response from VIM " + str(console_dict)
+                )
+
+        return None
 
     # ###### VIO Specific Changes #########
     def _generate_vlanID(self):
 
     # ###### VIO Specific Changes #########
     def _generate_vlanID(self):
@@ -3240,104 +3585,11 @@ class vimconnector(vimconn.VimConnector):
                     )
                 )
 
                     )
                 )
 
-    # NOT USED FUNCTIONS
-
-    def new_external_port(self, port_data):
-        """Adds a external port to VIM
-        Returns the port identifier"""
-        # TODO openstack if needed
-        return (
-            -vimconn.HTTP_Internal_Server_Error,
-            "osconnector.new_external_port() not implemented",
-        )
-
-    def connect_port_network(self, port_id, network_id, admin=False):
-        """Connects a external port to a network
-        Returns status code of the VIM response"""
-        # TODO openstack if needed
-        return (
-            -vimconn.HTTP_Internal_Server_Error,
-            "osconnector.connect_port_network() not implemented",
-        )
-
-    def new_user(self, user_name, user_passwd, tenant_id=None):
-        """Adds a new user to openstack VIM
-        Returns the user identifier"""
-        self.logger.debug("osconnector: Adding a new user to VIM")
-
-        try:
-            self._reload_connection()
-            user = self.keystone.users.create(
-                user_name, password=user_passwd, default_project=tenant_id
-            )
-            # self.keystone.tenants.add_user(self.k_creds["username"], #role)
-
-            return user.id
-        except ksExceptions.ConnectionError as e:
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-        except ksExceptions.ClientException as e:  # TODO remove
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-
-        # TODO insert exception vimconn.HTTP_Unauthorized
-        # if reaching here is because an exception
-        self.logger.debug("new_user " + error_text)
-
-        return error_value, error_text
-
-    def delete_user(self, user_id):
-        """Delete a user from openstack VIM
-        Returns the user identifier"""
-        if self.debug:
-            print("osconnector: Deleting  a  user from VIM")
-
-        try:
-            self._reload_connection()
-            self.keystone.users.delete(user_id)
-
-            return 1, user_id
-        except ksExceptions.ConnectionError as e:
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-        except ksExceptions.NotFound as e:
-            error_value = -vimconn.HTTP_Not_Found
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-        except ksExceptions.ClientException as e:  # TODO remove
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-
-        # TODO insert exception vimconn.HTTP_Unauthorized
-        # if reaching here is because an exception
-        self.logger.debug("delete_tenant " + error_text)
-
-        return error_value, error_text
-
-    def get_hosts_info(self):
-        """Get the information of deployed hosts
-        Returns the hosts content"""
-        if self.debug:
-            print("osconnector: Getting Host info from VIM")
+    def get_hosts_info(self):
+        """Get the information of deployed hosts
+        Returns the hosts content"""
+        if self.debug:
+            print("osconnector: Getting Host info from VIM")
 
         try:
             h_list = []
 
         try:
             h_list = []
@@ -3406,619 +3658,7 @@ class vimconnector(vimconn.VimConnector):
 
         return error_value, error_text
 
 
         return error_value, error_text
 
-    def new_classification(self, name, ctype, definition):
-        self.logger.debug(
-            "Adding a new (Traffic) Classification to VIM, named %s", name
-        )
-
-        try:
-            new_class = None
-            self._reload_connection()
-
-            if ctype not in supportedClassificationTypes:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector does not support provided "
-                    "Classification Type {}, supported ones are: {}".format(
-                        ctype, supportedClassificationTypes
-                    )
-                )
-
-            if not self._validate_classification(ctype, definition):
-                raise vimconn.VimConnException(
-                    "Incorrect Classification definition for the type specified."
-                )
-
-            classification_dict = definition
-            classification_dict["name"] = name
-            new_class = self.neutron.create_sfc_flow_classifier(
-                {"flow_classifier": classification_dict}
-            )
-
-            return new_class["flow_classifier"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self.logger.error("Creation of Classification failed.")
-            self._format_exception(e)
-
-    def get_classification(self, class_id):
-        self.logger.debug(" Getting Classification %s from VIM", class_id)
-        filter_dict = {"id": class_id}
-        class_list = self.get_classification_list(filter_dict)
-
-        if len(class_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Classification '{}' not found".format(class_id)
-            )
-        elif len(class_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Classification with this criteria"
-            )
-
-        classification = class_list[0]
-
-        return classification
-
-    def get_classification_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Classifications from VIM filter: '%s'", str(filter_dict)
-        )
-
-        try:
-            filter_dict_os = filter_dict.copy()
-            self._reload_connection()
-
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
-            classification_dict = self.neutron.list_sfc_flow_classifiers(
-                **filter_dict_os
-            )
-            classification_list = classification_dict["flow_classifiers"]
-            self.__classification_os2mano(classification_list)
-
-            return classification_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def delete_classification(self, class_id):
-        self.logger.debug("Deleting Classification '%s' from VIM", class_id)
-
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_flow_classifier(class_id)
-
-            return class_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
-        self.logger.debug(
-            "Adding a new Service Function Instance to VIM, named '%s'", name
-        )
-
-        try:
-            new_sfi = None
-            self._reload_connection()
-            correlation = None
-
-            if sfc_encap:
-                correlation = "nsh"
-
-            if len(ingress_ports) != 1:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector can only have 1 ingress port per SFI"
-                )
-
-            if len(egress_ports) != 1:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector can only have 1 egress port per SFI"
-                )
-
-            sfi_dict = {
-                "name": name,
-                "ingress": ingress_ports[0],
-                "egress": egress_ports[0],
-                "service_function_parameters": {"correlation": correlation},
-            }
-            new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
-
-            return new_sfi["port_pair"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            if new_sfi:
-                try:
-                    self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
-                except Exception:
-                    self.logger.error(
-                        "Creation of Service Function Instance failed, with "
-                        "subsequent deletion failure as well."
-                    )
-
-            self._format_exception(e)
-
-    def get_sfi(self, sfi_id):
-        self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
-        filter_dict = {"id": sfi_id}
-        sfi_list = self.get_sfi_list(filter_dict)
-
-        if len(sfi_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Service Function Instance '{}' not found".format(sfi_id)
-            )
-        elif len(sfi_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Service Function Instance with this criteria"
-            )
-
-        sfi = sfi_list[0]
-
-        return sfi
-
-    def get_sfi_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
-        )
-
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
-
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
-            sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
-            sfi_list = sfi_dict["port_pairs"]
-            self.__sfi_os2mano(sfi_list)
-
-            return sfi_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def delete_sfi(self, sfi_id):
-        self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
-
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_port_pair(sfi_id)
-
-            return sfi_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def new_sf(self, name, sfis, sfc_encap=True):
-        self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
-
-        try:
-            new_sf = None
-            self._reload_connection()
-            # correlation = None
-            # if sfc_encap:
-            #     correlation = "nsh"
-
-            for instance in sfis:
-                sfi = self.get_sfi(instance)
-
-                if sfi.get("sfc_encap") != sfc_encap:
-                    raise vimconn.VimConnNotSupportedException(
-                        "OpenStack VIM connector requires all SFIs of the "
-                        "same SF to share the same SFC Encapsulation"
-                    )
-
-            sf_dict = {"name": name, "port_pairs": sfis}
-            new_sf = self.neutron.create_sfc_port_pair_group(
-                {"port_pair_group": sf_dict}
-            )
-
-            return new_sf["port_pair_group"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            if new_sf:
-                try:
-                    self.neutron.delete_sfc_port_pair_group(
-                        new_sf["port_pair_group"]["id"]
-                    )
-                except Exception:
-                    self.logger.error(
-                        "Creation of Service Function failed, with "
-                        "subsequent deletion failure as well."
-                    )
-
-            self._format_exception(e)
-
-    def get_sf(self, sf_id):
-        self.logger.debug("Getting Service Function %s from VIM", sf_id)
-        filter_dict = {"id": sf_id}
-        sf_list = self.get_sf_list(filter_dict)
-
-        if len(sf_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Service Function '{}' not found".format(sf_id)
-            )
-        elif len(sf_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Service Function with this criteria"
-            )
-
-        sf = sf_list[0]
-
-        return sf
-
-    def get_sf_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Service Function from VIM filter: '%s'", str(filter_dict)
-        )
-
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
-
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
-            sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
-            sf_list = sf_dict["port_pair_groups"]
-            self.__sf_os2mano(sf_list)
-
-            return sf_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def delete_sf(self, sf_id):
-        self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
-
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_port_pair_group(sf_id)
-
-            return sf_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
-        self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
-
-        try:
-            new_sfp = None
-            self._reload_connection()
-            # In networking-sfc the MPLS encapsulation is legacy
-            # should be used when no full SFC Encapsulation is intended
-            correlation = "mpls"
-
-            if sfc_encap:
-                correlation = "nsh"
-
-            sfp_dict = {
-                "name": name,
-                "flow_classifiers": classifications,
-                "port_pair_groups": sfs,
-                "chain_parameters": {"correlation": correlation},
-            }
-
-            if spi:
-                sfp_dict["chain_id"] = spi
-
-            new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
-
-            return new_sfp["port_chain"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            if new_sfp:
-                try:
-                    self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
-                except Exception:
-                    self.logger.error(
-                        "Creation of Service Function Path failed, with "
-                        "subsequent deletion failure as well."
-                    )
-
-            self._format_exception(e)
-
-    def get_sfp(self, sfp_id):
-        self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
-
-        filter_dict = {"id": sfp_id}
-        sfp_list = self.get_sfp_list(filter_dict)
-
-        if len(sfp_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Service Function Path '{}' not found".format(sfp_id)
-            )
-        elif len(sfp_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Service Function Path with this criteria"
-            )
-
-        sfp = sfp_list[0]
-
-        return sfp
-
-    def get_sfp_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
-        )
-
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
-
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
-            sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
-            sfp_list = sfp_dict["port_chains"]
-            self.__sfp_os2mano(sfp_list)
-
-            return sfp_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def delete_sfp(self, sfp_id):
-        self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
-
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_port_chain(sfp_id)
-
-            return sfp_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def refresh_sfps_status(self, sfp_list):
-        """Get the status of the service function path
-        Params: the list of sfp identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function path
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)F
-        """
-        sfp_dict = {}
-        self.logger.debug(
-            "refresh_sfps status: Getting tenant SFP information from VIM"
-        )
-
-        for sfp_id in sfp_list:
-            sfp = {}
-
-            try:
-                sfp_vim = self.get_sfp(sfp_id)
-
-                if sfp_vim["spi"]:
-                    sfp["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    sfp["status"] = "OTHER"
-                    sfp["error_msg"] = "VIM status reported " + sfp["status"]
-
-                sfp["vim_info"] = self.serialize(sfp_vim)
-
-                if sfp_vim.get("fault"):
-                    sfp["error_msg"] = str(sfp_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting sfp status: %s", str(e))
-                sfp["status"] = "DELETED"
-                sfp["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting sfp status: %s", str(e))
-                sfp["status"] = "VIM_ERROR"
-                sfp["error_msg"] = str(e)
-
-            sfp_dict[sfp_id] = sfp
-
-        return sfp_dict
-
-    def refresh_sfis_status(self, sfi_list):
-        """Get the status of the service function instances
-        Params: the list of sfi identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function instance
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-        """
-        sfi_dict = {}
-        self.logger.debug(
-            "refresh_sfis status: Getting tenant sfi information from VIM"
-        )
-
-        for sfi_id in sfi_list:
-            sfi = {}
-
-            try:
-                sfi_vim = self.get_sfi(sfi_id)
-
-                if sfi_vim:
-                    sfi["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    sfi["status"] = "OTHER"
-                    sfi["error_msg"] = "VIM status reported " + sfi["status"]
-
-                sfi["vim_info"] = self.serialize(sfi_vim)
-
-                if sfi_vim.get("fault"):
-                    sfi["error_msg"] = str(sfi_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting sfi status: %s", str(e))
-                sfi["status"] = "DELETED"
-                sfi["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting sfi status: %s", str(e))
-                sfi["status"] = "VIM_ERROR"
-                sfi["error_msg"] = str(e)
-
-            sfi_dict[sfi_id] = sfi
-
-        return sfi_dict
-
-    def refresh_sfs_status(self, sf_list):
-        """Get the status of the service functions
-        Params: the list of sf identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-        """
-        sf_dict = {}
-        self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
-
-        for sf_id in sf_list:
-            sf = {}
-
-            try:
-                sf_vim = self.get_sf(sf_id)
-
-                if sf_vim:
-                    sf["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    sf["status"] = "OTHER"
-                    sf["error_msg"] = "VIM status reported " + sf_vim["status"]
-
-                sf["vim_info"] = self.serialize(sf_vim)
-
-                if sf_vim.get("fault"):
-                    sf["error_msg"] = str(sf_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting sf status: %s", str(e))
-                sf["status"] = "DELETED"
-                sf["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting sf status: %s", str(e))
-                sf["status"] = "VIM_ERROR"
-                sf["error_msg"] = str(e)
-
-            sf_dict[sf_id] = sf
-
-        return sf_dict
-
-    def refresh_classifications_status(self, classification_list):
-        """Get the status of the classifications
-        Params: the list of classification identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this classifier
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-        """
-        classification_dict = {}
-        self.logger.debug(
-            "refresh_classifications status: Getting tenant classification information from VIM"
-        )
-
-        for classification_id in classification_list:
-            classification = {}
-
-            try:
-                classification_vim = self.get_classification(classification_id)
-
-                if classification_vim:
-                    classification["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    classification["status"] = "OTHER"
-                    classification["error_msg"] = (
-                        "VIM status reported " + classification["status"]
-                    )
-
-                classification["vim_info"] = self.serialize(classification_vim)
-
-                if classification_vim.get("fault"):
-                    classification["error_msg"] = str(classification_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting classification status: %s", str(e))
-                classification["status"] = "DELETED"
-                classification["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting classification status: %s", str(e))
-                classification["status"] = "VIM_ERROR"
-                classification["error_msg"] = str(e)
-
-            classification_dict[classification_id] = classification
-
-        return classification_dict
-
+    @catch_any_exception
     def new_affinity_group(self, affinity_group_data):
         """Adds a server group to VIM
             affinity_group_data contains a dictionary with information, keys:
     def new_affinity_group(self, affinity_group_data):
         """Adds a server group to VIM
             affinity_group_data contains a dictionary with information, keys:
@@ -4027,70 +3667,50 @@ class vimconnector(vimconn.VimConnector):
                 scope: Only nfvi-node allowed
         Returns the server group identifier"""
         self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
                 scope: Only nfvi-node allowed
         Returns the server group identifier"""
         self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
+        name = affinity_group_data["name"]
+        policy = affinity_group_data["type"]
+        self._reload_connection()
+        new_server_group = self.nova.server_groups.create(name, policy)
+        return new_server_group.id
 
 
-        try:
-            name = affinity_group_data["name"]
-            policy = affinity_group_data["type"]
-
-            self._reload_connection()
-            new_server_group = self.nova.server_groups.create(name, policy)
-
-            return new_server_group.id
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-            KeyError,
-        ) as e:
-            self._format_exception(e)
-
+    @catch_any_exception
     def get_affinity_group(self, affinity_group_id):
         """Obtain server group details from the VIM. Returns the server group detais as a dict"""
         self.logger.debug("Getting flavor '%s'", affinity_group_id)
     def get_affinity_group(self, affinity_group_id):
         """Obtain server group details from the VIM. Returns the server group detais as a dict"""
         self.logger.debug("Getting flavor '%s'", affinity_group_id)
-        try:
-            self._reload_connection()
-            server_group = self.nova.server_groups.find(id=affinity_group_id)
-
-            return server_group.to_dict()
-        except (
-            nvExceptions.NotFound,
-            nvExceptions.ClientException,
-            ksExceptions.ClientException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        self._reload_connection()
+        server_group = self.nova.server_groups.find(id=affinity_group_id)
+        return server_group.to_dict()
 
 
+    @catch_any_exception
     def delete_affinity_group(self, affinity_group_id):
         """Deletes a server group from the VIM. Returns the old affinity_group_id"""
         self.logger.debug("Getting server group '%s'", affinity_group_id)
     def delete_affinity_group(self, affinity_group_id):
         """Deletes a server group from the VIM. Returns the old affinity_group_id"""
         self.logger.debug("Getting server group '%s'", affinity_group_id)
-        try:
-            self._reload_connection()
-            self.nova.server_groups.delete(affinity_group_id)
-
-            return affinity_group_id
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        self._reload_connection()
+        self.nova.server_groups.delete(affinity_group_id)
+        return affinity_group_id
 
 
-    def get_vdu_state(self, vm_id):
-        """
-        Getting the state of a vdu
-        param:
-            vm_id: ID of an instance
+    @catch_any_exception
+    def get_vdu_state(self, vm_id, host_is_required=False) -> list:
+        """Getting the state of a VDU.
+        Args:
+            vm_id   (str): ID of an instance
+            host_is_required    (Boolean): If the VIM account is non-admin, host info does not appear in server_dict
+                                           and if this is set to True, it raises KeyError.
+        Returns:
+            vdu_data    (list): VDU details including state, flavor, host_info, AZ
         """
         self.logger.debug("Getting the status of VM")
         self.logger.debug("VIM VM ID %s", vm_id)
         self._reload_connection()
         """
         self.logger.debug("Getting the status of VM")
         self.logger.debug("VIM VM ID %s", vm_id)
         self._reload_connection()
-        server = self.nova.servers.find(id=vm_id)
-        server_dict = server.to_dict()
+        server_dict = self._find_nova_server(vm_id)
+        srv_attr = "OS-EXT-SRV-ATTR:host"
+        host_info = (
+            server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
+        )
         vdu_data = [
             server_dict["status"],
             server_dict["flavor"]["id"],
         vdu_data = [
             server_dict["status"],
             server_dict["flavor"]["id"],
-            server_dict["OS-EXT-SRV-ATTR:host"],
+            host_info,
             server_dict["OS-EXT-AZ:availability_zone"],
         ]
         self.logger.debug("vdu_data %s", vdu_data)
             server_dict["OS-EXT-AZ:availability_zone"],
         ]
         self.logger.debug("vdu_data %s", vdu_data)
@@ -4152,6 +3772,7 @@ class vimconnector(vimconn.VimConnector):
                         az_check["zone_check"] = True
         return az_check
 
                         az_check["zone_check"] = True
         return az_check
 
+    @catch_any_exception
     def migrate_instance(self, vm_id, compute_host=None):
         """
         Migrate a vdu
     def migrate_instance(self, vm_id, compute_host=None):
         """
         Migrate a vdu
@@ -4161,80 +3782,76 @@ class vimconnector(vimconn.VimConnector):
         """
         self._reload_connection()
         vm_state = False
         """
         self._reload_connection()
         vm_state = False
-        instance_state = self.get_vdu_state(vm_id)
+        instance_state = self.get_vdu_state(vm_id, host_is_required=True)
         server_flavor_id = instance_state[1]
         server_hypervisor_name = instance_state[2]
         server_availability_zone = instance_state[3]
         server_flavor_id = instance_state[1]
         server_hypervisor_name = instance_state[2]
         server_availability_zone = instance_state[3]
-        try:
-            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
-            server_flavor_details = [
-                server_flavor["ram"],
-                server_flavor["disk"],
-                server_flavor["vcpus"],
-            ]
-            if compute_host == server_hypervisor_name:
-                raise vimconn.VimConnException(
-                    "Unable to migrate instance '{}' to the same host '{}'".format(
-                        vm_id, compute_host
-                    ),
-                    http_code=vimconn.HTTP_Bad_Request,
-                )
-            az_status = self.check_availability_zone(
-                server_availability_zone,
-                server_flavor_details,
-                server_hypervisor_name,
-                compute_host,
+        server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
+        server_flavor_details = [
+            server_flavor["ram"],
+            server_flavor["disk"],
+            server_flavor["vcpus"],
+        ]
+        if compute_host == server_hypervisor_name:
+            raise vimconn.VimConnException(
+                "Unable to migrate instance '{}' to the same host '{}'".format(
+                    vm_id, compute_host
+                ),
+                http_code=vimconn.HTTP_Bad_Request,
             )
             )
-            availability_zone_check = az_status["zone_check"]
-            available_compute_id = az_status.get("compute_availability")
+        az_status = self.check_availability_zone(
+            server_availability_zone,
+            server_flavor_details,
+            server_hypervisor_name,
+            compute_host,
+        )
+        availability_zone_check = az_status["zone_check"]
+        available_compute_id = az_status.get("compute_availability")
 
 
-            if availability_zone_check is False:
-                raise vimconn.VimConnException(
-                    "Unable to migrate instance '{}' to a different availability zone".format(
-                        vm_id
-                    ),
-                    http_code=vimconn.HTTP_Bad_Request,
-                )
-            if available_compute_id is not None:
-                self.nova.servers.live_migrate(
-                    server=vm_id,
-                    host=available_compute_id,
-                    block_migration=True,
-                    disk_over_commit=False,
-                )
-                state = "MIGRATING"
-                changed_compute_host = ""
-                if state == "MIGRATING":
-                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
-                    changed_compute_host = self.get_vdu_state(vm_id)[2]
-                if vm_state and changed_compute_host == available_compute_id:
-                    self.logger.debug(
-                        "Instance '{}' migrated to the new compute host '{}'".format(
-                            vm_id, changed_compute_host
-                        )
-                    )
-                    return state, available_compute_id
-                else:
-                    raise vimconn.VimConnException(
-                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
-                            vm_id, available_compute_id
-                        ),
-                        http_code=vimconn.HTTP_Bad_Request,
+        if availability_zone_check is False:
+            raise vimconn.VimConnException(
+                "Unable to migrate instance '{}' to a different availability zone".format(
+                    vm_id
+                ),
+                http_code=vimconn.HTTP_Bad_Request,
+            )
+        if available_compute_id is not None:
+            # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
+            self.nova.servers.live_migrate(
+                server=vm_id,
+                host=available_compute_id,
+                block_migration=True,
+            )
+            state = "MIGRATING"
+            changed_compute_host = ""
+            if state == "MIGRATING":
+                vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
+                changed_compute_host = self.get_vdu_state(vm_id, host_is_required=True)[
+                    2
+                ]
+            if vm_state and changed_compute_host == available_compute_id:
+                self.logger.debug(
+                    "Instance '{}' migrated to the new compute host '{}'".format(
+                        vm_id, changed_compute_host
                     )
                     )
+                )
+                return state, available_compute_id
             else:
                 raise vimconn.VimConnException(
             else:
                 raise vimconn.VimConnException(
-                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
-                        available_compute_id
+                    "Migration Failed. Instance '{}' not moved to the new host {}".format(
+                        vm_id, available_compute_id
                     ),
                     http_code=vimconn.HTTP_Bad_Request,
                 )
                     ),
                     http_code=vimconn.HTTP_Bad_Request,
                 )
-        except (
-            nvExceptions.BadRequest,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-        ) as e:
-            self._format_exception(e)
+        else:
+            raise vimconn.VimConnException(
+                "Compute '{}' not available or does not have enough resources to migrate the instance".format(
+                    available_compute_id
+                ),
+                http_code=vimconn.HTTP_Bad_Request,
+            )
 
 
+    @catch_any_exception
     def resize_instance(self, vm_id, new_flavor_id):
         """
         For resizing the vm based on the given
     def resize_instance(self, vm_id, new_flavor_id):
         """
         For resizing the vm based on the given
@@ -4249,37 +3866,30 @@ class vimconnector(vimconn.VimConnector):
         instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
         old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
         new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
         instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
         old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
         new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
-        try:
-            if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
-                if old_flavor_disk > new_flavor_disk:
+        if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
+            if old_flavor_disk > new_flavor_disk:
+                raise nvExceptions.BadRequest(
+                    400,
+                    message="Server disk resize failed. Resize to lower disk flavor is not allowed",
+                )
+            else:
+                self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
+                vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
+                if vm_state:
+                    instance_resized_status = self.confirm_resize(vm_id)
+                    return instance_resized_status
+                else:
                     raise nvExceptions.BadRequest(
                     raise nvExceptions.BadRequest(
-                        400,
-                        message="Server disk resize failed. Resize to lower disk flavor is not allowed",
+                        409,
+                        message="Cannot 'resize' vm_state is in ERROR",
                     )
                     )
-                else:
-                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
-                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
-                    if vm_state:
-                        instance_resized_status = self.confirm_resize(vm_id)
-                        return instance_resized_status
-                    else:
-                        raise nvExceptions.BadRequest(
-                            409,
-                            message="Cannot 'resize' vm_state is in ERROR",
-                        )
 
 
-            else:
-                self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
-                raise nvExceptions.BadRequest(
-                    409,
-                    message="Cannot 'resize' instance while it is in vm_state resized",
-                )
-        except (
-            nvExceptions.BadRequest,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-        ) as e:
-            self._format_exception(e)
+        else:
+            self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
+            raise nvExceptions.BadRequest(
+                409,
+                message="Cannot 'resize' instance while it is in vm_state resized",
+            )
 
     def confirm_resize(self, vm_id):
         """
 
     def confirm_resize(self, vm_id):
         """
@@ -4293,3 +3903,23 @@ class vimconnector(vimconn.VimConnector):
             self.__wait_for_vm(vm_id, "ACTIVE")
         instance_status = self.get_vdu_state(vm_id)[0]
         return instance_status
             self.__wait_for_vm(vm_id, "ACTIVE")
         instance_status = self.get_vdu_state(vm_id)[0]
         return instance_status
+
+    def get_monitoring_data(self):
+        try:
+            self.logger.debug("Getting servers and ports data from Openstack VIMs.")
+            self._reload_connection()
+            all_servers = self.nova.servers.list(detailed=True)
+            try:
+                for server in all_servers:
+                    if server.flavor.get("original_name"):
+                        server.flavor["id"] = self.nova.flavors.find(
+                            name=server.flavor["original_name"]
+                        ).id
+            except nClient.exceptions.NotFound as e:
+                self.logger.warning(str(e.message))
+            all_ports = self.neutron.list_ports()
+            return all_servers, all_ports
+        except Exception as e:
+            raise vimconn.VimConnException(
+                f"Exception in monitoring while getting VMs and ports status: {str(e)}"
+            )