Disable the check of the release notes
diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
index 8f96f6b..54d0e88 100644
--- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
+++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
@@ -32,13 +32,16 @@ to the VIM connector's SFC resources as follows:
 
 import copy
 from http.client import HTTPException
+import json
 import logging
 from pprint import pformat
 import random
 import re
 import time
+from typing import Dict, List, Optional, Tuple
 
 from cinderclient import client as cClient
+import cinderclient.exceptions as cExceptions
 from glanceclient import client as glClient
 import glanceclient.exc as gl1Exceptions
 from keystoneauth1 import session
@@ -83,6 +86,16 @@ volume_timeout = 1800
 server_timeout = 1800
 
 
+def catch_any_exception(func):
+    def format_exception(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except Exception as e:
+            vimconnector._format_exception(e)
+
+    return format_exception
+
+
 class SafeDumper(yaml.SafeDumper):
     def represent_data(self, data):
         # Openstack APIs use custom subclasses of dict and YAML safe dumper
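
For reference, a minimal standalone sketch of what methods decorated with catch_any_exception do (delete_something is a hypothetical placeholder; the real decorator funnels the exception into vimconnector._format_exception, which re-raises a vimconn.* exception):

    def catch_any_exception(func):
        def format_exception(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                # stand-in for vimconnector._format_exception(e)
                print("translating {}: {}".format(type(e).__name__, e))
                raise

        return format_exception

    @catch_any_exception
    def delete_something(obj_id):
        raise KeyError(obj_id)

    # delete_something("f1") prints "translating KeyError: 'f1'" and re-raises
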
@@ -173,6 +186,8 @@ class vimconnector(vimconn.VimConnector):
 
         self.persistent_info = persistent_info
         self.availability_zone = persistent_info.get("availability_zone", None)
+        self.storage_availability_zone = None
+        self.vm_av_zone = None
         self.session = persistent_info.get("session", {"reload_client": True})
         self.my_tenant_id = self.session.get("my_tenant_id")
         self.nova = self.session.get("nova")
@@ -337,7 +352,7 @@ class vimconnector(vimconn.VimConnector):
             version = self.config.get("microversion")
 
             if not version:
-                version = "2.1"
+                version = "2.60"
 
 # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
 # Titanium cloud and StarlingX
@@ -353,12 +368,21 @@ class vimconnector(vimconn.VimConnector):
                 endpoint_type=self.endpoint_type,
                 region_name=region_name,
             )
-            self.cinder = self.session["cinder"] = cClient.Client(
-                2,
-                session=sess,
-                endpoint_type=self.endpoint_type,
-                region_name=region_name,
-            )
+
+            if sess.get_all_version_data(service_type="volumev2"):
+                self.cinder = self.session["cinder"] = cClient.Client(
+                    2,
+                    session=sess,
+                    endpoint_type=self.endpoint_type,
+                    region_name=region_name,
+                )
+            else:
+                self.cinder = self.session["cinder"] = cClient.Client(
+                    3,
+                    session=sess,
+                    endpoint_type=self.endpoint_type,
+                    region_name=region_name,
+                )
 
             try:
                 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
@@ -514,7 +538,8 @@ class vimconnector(vimconn.VimConnector):
         # Types. Also, abstract vimconnector should call the validation
         # method before the implemented VIM connectors are called.
 
-    def _format_exception(self, exception):
+    @staticmethod
+    def _format_exception(exception):
         """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
         message_error = str(exception)
         tip = ""
@@ -524,8 +549,10 @@ class vimconnector(vimconn.VimConnector):
             (
                 neExceptions.NetworkNotFoundClient,
                 nvExceptions.NotFound,
+                nvExceptions.ResourceNotFound,
                 ksExceptions.NotFound,
                 gl1Exceptions.HTTPNotFound,
+                cExceptions.NotFound,
             ),
         ):
             raise vimconn.VimConnNotFoundException(
@@ -540,6 +567,7 @@ class vimconnector(vimconn.VimConnector):
                 ConnectionError,
                 ksExceptions.ConnectionError,
                 neExceptions.ConnectionFailed,
+                cExceptions.ConnectionError,
             ),
         ):
             if type(exception).__name__ == "SSLError":
@@ -554,17 +582,26 @@ class vimconnector(vimconn.VimConnector):
                 KeyError,
                 nvExceptions.BadRequest,
                 ksExceptions.BadRequest,
+                gl1Exceptions.BadRequest,
+                cExceptions.BadRequest,
             ),
         ):
+            if message_error == "OS-EXT-SRV-ATTR:host":
+                tip = " (If the user does not have admin credentials, this attribute will be missing)"
+                raise vimconn.VimConnInsufficientCredentials(
+                    type(exception).__name__ + ": " + message_error + tip
+                )
             raise vimconn.VimConnException(
                 type(exception).__name__ + ": " + message_error
             )
+
         elif isinstance(
             exception,
             (
                 nvExceptions.ClientException,
                 ksExceptions.ClientException,
                 neExceptions.NeutronException,
+                cExceptions.ClientException,
             ),
         ):
             raise vimconn.VimConnUnexpectedResponse(
@@ -577,9 +614,10 @@ class vimconnector(vimconn.VimConnector):
         elif isinstance(exception, vimconn.VimConnException):
             raise exception
         else:  # ()
-            self.logger.error("General Exception " + message_error, exc_info=True)
+            logger = logging.getLogger("ro.vim.openstack")
+            logger.error("General Exception " + message_error, exc_info=True)
 
-            raise vimconn.VimConnConnectionException(
+            raise vimconn.VimConnException(
                 type(exception).__name__ + ": " + message_error
             )
 
@@ -617,6 +655,32 @@ class vimconnector(vimconn.VimConnector):
                         "Not found security group {} for this tenant".format(sg)
                     )
 
+    def _find_nova_server(self, vm_id):
+        """
+        Returns the VM instance from Openstack and completes it with flavor ID
+        Use this method rather than calling nova.servers.find directly, as the latter does not return the flavor ID with microversion>=2.47
+        """
+        try:
+            self._reload_connection()
+            server = self.nova.servers.find(id=vm_id)
+            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+            server_dict = server.to_dict()
+            try:
+                if server_dict["flavor"].get("original_name"):
+                    server_dict["flavor"]["id"] = self.nova.flavors.find(
+                        name=server_dict["flavor"]["original_name"]
+                    ).id
+        except nvExceptions.NotFound as e:
+                self.logger.warning(str(e.message))
+            return server_dict
+        except (
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
     def check_vim_connectivity(self):
         # just get network list to check connectivity and credentials
         self.get_network_list(filter_dict={})
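
Why _find_nova_server re-resolves the flavor: starting with compute API microversion 2.47, the server body embeds the flavor description (including original_name) instead of a reference carrying an id. A sketch of the two shapes, with made-up values:

    # microversion < 2.47: the server dict carries a flavor id
    flavor_old = {"id": "flavor-uuid", "links": ["..."]}

    # microversion >= 2.47: the flavor is embedded and has no id, hence
    # the nova.flavors.find(name=original_name) lookup above
    flavor_new = {"original_name": "m1.small", "vcpus": 1, "ram": 2048}
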
@@ -630,7 +694,6 @@ class vimconnector(vimconn.VimConnector):
         Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
         """
         self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
-
         try:
             self._reload_connection()
 
@@ -660,7 +723,6 @@ class vimconnector(vimconn.VimConnector):
     def new_tenant(self, tenant_name, tenant_description):
         """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
         self.logger.debug("Adding a new tenant name: %s", tenant_name)
-
         try:
             self._reload_connection()
 
@@ -686,7 +748,6 @@ class vimconnector(vimconn.VimConnector):
     def delete_tenant(self, tenant_id):
         """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
-
         try:
             self._reload_connection()
 
@@ -696,6 +757,7 @@ class vimconnector(vimconn.VimConnector):
                 self.keystone.tenants.delete(tenant_id)
 
             return tenant_id
+
         except (
             ksExceptions.ConnectionError,
             ksExceptions.ClientException,
@@ -752,7 +814,7 @@ class vimconnector(vimconn.VimConnector):
             self._reload_connection()
             network_dict = {"name": net_name, "admin_state_up": True}
 
-            if net_type in ("data", "ptp"):
+            if net_type in ("data", "ptp") or provider_network_profile:
                 provider_physical_network = None
 
                 if provider_network_profile and provider_network_profile.get(
@@ -785,7 +847,7 @@ class vimconnector(vimconn.VimConnector):
                         "dataplane_physical_net"
                     )
 
-                    # if it is non empty list, use the first value. If it is a string use the value directly
+                    # if it is a non-empty list, use the first value. If it is a string, use the value directly
                     if (
                         isinstance(provider_physical_network, (tuple, list))
                         and provider_physical_network
@@ -801,17 +863,17 @@ class vimconnector(vimconn.VimConnector):
                     )
 
                 if not self.config.get("multisegment_support"):
-                    network_dict[
-                        "provider:physical_network"
-                    ] = provider_physical_network
+                    network_dict["provider:physical_network"] = (
+                        provider_physical_network
+                    )
 
                     if (
                         provider_network_profile
                         and "network-type" in provider_network_profile
                     ):
-                        network_dict[
-                            "provider:network_type"
-                        ] = provider_network_profile["network-type"]
+                        network_dict["provider:network_type"] = (
+                            provider_network_profile["network-type"]
+                        )
                     else:
                         network_dict["provider:network_type"] = self.config.get(
                             "dataplane_network_type", "vlan"
@@ -878,7 +940,7 @@ class vimconnector(vimconn.VimConnector):
 
             if not ip_profile.get("subnet_address"):
                 # Fake subnet is required
-                subnet_rand = random.randint(0, 255)
+                subnet_rand = random.SystemRandom().randint(0, 255)
                 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
 
             if "ip_version" not in ip_profile:
@@ -923,6 +985,15 @@ class vimconnector(vimconn.VimConnector):
                 ip_str = str(netaddr.IPAddress(ip_int))
                 subnet["allocation_pools"][0]["end"] = ip_str
 
+            if (
+                ip_profile.get("ipv6_address_mode")
+                and ip_profile["ip_version"] != "IPv4"
+            ):
+                subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
+                # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
+                # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
+                subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
+
             # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
             self.neutron.create_subnet({"subnet": subnet})
 
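
A sketch of the new IPv6 branch with an assumed ip_profile (dhcpv6-stateful is one of the modes listed in the Neutron guide referenced above):

    ip_profile = {
        "ip_version": "IPv6",
        "subnet_address": "2001:db8::/64",
        "ipv6_address_mode": "dhcpv6-stateful",
    }
    subnet = {"network_id": "net-uuid", "ip_version": 6, "cidr": "2001:db8::/64"}
    if ip_profile.get("ipv6_address_mode") and ip_profile["ip_version"] != "IPv4":
        subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
        subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
    # subnet now carries both ipv6_address_mode and ipv6_ra_mode = "dhcpv6-stateful"
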
@@ -955,6 +1026,14 @@ class vimconnector(vimconn.VimConnector):
 
                     if k_item == "l2gwconn":
                         self.neutron.delete_l2_gateway_connection(k_id)
+
+                except (neExceptions.ConnectionFailed, ConnectionError) as e2:
+                    self.logger.error(
+                        "Error deleting l2 gateway connection: {}: {}".format(
+                            type(e2).__name__, e2
+                        )
+                    )
+                    self._format_exception(e2)
                 except Exception as e2:
                     self.logger.error(
                         "Error deleting l2 gateway connection: {}: {}".format(
@@ -979,7 +1058,6 @@ class vimconnector(vimconn.VimConnector):
         Returns the network list of dictionaries
         """
         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
-
         try:
             self._reload_connection()
             filter_dict_os = filter_dict.copy()
@@ -1039,6 +1117,7 @@ class vimconnector(vimconn.VimConnector):
 
         return net
 
+    @catch_any_exception
     def delete_network(self, net_id, created_items=None):
         """
         Removes a tenant network from VIM and its associated elements
@@ -1062,6 +1141,14 @@ class vimconnector(vimconn.VimConnector):
                     k_item, _, k_id = k.partition(":")
                     if k_item == "l2gwconn":
                         self.neutron.delete_l2_gateway_connection(k_id)
+
+                except (neExceptions.ConnectionFailed, ConnectionError) as e:
+                    self.logger.error(
+                        "Error deleting l2 gateway connection: {}: {}".format(
+                            type(e).__name__, e
+                        )
+                    )
+                    self._format_exception(e)
                 except Exception as e:
                     self.logger.error(
                         "Error deleting l2 gateway connection: {}: {}".format(
@@ -1074,21 +1161,22 @@ class vimconnector(vimconn.VimConnector):
             for p in ports["ports"]:
                 try:
                     self.neutron.delete_port(p["id"])
+
+                except (neExceptions.ConnectionFailed, ConnectionError) as e:
+                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))
+                    # If there is connection error, it raises.
+                    self._format_exception(e)
                 except Exception as e:
                     self.logger.error("Error deleting port %s: %s", p["id"], str(e))
 
             self.neutron.delete_network(net_id)
 
             return net_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NetworkNotFoundClient,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        except (neExceptions.NetworkNotFoundClient, neExceptions.NotFound) as e:
+            # If network to be deleted is not found, it does not raise.
+            self.logger.warning(
+                f"Error deleting network: {net_id} is not found, {str(e)}"
+            )
 
     def refresh_nets_status(self, net_list):
         """Get the status of the networks
@@ -1141,13 +1229,11 @@ class vimconnector(vimconn.VimConnector):
     def get_flavor(self, flavor_id):
         """Obtain flavor details from the VIM. Returns the flavor dict details"""
         self.logger.debug("Getting flavor '%s'", flavor_id)
-
         try:
             self._reload_connection()
             flavor = self.nova.flavors.find(id=flavor_id)
-            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-
             return flavor.to_dict()
+
         except (
             nvExceptions.NotFound,
             nvExceptions.ClientException,
@@ -1219,17 +1305,21 @@ class vimconnector(vimconn.VimConnector):
             )
         except (
             nvExceptions.NotFound,
+            nvExceptions.BadRequest,
             nvExceptions.ClientException,
             ksExceptions.ClientException,
             ConnectionError,
         ) as e:
             self._format_exception(e)
 
-    def process_resource_quota(self, quota, prefix, extra_specs):
-        """
-        :param prefix:
-        :param extra_specs:
-        :return:
+    @staticmethod
+    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
+        """Process resource quota and fill up extra_specs.
+        Args:
+            quota       (dict):         Quota of resources
+            prefix      (str):          Prefix used to build the extra_specs keys
+            extra_specs (dict):         Dict to be filled to be used during flavor creation
+
         """
         if "limit" in quota:
             extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
@@ -1241,158 +1331,321 @@ class vimconnector(vimconn.VimConnector):
             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
             extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
 
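
Worked example of the quota mapping above for prefix "cpu" (standalone sketch; the "shares" guard is inferred from the assignments shown in this hunk):

    quota = {"limit": 1000, "shares": 2}
    extra_specs = {}
    if "limit" in quota:
        extra_specs["quota:cpu_limit"] = quota["limit"]
    if "shares" in quota:
        extra_specs["quota:cpu_shares_level"] = "custom"
        extra_specs["quota:cpu_shares_share"] = quota["shares"]
    # extra_specs == {"quota:cpu_limit": 1000,
    #                 "quota:cpu_shares_level": "custom",
    #                 "quota:cpu_shares_share": 2}
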
-    def new_flavor(self, flavor_data, change_name_if_used=True):
-        """Adds a tenant flavor to openstack VIM
-        if change_name_if_used is True, it will change name in case of conflict, because it is not supported name
-         repetition
-        Returns the flavor identifier
+    @staticmethod
+    def process_numa_memory(
+        numa: dict, node_id: Optional[int], extra_specs: dict
+    ) -> None:
+        """Set the memory in extra_specs.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            node_id     (int):          ID of numa node
+            extra_specs (dict):         To be filled.
+
+        """
+        if not numa.get("memory"):
+            return
+        memory_mb = numa["memory"] * 1024
+        memory = "hw:numa_mem.{}".format(node_id)
+        extra_specs[memory] = int(memory_mb)
+
+    @staticmethod
+    def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
+        """Set the cpu in extra_specs.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            node_id     (int):          ID of numa node
+            extra_specs (dict):         To be filled.
+
+        """
+        if not numa.get("vcpu"):
+            return
+        vcpu = numa["vcpu"]
+        cpu = "hw:numa_cpus.{}".format(node_id)
+        vcpu = ",".join(map(str, vcpu))
+        extra_specs[cpu] = vcpu
+
+    @staticmethod
+    def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has paired-threads.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        Returns:
+            threads       (int)           Number of virtual cpus
+
+        """
+        if not numa.get("paired-threads"):
+            return
+
+        # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
+        threads = numa["paired-threads"] * 2
+        extra_specs["hw:cpu_thread_policy"] = "require"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return threads
+
+    @staticmethod
+    def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has cores.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        Returns:
+            cores       (int)           Number of virtual cpus
+
+        """
+        # cpu_thread_policy "isolate" implies that the host must not have an SMT
+        # architecture, or a non-SMT architecture will be emulated
+        if not numa.get("cores"):
+            return
+        cores = numa["cores"]
+        extra_specs["hw:cpu_thread_policy"] = "isolate"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return cores
+
+    @staticmethod
+    def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has threads.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        Returns:
+            threads       (int)           Number of virtual cpus
+
+        """
+        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
+        if not numa.get("threads"):
+            return
+        threads = numa["threads"]
+        extra_specs["hw:cpu_thread_policy"] = "prefer"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return threads
+
+    def _process_numa_parameters_of_flavor(
+        self, numas: List, extra_specs: Dict
+    ) -> None:
+        """Process numa parameters and fill up extra_specs.
+
+        Args:
+            numas   (list):             List of dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        """
+        numa_nodes = len(numas)
+        extra_specs["hw:numa_nodes"] = str(numa_nodes)
+        cpu_cores, cpu_threads = 0, 0
+
+        if self.vim_type == "VIO":
+            self.process_vio_numa_nodes(numa_nodes, extra_specs)
+
+        for numa in numas:
+            if "id" in numa:
+                node_id = numa["id"]
+                # overwrite ram and vcpus
+                # check if key "memory" is present in numa else use ram value at flavor
+                self.process_numa_memory(numa, node_id, extra_specs)
+                self.process_numa_vcpu(numa, node_id, extra_specs)
+
+            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
+            extra_specs["hw:cpu_sockets"] = str(numa_nodes)
+
+            if "paired-threads" in numa:
+                threads = self.process_numa_paired_threads(numa, extra_specs)
+                cpu_threads += threads
+
+            elif "cores" in numa:
+                cores = self.process_numa_cores(numa, extra_specs)
+                cpu_cores += cores
+
+            elif "threads" in numa:
+                threads = self.process_numa_threads(numa, extra_specs)
+                cpu_threads += threads
+
+        if cpu_cores:
+            extra_specs["hw:cpu_cores"] = str(cpu_cores)
+        if cpu_threads:
+            extra_specs["hw:cpu_threads"] = str(cpu_threads)
+
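
End-to-end sketch of what the NUMA processing above yields for a one-node descriptor with paired threads (illustrative input; keys follow the helpers above):

    numas = [{"id": 0, "memory": 2, "paired-threads": 2}]
    # _process_numa_parameters_of_flavor(numas, extra_specs) would leave:
    expected = {
        "hw:numa_nodes": "1",
        "hw:numa_mem.0": 2048,              # process_numa_memory: 2 GB * 1024
        "hw:cpu_sockets": "1",
        "hw:cpu_thread_policy": "require",  # process_numa_paired_threads
        "hw:cpu_policy": "dedicated",
        "hw:cpu_threads": "4",              # 2 paired-threads * 2
    }
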
+    @staticmethod
+    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
+        """Updates the extra_specs for VIO according to the number of numa nodes.
+
+        Args:
+
+            numa_nodes      (int):         Number of numa nodes
+            extra_specs     (dict):        Extra specs dict to be updated
+
+        """
+        # If there are several numas, we do not define specific affinity.
+        extra_specs["vmware:latency_sensitivity_level"] = "high"
+
+    def _change_flavor_name(
+        self, name: str, name_suffix: int, flavor_data: dict
+    ) -> str:
+        """Change the flavor name if the name already exists.
+
+        Args:
+            name    (str):          Flavor name to be checked
+            name_suffix (int):      Suffix to be appended to name
+            flavor_data (dict):     Flavor dict
+
+        Returns:
+            name    (str):          New flavor name to be used
+
+        """
+        # Get used names
+        fl = self.nova.flavors.list()
+        fl_names = [f.name for f in fl]
+
+        while name in fl_names:
+            name_suffix += 1
+            name = flavor_data["name"] + "-" + str(name_suffix)
+
+        return name
+
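
For instance, with flavors "vnf-flavor" and "vnf-flavor-1" already present, the loop above settles on "vnf-flavor-2" (standalone rendering; fl_names stands in for nova.flavors.list()):

    fl_names = ["vnf-flavor", "vnf-flavor-1"]
    name, name_suffix = "vnf-flavor", 0
    while name in fl_names:
        name_suffix += 1
        name = "vnf-flavor" + "-" + str(name_suffix)
    assert name == "vnf-flavor-2"
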
+    def _process_extended_config_of_flavor(
+        self, extended: dict, extra_specs: dict
+    ) -> None:
+        """Process the extended dict to fill up extra_specs.
+        Args:
+
+            extended                    (dict):         Keeping the extra specification of flavor
+            extra_specs                 (dict)          Dict to be filled to be used during flavor creation
+
+        """
+        quotas = {
+            "cpu-quota": "cpu",
+            "mem-quota": "memory",
+            "vif-quota": "vif",
+            "disk-io-quota": "disk_io",
+        }
+
+        page_sizes = {
+            "LARGE": "large",
+            "SMALL": "small",
+            "SIZE_2MB": "2MB",
+            "SIZE_1GB": "1GB",
+            "PREFER_LARGE": "any",
+        }
+
+        policies = {
+            "cpu-pinning-policy": "hw:cpu_policy",
+            "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
+            "mem-policy": "hw:numa_mempolicy",
+        }
+
+        numas = extended.get("numas")
+        if numas:
+            self._process_numa_parameters_of_flavor(numas, extra_specs)
+
+        for quota, item in quotas.items():
+            if quota in extended.keys():
+                self.process_resource_quota(extended.get(quota), item, extra_specs)
+
+        # Set the mempage size as specified in the descriptor
+        if extended.get("mempage-size"):
+            if extended["mempage-size"] in page_sizes.keys():
+                extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
+            else:
+                # Normally, validations in NBI should not allow this condition.
+                self.logger.debug(
+                    "Invalid mempage-size %s. Will be ignored",
+                    extended.get("mempage-size"),
+                )
+
+        for policy, hw_policy in policies.items():
+            if extended.get(policy):
+                extra_specs[hw_policy] = extended[policy].lower()
+
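
Illustration of the three lookup tables above with an assumed descriptor fragment:

    extended = {"mempage-size": "SIZE_2MB", "cpu-pinning-policy": "DEDICATED"}
    extra_specs = {}
    page_sizes = {"LARGE": "large", "SMALL": "small", "SIZE_2MB": "2MB",
                  "SIZE_1GB": "1GB", "PREFER_LARGE": "any"}
    policies = {"cpu-pinning-policy": "hw:cpu_policy",
                "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
                "mem-policy": "hw:numa_mempolicy"}
    if extended["mempage-size"] in page_sizes:
        extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
    for policy, hw_policy in policies.items():
        if extended.get(policy):
            extra_specs[hw_policy] = extended[policy].lower()
    # extra_specs == {"hw:mem_page_size": "2MB", "hw:cpu_policy": "dedicated"}
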
+    @staticmethod
+    def _get_flavor_details(flavor_data: dict) -> Tuple:
+        """Returns the details of flavor
+        Args:
+            flavor_data     (dict):     Dictionary that includes required flavor details
+
+        Returns:
+            ram, vcpus, extra_specs, extended   (tuple):    Main items of required flavor
+
+        """
+        return (
+            flavor_data.get("ram", 64),
+            flavor_data.get("vcpus", 1),
+            {},
+            flavor_data.get("extended"),
+        )
+
+    @catch_any_exception
+    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
+        """Adds a tenant flavor to openstack VIM.
+        if change_name_if_used is True, it will change name in case of conflict,
+        because it is not supported name repetition.
+
+        Args:
+            flavor_data (dict):             Flavor details to be processed
+            change_name_if_used (bool):     Change name in case of conflict
+
+        Returns:
+             flavor_id  (str):     flavor identifier
+
         """
         self.logger.debug("Adding flavor '%s'", str(flavor_data))
         retry = 0
         max_retries = 3
         name_suffix = 0
+        name = flavor_data["name"]
+        while retry < max_retries:
+            retry += 1
+            try:
+                self._reload_connection()
 
-        try:
-            name = flavor_data["name"]
-            while retry < max_retries:
-                retry += 1
-                try:
-                    self._reload_connection()
-
-                    if change_name_if_used:
-                        # get used names
-                        fl_names = []
-                        fl = self.nova.flavors.list()
-
-                        for f in fl:
-                            fl_names.append(f.name)
-
-                        while name in fl_names:
-                            name_suffix += 1
-                            name = flavor_data["name"] + "-" + str(name_suffix)
-
-                    ram = flavor_data.get("ram", 64)
-                    vcpus = flavor_data.get("vcpus", 1)
-                    extra_specs = {}
-
-                    extended = flavor_data.get("extended")
-                    if extended:
-                        numas = extended.get("numas")
-
-                        if numas:
-                            numa_nodes = len(numas)
-
-                            if numa_nodes > 1:
-                                return -1, "Can not add flavor with more than one numa"
-
-                            extra_specs["hw:numa_nodes"] = str(numa_nodes)
-                            extra_specs["hw:mem_page_size"] = "large"
-                            extra_specs["hw:cpu_policy"] = "dedicated"
-                            extra_specs["hw:numa_mempolicy"] = "strict"
-
-                            if self.vim_type == "VIO":
-                                extra_specs[
-                                    "vmware:extra_config"
-                                ] = '{"numa.nodeAffinity":"0"}'
-                                extra_specs["vmware:latency_sensitivity_level"] = "high"
-
-                            for numa in numas:
-                                # overwrite ram and vcpus
-                                # check if key "memory" is present in numa else use ram value at flavor
-                                if "memory" in numa:
-                                    ram = numa["memory"] * 1024
-                                # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
-                                # implemented/virt-driver-cpu-thread-pinning.html
-                                extra_specs["hw:cpu_sockets"] = 1
-
-                                if "paired-threads" in numa:
-                                    vcpus = numa["paired-threads"] * 2
-                                    # cpu_thread_policy "require" implies that the compute node must have an
-                                    # STM architecture
-                                    extra_specs["hw:cpu_thread_policy"] = "require"
-                                    extra_specs["hw:cpu_policy"] = "dedicated"
-                                elif "cores" in numa:
-                                    vcpus = numa["cores"]
-                                    # cpu_thread_policy "prefer" implies that the host must not have an SMT
-                                    # architecture, or a non-SMT architecture will be emulated
-                                    extra_specs["hw:cpu_thread_policy"] = "isolate"
-                                    extra_specs["hw:cpu_policy"] = "dedicated"
-                                elif "threads" in numa:
-                                    vcpus = numa["threads"]
-                                    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
-                                    # architecture
-                                    extra_specs["hw:cpu_thread_policy"] = "prefer"
-                                    extra_specs["hw:cpu_policy"] = "dedicated"
-                                # for interface in numa.get("interfaces",() ):
-                                #     if interface["dedicated"]=="yes":
-                                #         raise vimconn.VimConnException("Passthrough interfaces are not supported
-                                #         for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
-                                #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
-                                #      when a way to connect it is available
-                        elif extended.get("cpu-quota"):
-                            self.process_resource_quota(
-                                extended.get("cpu-quota"), "cpu", extra_specs
-                            )
-
-                        if extended.get("mem-quota"):
-                            self.process_resource_quota(
-                                extended.get("mem-quota"), "memory", extra_specs
-                            )
+                if change_name_if_used:
+                    name = self._change_flavor_name(name, name_suffix, flavor_data)
 
-                        if extended.get("vif-quota"):
-                            self.process_resource_quota(
-                                extended.get("vif-quota"), "vif", extra_specs
-                            )
+                ram, vcpus, extra_specs, extended = self._get_flavor_details(
+                    flavor_data
+                )
+                if extended:
+                    self._process_extended_config_of_flavor(extended, extra_specs)
+
+                # Create flavor
+
+                new_flavor = self.nova.flavors.create(
+                    name=name,
+                    ram=ram,
+                    vcpus=vcpus,
+                    disk=flavor_data.get("disk", 0),
+                    ephemeral=flavor_data.get("ephemeral", 0),
+                    swap=flavor_data.get("swap", 0),
+                    is_public=flavor_data.get("is_public", True),
+                )
 
-                        if extended.get("disk-io-quota"):
-                            self.process_resource_quota(
-                                extended.get("disk-io-quota"), "disk_io", extra_specs
-                            )
+                # Add metadata
+                if extra_specs:
+                    new_flavor.set_keys(extra_specs)
 
-                    # create flavor
-                    new_flavor = self.nova.flavors.create(
-                        name=name,
-                        ram=ram,
-                        vcpus=vcpus,
-                        disk=flavor_data.get("disk", 0),
-                        ephemeral=flavor_data.get("ephemeral", 0),
-                        swap=flavor_data.get("swap", 0),
-                        is_public=flavor_data.get("is_public", True),
-                    )
-                    # add metadata
-                    if extra_specs:
-                        new_flavor.set_keys(extra_specs)
+                return new_flavor.id
 
-                    return new_flavor.id
-                except nvExceptions.Conflict as e:
-                    if change_name_if_used and retry < max_retries:
-                        continue
+            except nvExceptions.Conflict as e:
+                if change_name_if_used and retry < max_retries:
+                    continue
 
-                    self._format_exception(e)
-        # except nvExceptions.BadRequest as e:
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-            KeyError,
-        ) as e:
-            self._format_exception(e)
+                self._format_exception(e)
 
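
A usage sketch of new_flavor with a minimal flavor_data (only keys read by the code above; conn is a hypothetical vimconnector instance):

    flavor_data = {
        "name": "vnf-flavor",
        "ram": 2048,      # MB, defaults to 64
        "vcpus": 2,       # defaults to 1
        "disk": 10,       # GB, defaults to 0
        "extended": {
            "mempage-size": "LARGE",
            "numas": [{"id": 0, "threads": 2}],
        },
    }
    # flavor_id = conn.new_flavor(flavor_data)
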
+    @catch_any_exception
     def delete_flavor(self, flavor_id):
         """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
         try:
             self._reload_connection()
             self.nova.flavors.delete(flavor_id)
-
             return flavor_id
-        # except nvExceptions.BadRequest as e:
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+
+        except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
+            # If flavor is not found, it does not raise.
+            self.logger.warning(
+                f"Error deleting flavor: {flavor_id} is not found, {str(e.message)}"
+            )
 
     def new_image(self, image_dict):
         """
@@ -1475,12 +1728,6 @@ class vimconnector(vimconn.VimConnector):
                 self.glance.images.update(new_image.id, **metadata_to_load)
 
                 return new_image.id
-            except (
-                nvExceptions.Conflict,
-                ksExceptions.ClientException,
-                nvExceptions.ClientException,
-            ) as e:
-                self._format_exception(e)
             except (
                 HTTPException,
                 gl1Exceptions.HTTPException,
@@ -1496,7 +1743,10 @@ class vimconnector(vimconn.VimConnector):
                     "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                     http_code=vimconn.HTTP_Bad_Request,
                 )
+            except Exception as e:
+                self._format_exception(e)
 
+    @catch_any_exception
     def delete_image(self, image_id):
         """Deletes a tenant image from openstack VIM. Returns the old id"""
         try:
@@ -1504,36 +1754,25 @@ class vimconnector(vimconn.VimConnector):
             self.glance.images.delete(image_id)
 
             return image_id
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            gl1Exceptions.CommunicationError,
-            gl1Exceptions.HTTPNotFound,
-            ConnectionError,
-        ) as e:  # TODO remove
-            self._format_exception(e)
+        except gl1Exceptions.NotFound as e:
+            # If image is not found, it does not raise.
+            self.logger.warning(
+                f"Error deleting image: {image_id} is not found, {str(e)}"
+            )
 
+    @catch_any_exception
     def get_image_id_from_path(self, path):
         """Get the image id from image path in the VIM database. Returns the image_id"""
-        try:
-            self._reload_connection()
-            images = self.glance.images.list()
+        self._reload_connection()
+        images = self.glance.images.list()
 
-            for image in images:
-                if image.metadata.get("location") == path:
-                    return image.id
+        for image in images:
+            if image.metadata.get("location") == path:
+                return image.id
 
-            raise vimconn.VimConnNotFoundException(
-                "image with location '{}' not found".format(path)
-            )
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            gl1Exceptions.CommunicationError,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        raise vimconn.VimConnNotFoundException(
+            "image with location '{}' not found".format(path)
+        )
 
     def get_image_list(self, filter_dict={}):
         """Obtain tenant images from VIM
@@ -1546,7 +1785,6 @@ class vimconnector(vimconn.VimConnector):
             List can be empty
         """
         self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
-
         try:
             self._reload_connection()
             # filter_dict_os = filter_dict.copy()
@@ -1573,6 +1811,7 @@ class vimconnector(vimconn.VimConnector):
                     pass
 
             return filtered_list
+
         except (
             ksExceptions.ClientException,
             nvExceptions.ClientException,
@@ -1636,6 +1875,10 @@ class vimconnector(vimconn.VimConnector):
                 self.availability_zone = vim_availability_zones
         else:
             self.availability_zone = self._get_openstack_availablity_zones()
+        if "storage_availability_zone" in self.config:
+            self.storage_availability_zone = self.config.get(
+                "storage_availability_zone"
+            )
 
     def _get_vm_availability_zone(
         self, availability_zone_index, availability_zone_list
@@ -1674,258 +1917,929 @@ class vimconnector(vimconn.VimConnector):
                 "Not enough availability zones at VIM for this deployment"
             )
 
-    def new_vminstance(
-        self,
-        name,
-        description,
-        start,
-        image_id,
-        flavor_id,
-        affinity_group_list,
-        net_list,
-        cloud_config=None,
-        disk_list=None,
-        availability_zone_index=None,
-        availability_zone_list=None,
-    ):
-        """Adds a VM instance to VIM
-        Params:
-            start: indicates if VM must start or boot in pause mode. Ignored
-            image_id,flavor_id: image and flavor uuid
-            affinity_group_list: list of affinity groups, each one is a dictionary.
-                Ignore if empty.
-            net_list: list of interfaces, each one is a dictionary with:
-                name:
-                net_id: network uuid to connect
-                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
-                model: interface model, ignored #TODO
-                mac_address: used for  SR-IOV ifaces #TODO for other types
-                use: 'data', 'bridge',  'mgmt'
-                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
-                vim_id: filled/added by this function
-                floating_ip: True/False (or it can be None)
-                port_security: True/False
-            'cloud_config': (optional) dictionary with:
-                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
-                'users': (optional) list of users to be inserted, each item is a dict with:
-                    'name': (mandatory) user name,
-                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
-                'user-data': (optional) string is a text script to be passed directly to cloud-init
-                'config-files': (optional). List of files to be transferred. Each item is a dict with:
-                    'dest': (mandatory) string with the destination absolute path
-                    'encoding': (optional, by default text). Can be one of:
-                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
-                    'content' (mandatory): string with the content of the file
-                    'permissions': (optional) string with file permissions, typically octal notation '0644'
-                    'owner': (optional) file owner, string with the format 'owner:group'
-                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
-            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
-                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
-                'size': (mandatory) string with the size of the disk in GB
-                'vim_id' (optional) should use this existing volume id
-            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
-            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
-                availability_zone_index is None
-                #TODO ip, security groups
-        Returns a tuple with the instance identifier and created_items or raises an exception on error
-            created_items can be None or a dictionary where this method can include key-values that will be passed to
-            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
-            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-            as not present.
+    def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
+        """Fill up the security_groups in the port_dict.
+
+        Args:
+            net (dict):             Network details
+            port_dict   (dict):     Port details
+
         """
-        self.logger.debug(
-            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
-            image_id,
-            flavor_id,
-            str(net_list),
-        )
+        if (
+            self.config.get("security_groups")
+            and net.get("port_security") is not False
+            and not self.config.get("no_port_security_extension")
+        ):
+            if not self.security_groups_id:
+                self._get_ids_from_name()
 
-        try:
-            server = None
-            created_items = {}
-            # metadata = {}
-            net_list_vim = []
-            external_network = []
-            # ^list of external networks to be connected to instance, later on used to create floating_ip
-            no_secured_ports = []  # List of port-is with port-security disabled
-            self._reload_connection()
-            # metadata_vpci = {}  # For a specific neutron plugin
-            block_device_mapping = None
+            port_dict["security_groups"] = self.security_groups_id
 
-            for net in net_list:
-                if not net.get("net_id"):  # skip non connected iface
-                    continue
+    def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
+        """Fill up the network binding depending on network type in the port_dict.
 
-                port_dict = {
-                    "network_id": net["net_id"],
-                    "name": net.get("name"),
-                    "admin_state_up": True,
-                }
+        Args:
+            net (dict):             Network details
+            port_dict   (dict):     Port details
 
-                if (
-                    self.config.get("security_groups")
-                    and net.get("port_security") is not False
-                    and not self.config.get("no_port_security_extension")
-                ):
-                    if not self.security_groups_id:
-                        self._get_ids_from_name()
+        """
+        if not net.get("type"):
+            raise vimconn.VimConnException("Type is missing in the network details.")
 
-                    port_dict["security_groups"] = self.security_groups_id
+        if net["type"] == "virtual":
+            pass
 
-                if net["type"] == "virtual":
-                    pass
-                    # if "vpci" in net:
-                    #     metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
-                elif net["type"] == "VF" or net["type"] == "SR-IOV":  # for VF
-                    # if "vpci" in net:
-                    #     if "VF" not in metadata_vpci:
-                    #         metadata_vpci["VF"]=[]
-                    #     metadata_vpci["VF"].append([ net["vpci"], "" ])
-                    port_dict["binding:vnic_type"] = "direct"
-
-                    # VIO specific Changes
-                    if self.vim_type == "VIO":
-                        # Need to create port with port_security_enabled = False and no-security-groups
-                        port_dict["port_security_enabled"] = False
-                        port_dict["provider_security_groups"] = []
-                        port_dict["security_groups"] = []
-                else:  # For PT PCI-PASSTHROUGH
-                    # if "vpci" in net:
-                    #     if "PF" not in metadata_vpci:
-                    #         metadata_vpci["PF"]=[]
-                    #     metadata_vpci["PF"].append([ net["vpci"], "" ])
-                    port_dict["binding:vnic_type"] = "direct-physical"
-
-                if not port_dict["name"]:
-                    port_dict["name"] = name
-
-                if net.get("mac_address"):
-                    port_dict["mac_address"] = net["mac_address"]
-
-                if net.get("ip_address"):
-                    port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
-                    # TODO add "subnet_id": <subnet_id>
-
-                new_port = self.neutron.create_port({"port": port_dict})
-                created_items["port:" + str(new_port["port"]["id"])] = True
-                net["mac_adress"] = new_port["port"]["mac_address"]
-                net["vim_id"] = new_port["port"]["id"]
-                # if try to use a network without subnetwork, it will return a emtpy list
-                fixed_ips = new_port["port"].get("fixed_ips")
-
-                if fixed_ips:
-                    net["ip"] = fixed_ips[0].get("ip_address")
-                else:
-                    net["ip"] = None
-
-                port = {"port-id": new_port["port"]["id"]}
-                if float(self.nova.api_version.get_string()) >= 2.32:
-                    port["tag"] = new_port["port"]["name"]
-
-                net_list_vim.append(port)
-
-                if net.get("floating_ip", False):
-                    net["exit_on_floating_ip_error"] = True
-                    external_network.append(net)
-                elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
-                    net["exit_on_floating_ip_error"] = False
-                    external_network.append(net)
-                    net["floating_ip"] = self.config.get("use_floating_ip")
-
-                # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
-                # is dropped.
-                # As a workaround we wait until the VM is active and then disable the port-security
-                if net.get("port_security") is False and not self.config.get(
-                    "no_port_security_extension"
-                ):
-                    no_secured_ports.append(
-                        (
-                            new_port["port"]["id"],
-                            net.get("port_security_disable_strategy"),
-                        )
+        # For VF
+        elif net["type"] == "VF" or net["type"] == "SR-IOV":
+            port_dict["binding:vnic_type"] = "direct"
+
+            # VIO specific Changes
+            if self.vim_type == "VIO":
+                # Need to create port with port_security_enabled = False and no-security-groups
+                port_dict["port_security_enabled"] = False
+                port_dict["provider_security_groups"] = []
+                port_dict["security_groups"] = []
+
+        else:
+            # For PT PCI-PASSTHROUGH
+            port_dict["binding:vnic_type"] = "direct-physical"
+
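
Summary of the dispatch above, descriptor interface type to Neutron binding ("PCI-PASSTHROUGH"/"PF" covers any type other than virtual/VF/SR-IOV, per the old new_vminstance docstring):

    # net["type"]            -> binding:vnic_type
    # "virtual"              -> (unset, default binding)
    # "VF" / "SR-IOV"        -> "direct"          (+ VIO: port security off)
    # "PCI-PASSTHROUGH"/"PF" -> "direct-physical"
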
+    @staticmethod
+    def _set_fixed_ip(new_port: dict, net: dict) -> None:
+        """Set the "ip" parameter in net dictionary.
+
+        Args:
+            new_port    (dict):     New created port
+            net         (dict):     Network details
+
+        """
+        fixed_ips = new_port["port"].get("fixed_ips")
+
+        if fixed_ips:
+            net["ip"] = fixed_ips[0].get("ip_address")
+        else:
+            net["ip"] = None
+
+    @staticmethod
+    def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
+        """Fill up the mac_address and fixed_ips in port_dict.
+
+        Args:
+            net (dict):             Network details
+            port_dict   (dict):     Port details
+
+        """
+        if net.get("mac_address"):
+            port_dict["mac_address"] = net["mac_address"]
+
+        ip_dual_list = []
+        if ip_list := net.get("ip_address"):
+            if not isinstance(ip_list, list):
+                ip_list = [ip_list]
+            for ip in ip_list:
+                ip_dict = {"ip_address": ip}
+                ip_dual_list.append(ip_dict)
+            port_dict["fixed_ips"] = ip_dual_list
+            # TODO add "subnet_id": <subnet_id>
+
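
The two accepted shapes of net["ip_address"] and the resulting fixed_ips (values assumed):

    # single address (string)
    #   {"ip_address": "10.0.0.5"}
    #       -> port_dict["fixed_ips"] == [{"ip_address": "10.0.0.5"}]
    # dual stack (list)
    #   {"ip_address": ["10.0.0.5", "2001:db8::5"]}
    #       -> port_dict["fixed_ips"] == [{"ip_address": "10.0.0.5"},
    #                                     {"ip_address": "2001:db8::5"}]
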
+    def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
+        """Create new port using neutron.
+
+        Args:
+            port_dict   (dict):         Port details
+            created_items   (dict):     All created items
+            net (dict):                 Network details
+
+        Returns:
+            new_port    (dict):         New created port
+
+        """
+        new_port = self.neutron.create_port({"port": port_dict})
+        created_items["port:" + str(new_port["port"]["id"])] = True
+        net["mac_address"] = new_port["port"]["mac_address"]
+        net["vim_id"] = new_port["port"]["id"]
+
+        return new_port
+
+    def _create_port(
+        self, net: dict, name: str, created_items: dict
+    ) -> Tuple[dict, dict]:
+        """Create port using net details.
+
+        Args:
+            net (dict):                 Network details
+            name    (str):              Name to be used as port name if net dict does not include a name
+            created_items   (dict):     All created items
+
+        Returns:
+            new_port, port              New created port, port dictionary
+
+        """
+
+        port_dict = {
+            "network_id": net["net_id"],
+            "name": net.get("name"),
+            "admin_state_up": True,
+        }
+
+        if not port_dict["name"]:
+            port_dict["name"] = name
+
+        self._prepare_port_dict_security_groups(net, port_dict)
+
+        self._prepare_port_dict_binding(net, port_dict)
+
+        vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
+
+        new_port = self._create_new_port(port_dict, created_items, net)
+
+        vimconnector._set_fixed_ip(new_port, net)
+
+        port = {"port-id": new_port["port"]["id"]}
+
+        if float(self.nova.api_version.get_string()) >= 2.32:
+            port["tag"] = new_port["port"]["name"]
+
+        return new_port, port
+
+    def _prepare_network_for_vminstance(
+        self,
+        name: str,
+        net_list: list,
+        created_items: dict,
+        net_list_vim: list,
+        external_network: list,
+        no_secured_ports: list,
+    ) -> None:
+        """Create port and fill up net dictionary for new VM instance creation.
+
+        Args:
+            name    (str):                  Name of network
+            net_list    (list):             List of networks
+            created_items   (dict):         All created items belonging to a VM
+            net_list_vim    (list):         List of ports
+            external_network    (list):     List of external-networks
+            no_secured_ports    (list):     Port security disabled ports
+        """
+
+        self._reload_connection()
+
+        for net in net_list:
+            # Skip non-connected iface
+            if not net.get("net_id"):
+                continue
+
+            new_port, port = self._create_port(net, name, created_items)
+
+            net_list_vim.append(port)
+
+            if net.get("floating_ip", False):
+                net["exit_on_floating_ip_error"] = True
+                external_network.append(net)
+
+            elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
+                net["exit_on_floating_ip_error"] = False
+                external_network.append(net)
+                net["floating_ip"] = self.config.get("use_floating_ip")
+
+            # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
+            # is dropped. As a workaround we wait until the VM is active and then disable the port-security
+            if net.get("port_security") is False and not self.config.get(
+                "no_port_security_extension"
+            ):
+                no_secured_ports.append(
+                    (
+                        new_port["port"]["id"],
+                        net.get("port_security_disable_strategy"),
                     )
+                )
 
-            # if metadata_vpci:
-            #     metadata = {"pci_assignement": json.dumps(metadata_vpci)}
-            #     if len(metadata["pci_assignement"]) >255:
-            #         #limit the metadata size
-            #         #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
-            #         self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
-            #         metadata = {}
+    def _prepare_persistent_root_volumes(
+        self,
+        name: str,
+        storage_av_zone: list,
+        disk: dict,
+        base_disk_index: int,
+        block_device_mapping: dict,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ) -> Optional[str]:
+        """Prepare persistent root volumes for new VM instance.
 
-            self.logger.debug(
-                "name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
-                name,
-                image_id,
-                flavor_id,
-                str(net_list_vim),
-                description,
+        Args:
+            name    (str):                      Name of VM instance
+            storage_av_zone  (list):            Storage availability zones
+            disk    (dict):                     Disk details
+            base_disk_index (int):              Disk index
+            block_device_mapping    (dict):     Block device details
+            existing_vim_volumes    (list):     Existing disk details
+            created_items   (dict):             All created items belonging to the VM
+
+        Returns:
+            boot_volume_id  (str):              ID of boot volume
+
+        """
+        # Disk may include only vim_volume_id or only vim_id.
+        # Use an existing persistent root volume found via vim_volume_id or vim_id
+        key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
+        if disk.get(key_id):
+            block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
+            existing_vim_volumes.append({"id": disk[key_id]})
+        else:
+            # Create persistent root volume
+            volume = self.cinder.volumes.create(
+                size=disk["size"],
+                name=name + "vd" + chr(base_disk_index),
+                imageRef=disk["image_id"],
+                # Make sure volume is in the same AZ as the VM to be attached to
+                availability_zone=storage_av_zone,
+            )
+            boot_volume_id = volume.id
+            self.update_block_device_mapping(
+                volume=volume,
+                block_device_mapping=block_device_mapping,
+                base_disk_index=base_disk_index,
+                disk=disk,
+                created_items=created_items,
             )
 
-            # cloud config
-            config_drive, userdata = self._create_user_data(cloud_config)
+            return boot_volume_id
+
+    @staticmethod
+    def update_block_device_mapping(
+        volume: object,
+        block_device_mapping: dict,
+        base_disk_index: int,
+        disk: dict,
+        created_items: dict,
+    ) -> None:
+        """Add volume information to block device mapping dict.
+        Args:
+            volume  (object):                   Created volume object
+            block_device_mapping    (dict):     Block device details
+            base_disk_index (int):              Disk index
+            disk    (dict):                     Disk details
+            created_items   (dict):             All created items belonging to the VM
+        """
+        if not volume:
+            raise vimconn.VimConnException("Volume is empty.")
 
-            # Create additional volumes in case these are present in disk_list
-            base_disk_index = ord("b")
-            boot_volume_id = None
-            if disk_list:
-                block_device_mapping = {}
-                for disk in disk_list:
-                    if disk.get("vim_id"):
-                        block_device_mapping["_vd" + chr(base_disk_index)] = disk[
-                            "vim_id"
-                        ]
-                    else:
-                        if "image_id" in disk:
-                            base_disk_index = ord("a")
-                            volume = self.cinder.volumes.create(
-                                size=disk["size"],
-                                name=name + "_vd" + chr(base_disk_index),
-                                imageRef=disk["image_id"],
+        if not hasattr(volume, "id"):
+            raise vimconn.VimConnException(
+                "Created volume is not valid, does not have id attribute."
+            )
+
+        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+        if disk.get("multiattach"):  # multiattach volumes do not belong to VDUs
+            return
+        volume_txt = "volume:" + str(volume.id)
+        if disk.get("keep"):
+            volume_txt += ":keep"
+        created_items[volume_txt] = True
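+        # Illustrative sketch (added comment, hypothetical IDs): created_items keys
+        # follow the "<type>:<id>[:keep]" convention used throughout this connector:
+        #   created_items = {
+        #       "volume:8f1d...": True,         # volume deleted on VM deletion
+        #       "volume:9c2e...:keep": True,    # volume kept on VM deletion
+        #       "port:4a6b...": True,
+        #   }
+        # while block_device_mapping maps device names to volume IDs, e.g.
+        #   block_device_mapping = {"vda": "<root-volume-id>", "vdb": "<data-volume-id>"}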
+
+    @catch_any_exception
+    def new_shared_volumes(self, shared_volume_data) -> Tuple[str, str]:
+        """Create a multiattach volume to be shared across VM instances.
+
+        Args:
+            shared_volume_data  (dict):     Includes the "name" and "size" of the volume
+
+        Returns:
+            (volume name, volume id)    (Tuple[str, str])
+        """
+        availability_zone = (
+            self.storage_availability_zone
+            if self.storage_availability_zone
+            else self.vm_av_zone
+        )
+        volume = self.cinder.volumes.create(
+            size=shared_volume_data["size"],
+            name=shared_volume_data["name"],
+            volume_type="multiattach",
+            availability_zone=availability_zone,
+        )
+        return volume.name, volume.id
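+        # Usage sketch (added comment, hypothetical values):
+        #   name, vol_id = self.new_shared_volumes({"name": "shared-data", "size": 10})
+        # The multiattach volume can then be attached by several VDUs; see
+        # _prepare_shared_volumes(), which finds it again by name.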
+
+    def _prepare_shared_volumes(
+        self,
+        name: str,
+        disk: dict,
+        base_disk_index: int,
+        block_device_mapping: dict,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ):
+        """Prepare an already created shared (multiattach) volume for the VM.
+
+        Looks the volume up by name, waits until its status allows attachment,
+        and adds it to the block device mapping.
+
+        Args:
+            name    (str):                      Name of VM instance
+            disk    (dict):                     Disk details, must include "name"
+            base_disk_index (int):              Disk index
+            block_device_mapping    (dict):     Block device details
+            existing_vim_volumes    (list):     Existing disk details
+            created_items   (dict):             All created items belonging to the VM
+        """
+        volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
+        if volumes.get(disk["name"]):
+            sv_id = volumes[disk["name"]]
+            max_retries = 3
+            vol_status = ""
+            # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
+            while max_retries:
+                max_retries -= 1
+                volume = self.cinder.volumes.get(sv_id)
+                vol_status = volume.status
+                if volume.status not in ("in-use", "available"):
+                    time.sleep(5)
+                    continue
+                self.update_block_device_mapping(
+                    volume=volume,
+                    block_device_mapping=block_device_mapping,
+                    base_disk_index=base_disk_index,
+                    disk=disk,
+                    created_items=created_items,
+                )
+                return
+            raise vimconn.VimConnException(
+                "Shared volume is not prepared, status is: {}".format(vol_status),
+                http_code=vimconn.HTTP_Internal_Server_Error,
+            )
+
+    def _prepare_non_root_persistent_volumes(
+        self,
+        name: str,
+        disk: dict,
+        storage_av_zone: list,
+        block_device_mapping: dict,
+        base_disk_index: int,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ) -> None:
+        """Prepare persistent volumes for new VM instance.
+
+        Args:
+            name    (str):                      Name of VM instance
+            disk    (dict):                     Disk details
+            storage_av_zone  (list):            Storage availability zone
+            block_device_mapping    (dict):     Block device details
+            base_disk_index (int):              Disk index
+            existing_vim_volumes    (list):     Existing disk details
+            created_items   (dict):             All created items belonging to the VM
+        """
+        # Non-root persistent volumes
+        # Disk may include only vim_volume_id or only vim_id.
+        key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
+        if disk.get(key_id):
+            # Use existing persistent volume
+            block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
+            existing_vim_volumes.append({"id": disk[key_id]})
+        else:
+            volume_name = f"{name}vd{chr(base_disk_index)}"
+            volume = self.cinder.volumes.create(
+                size=disk["size"],
+                name=volume_name,
+                # Make sure volume is in the same AZ as the VM to be attached to
+                availability_zone=storage_av_zone,
+            )
+            self.update_block_device_mapping(
+                volume=volume,
+                block_device_mapping=block_device_mapping,
+                base_disk_index=base_disk_index,
+                disk=disk,
+                created_items=created_items,
+            )
+
+    def _wait_for_created_volumes_availability(
+        self, elapsed_time: int, created_items: dict
+    ) -> Optional[int]:
+        """Wait till created volumes become available.
+
+        Args:
+            elapsed_time    (int):          Time already spent waiting (seconds)
+            created_items   (dict):         All created items belonging to the VM
+
+        Returns:
+            elapsed_time    (int):          Updated elapsed time after waiting (seconds)
+
+        """
+        while elapsed_time < volume_timeout:
+            for created_item in created_items:
+                v, volume_id = (
+                    created_item.split(":")[0],
+                    created_item.split(":")[1],
+                )
+                if v == "volume":
+                    volume = self.cinder.volumes.get(volume_id)
+                    if (
+                        volume.volume_type == "multiattach"
+                        and volume.status == "in-use"
+                    ):
+                        return elapsed_time
+                    elif volume.status != "available":
+                        break
+            else:
+                # All ready: break from while
+                break
+
+            time.sleep(5)
+            elapsed_time += 5
+
+        return elapsed_time
+
+    def _wait_for_existing_volumes_availability(
+        self, elapsed_time: int, existing_vim_volumes: list
+    ) -> Optional[int]:
+        """Wait till existing volumes become available.
+
+        Args:
+            elapsed_time    (int):          Time already spent waiting (seconds)
+            existing_vim_volumes   (list):  Existing volume details
+
+        Returns:
+            elapsed_time    (int):          Updated elapsed time after waiting (seconds)
+
+        """
+
+        while elapsed_time < volume_timeout:
+            for volume in existing_vim_volumes:
+                v = self.cinder.volumes.get(volume["id"])
+                if v.volume_type == "multiattach" and v.status == "in-use":
+                    return elapsed_time
+                elif v.status != "available":
+                    break
+            else:  # all ready: break from while
+                break
+
+            time.sleep(5)
+            elapsed_time += 5
+
+        return elapsed_time
+
+    def _prepare_disk_for_vminstance(
+        self,
+        name: str,
+        existing_vim_volumes: list,
+        created_items: dict,
+        storage_av_zone: list,
+        block_device_mapping: dict,
+        disk_list: list = None,
+    ) -> None:
+        """Prepare all volumes for new VM instance.
+
+        Args:
+            name    (str):                      Name of Instance
+            existing_vim_volumes    (list):     List of existing volumes
+            created_items   (dict):             All created items belonging to the VM
+            storage_av_zone  (list):            Storage availability zone
+            block_device_mapping (dict):        Block devices to be attached to VM
+            disk_list   (list):                 List of disks
+
+        """
+        # Create additional volumes in case these are present in disk_list
+        base_disk_index = ord("b")
+        boot_volume_id = None
+        elapsed_time = 0
+        for disk in disk_list:
+            if "image_id" in disk:
+                # Root persistent volume
+                base_disk_index = ord("a")
+                boot_volume_id = self._prepare_persistent_root_volumes(
+                    name=name,
+                    storage_av_zone=storage_av_zone,
+                    disk=disk,
+                    base_disk_index=base_disk_index,
+                    block_device_mapping=block_device_mapping,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
+            elif disk.get("multiattach"):
+                self._prepare_shared_volumes(
+                    name=name,
+                    disk=disk,
+                    base_disk_index=base_disk_index,
+                    block_device_mapping=block_device_mapping,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
+            else:
+                # Non-root persistent volume
+                self._prepare_non_root_persistent_volumes(
+                    name=name,
+                    disk=disk,
+                    storage_av_zone=storage_av_zone,
+                    block_device_mapping=block_device_mapping,
+                    base_disk_index=base_disk_index,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
+            base_disk_index += 1
+
+        # Wait until created volumes reach "available" status (or "in-use" for multiattach)
+        elapsed_time = self._wait_for_created_volumes_availability(
+            elapsed_time, created_items
+        )
+        # Wait until existing volumes in the VIM reach "available" status
+        elapsed_time = self._wait_for_existing_volumes_availability(
+            elapsed_time, existing_vim_volumes
+        )
+        # If we exceeded the timeout, roll back
+        if elapsed_time >= volume_timeout:
+            raise vimconn.VimConnException(
+                "Timeout creating volumes for instance " + name,
+                http_code=vimconn.HTTP_Request_Timeout,
+            )
+        if boot_volume_id:
+            self.cinder.volumes.set_bootable(boot_volume_id, True)
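+        # Illustrative disk_list (added comment, hypothetical values) showing how
+        # the loop above dispatches each entry:
+        #   disk_list = [
+        #       {"image_id": "<glance-image-id>", "size": 10},   # root volume -> vda
+        #       {"size": 20},                                    # non-root volume -> vdb
+        #       {"name": "shared-data", "multiattach": True},    # existing shared volume
+        #       {"vim_volume_id": "<existing-volume-id>"},       # reuse existing volume
+        #   ]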
+
+    def _find_the_external_network_for_floating_ip(self):
+        """Get the external network ip in order to create floating IP.
+
+        Returns:
+            pool_id (str):      External network pool ID
+
+        """
+
+        # Find the external network
+        external_nets = list()
+
+        for net in self.neutron.list_networks()["networks"]:
+            if net["router:external"]:
+                external_nets.append(net)
+
+        if len(external_nets) == 0:
+            raise vimconn.VimConnException(
+                "Cannot create floating_ip automatically since "
+                "no external network is present",
+                http_code=vimconn.HTTP_Conflict,
+            )
+
+        if len(external_nets) > 1:
+            raise vimconn.VimConnException(
+                "Cannot create floating_ip automatically since "
+                "multiple external networks are present",
+                http_code=vimconn.HTTP_Conflict,
+            )
+
+        # Pool ID
+        return external_nets[0].get("id")
+
+    def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
+        """Trigger neutron to create a new floating IP using external network ID.
+
+        Args:
+            param   (dict):             Input parameters to create a floating IP
+            created_items   (dict):     All created items belonging to the new VM instance
+
+        Raises:
+            VimConnException
+        """
+        try:
+            self.logger.debug("Creating floating IP")
+            new_floating_ip = self.neutron.create_floatingip(param)
+            free_floating_ip = new_floating_ip["floatingip"]["id"]
+            created_items["floating_ip:" + str(free_floating_ip)] = True
+
+        except Exception as e:
+            raise vimconn.VimConnException(
+                type(e).__name__ + ": Cannot create new floating_ip " + str(e),
+                http_code=vimconn.HTTP_Conflict,
+            )
+
+    def _create_floating_ip(
+        self, floating_network: dict, server: object, created_items: dict
+    ) -> None:
+        """Get the available Pool ID and create a new floating IP.
+
+        Args:
+            floating_network    (dict):         Dict including external network ID
+            server   (object):                  Server object
+            created_items   (dict):             All created items belonging to the new VM instance
+
+        """
+
+        # Pool_id is available
+        if (
+            isinstance(floating_network["floating_ip"], str)
+            and floating_network["floating_ip"].lower() != "true"
+        ):
+            pool_id = floating_network["floating_ip"]
+
+        # Find the Pool_id
+        else:
+            pool_id = self._find_the_external_network_for_floating_ip()
+
+        param = {
+            "floatingip": {
+                "floating_network_id": pool_id,
+                "tenant_id": server.tenant_id,
+            }
+        }
+
+        self._neutron_create_float_ip(param, created_items)
+
+    def _find_floating_ip(
+        self,
+        server: object,
+        floating_ips: list,
+        floating_network: dict,
+    ) -> Optional[str]:
+        """Find the available free floating IPs if there are.
+
+        Args:
+            server  (object):                   Server object
+            floating_ips    (list):             List of floating IPs
+            floating_network    (dict):         Details of floating network such as ID
+
+        Returns:
+            free_floating_ip    (str):          Free floating ip address
+
+        """
+        for fip in floating_ips:
+            if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
+                continue
+
+            if isinstance(floating_network["floating_ip"], str):
+                if fip.get("floating_network_id") != floating_network["floating_ip"]:
+                    continue
+
+            return fip["id"]
+
+    def _assign_floating_ip(
+        self, free_floating_ip: str, floating_network: dict
+    ) -> Dict:
+        """Assign the free floating ip address to port.
+
+        Args:
+            free_floating_ip    (str):          Floating IP to be assigned
+            floating_network    (dict):         Floating network details, including the port vim_id
+
+        Returns:
+            fip (dict):         Floating IP details
+
+        """
+        # The vim_id key contains the neutron.port_id
+        self.neutron.update_floatingip(
+            free_floating_ip,
+            {"floatingip": {"port_id": floating_network["vim_id"]}},
+        )
+        # To guard against race conditions, wait 5 seconds and re-read so the caller can verify it was not re-assigned
+        time.sleep(5)
+
+        return self.neutron.show_floatingip(free_floating_ip)
+
+    def _get_free_floating_ip(
+        self, server: object, floating_network: dict
+    ) -> Optional[str]:
+        """Get the free floating IP address.
+
+        Args:
+            server  (object):               Server Object
+            floating_network    (dict):     Floating network details
+
+        Returns:
+            free_floating_ip    (str):      Free floating IP address
+
+        """
+
+        floating_ips = self.neutron.list_floatingips().get("floatingips", [])
+
+        # Randomize the order to reduce collisions when several ROs pick a free IP concurrently
+        random.shuffle(floating_ips)
+
+        return self._find_floating_ip(server, floating_ips, floating_network)
+
+    def _prepare_external_network_for_vminstance(
+        self,
+        external_network: list,
+        server: object,
+        created_items: dict,
+        vm_start_time: float,
+    ) -> None:
+        """Assign floating IP address for VM instance.
+
+        Args:
+            external_network    (list):         List of external networks to assign floating IPs from
+            server  (object):                   Server object
+            created_items   (dict):             All created items belonging to the new VM instance
+            vm_start_time   (float):            Time as a floating point number expressed in seconds since the epoch, in UTC
+
+        Raises:
+            VimConnException
+
+        """
+        for floating_network in external_network:
+            try:
+                assigned = False
+                floating_ip_retries = 3
+                # In case of RO in HA there can be conflicts (two ROs trying to assign the same floating IP),
+                # so retry several times
+                while not assigned:
+                    free_floating_ip = self._get_free_floating_ip(
+                        server, floating_network
+                    )
+
+                    if not free_floating_ip:
+                        self._create_floating_ip(
+                            floating_network, server, created_items
+                        )
+                        # Re-enter the loop so the newly created IP is picked up as free_floating_ip
+                        continue
+
+                    try:
+                        # For race condition ensure not already assigned
+                        fip = self.neutron.show_floatingip(free_floating_ip)
+
+                        if fip["floatingip"].get("port_id"):
+                            continue
+
+                        # Assign floating ip
+                        fip = self._assign_floating_ip(
+                            free_floating_ip, floating_network
+                        )
+
+                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
+                            self.logger.warning(
+                                "floating_ip {} re-assigned to other port".format(
+                                    free_floating_ip
+                                )
                             )
-                            boot_volume_id = volume.id
-                        else:
-                            volume = self.cinder.volumes.create(
-                                size=disk["size"],
-                                name=name + "_vd" + chr(base_disk_index),
+                            continue
+
+                        self.logger.debug(
+                            "Assigned floating_ip {} to VM {}".format(
+                                free_floating_ip, server.id
                             )
+                        )
+
+                        assigned = True
 
-                        created_items["volume:" + str(volume.id)] = True
-                        block_device_mapping["_vd" + chr(base_disk_index)] = volume.id
+                    except Exception as e:
+                        # OpenStack needs some time after VM creation to assign an IP, so retry if it fails
+                        vm_status = self.nova.servers.get(server.id).status
 
-                    base_disk_index += 1
+                        if vm_status not in ("ACTIVE", "ERROR"):
+                            if time.time() - vm_start_time < server_timeout:
+                                time.sleep(5)
+                                continue
+                        elif floating_ip_retries > 0:
+                            floating_ip_retries -= 1
+                            continue
 
-                # Wait until created volumes are with status available
-                elapsed_time = 0
-                while elapsed_time < volume_timeout:
-                    for created_item in created_items:
-                        v, _, volume_id = created_item.partition(":")
-                        if v == "volume":
-                            if self.cinder.volumes.get(volume_id).status != "available":
-                                break
-                    else:  # all ready: break from while
-                        break
+                        raise vimconn.VimConnException(
+                            "Cannot create floating_ip: {} {}".format(
+                                type(e).__name__, e
+                            ),
+                            http_code=vimconn.HTTP_Conflict,
+                        )
 
-                    time.sleep(5)
-                    elapsed_time += 5
+            except Exception as e:
+                if not floating_network["exit_on_floating_ip_error"]:
+                    self.logger.error("Cannot create floating_ip. %s", str(e))
+                    continue
 
-                # If we exceeded the timeout rollback
-                if elapsed_time >= volume_timeout:
-                    raise vimconn.VimConnException(
-                        "Timeout creating volumes for instance " + name,
-                        http_code=vimconn.HTTP_Request_Timeout,
+                raise
+
+    def _update_port_security_for_vminstance(
+        self,
+        no_secured_ports: list,
+        server: object,
+    ) -> None:
+        """Updates the port security according to no_secured_ports list.
+
+        Args:
+            no_secured_ports    (list):     Ports whose port security will be disabled
+            server  (object):               Server Object
+
+        Raises:
+            VimConnException
+
+        """
+        # Wait until the VM is active and then disable the port-security
+        if no_secured_ports:
+            self.__wait_for_vm(server.id, "ACTIVE")
+
+        for port in no_secured_ports:
+            port_update = {
+                "port": {"port_security_enabled": False, "security_groups": None}
+            }
+
+            if port[1] == "allow-address-pairs":
+                port_update = {
+                    "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
+                }
+
+            try:
+                self.neutron.update_port(port[0], port_update)
+
+            except Exception:
+                raise vimconn.VimConnException(
+                    "It was not possible to disable port security for port {}".format(
+                        port[0]
                     )
-                if boot_volume_id:
-                    self.cinder.volumes.set_bootable(boot_volume_id, True)
+                )
+
+    def new_vminstance(
+        self,
+        name: str,
+        description: str,
+        start: bool,
+        image_id: str,
+        flavor_id: str,
+        affinity_group_list: list,
+        net_list: list,
+        cloud_config=None,
+        disk_list=None,
+        availability_zone_index=None,
+        availability_zone_list=None,
+    ) -> tuple:
+        """Adds a VM instance to VIM.
+
+        Args:
+            name    (str):          name of VM
+            description (str):      description
+            start   (bool):         indicates if VM must start or boot in pause mode. Ignored
+            image_id    (str):      image uuid
+            flavor_id   (str):      flavor uuid
+            affinity_group_list (list):     list of affinity groups, each one is a dictionary. Ignored if empty.
+            net_list    (list):         list of interfaces, each one is a dictionary with:
+                name:   name of network
+                net_id:     network uuid to connect
+                vpci:   virtual vpci to assign, ignored because OpenStack lacks support #TODO
+                model:  interface model, ignored #TODO
+                mac_address:    used for SR-IOV ifaces #TODO for other types
+                use:    'data', 'bridge',  'mgmt'
+                type:   'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
+                vim_id:     filled/added by this function
+                floating_ip:    True/False (or it can be None)
+                port_security:  True/False
+            cloud_config    (dict): (optional) dictionary with:
+                key-pairs:      (optional) list of strings with the public key to be inserted to the default user
+                users:      (optional) list of users to be inserted, each item is a dict with:
+                    name:   (mandatory) user name,
+                    key-pairs: (optional) list of strings with the public key to be inserted to the user
+                user-data:  (optional) string is a text script to be passed directly to cloud-init
+                config-files:   (optional). List of files to be transferred. Each item is a dict with:
+                    dest:   (mandatory) string with the destination absolute path
+                    encoding:   (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    content :    (mandatory) string with the content of the file
+                    permissions:    (optional) string with file permissions, typically octal notation '0644'
+                    owner:  (optional) file owner, string with the format 'owner:group'
+                boot-data-drive:    boolean to indicate if user-data must be passed using a boot drive (hard disk)
+            disk_list:  (optional) list with additional disks to the VM. Each item is a dict with:
+                image_id:   (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                size:   (mandatory) string with the size of the disk in GB
+                vim_id:  (optional) should use this existing volume id
+            availability_zone_index:    Index of availability_zone_list to use for this VM. None if no AZ is required
+            availability_zone_list:     list of availability zones given by user in the VNFD descriptor.  Ignore if
+                availability_zone_index is None
+                #TODO ip, security groups
+
+        Returns:
+            A tuple with the instance identifier and created_items or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+
+        """
+        self.logger.debug(
+            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
+            image_id,
+            flavor_id,
+            str(net_list),
+        )
+        server = None
+        created_items = {}
+        net_list_vim = []
+        # list of external networks to be connected to instance, later on used to create floating_ip
+        external_network = []
+        # List of ports with port-security disabled
+        no_secured_ports = []
+        block_device_mapping = {}
+        existing_vim_volumes = []
+        server_group_id = None
+        scheduller_hints = {}
+
+        try:
+            # Check the OpenStack connection
+            self._reload_connection()
 
-            # get availability Zone
-            vm_av_zone = self._get_vm_availability_zone(
+            # Prepare network list
+            self._prepare_network_for_vminstance(
+                name=name,
+                net_list=net_list,
+                created_items=created_items,
+                net_list_vim=net_list_vim,
+                external_network=external_network,
+                no_secured_ports=no_secured_ports,
+            )
+
+            # Cloud config
+            config_drive, userdata = self._create_user_data(cloud_config)
+
+            # Get availability Zone
+            self.vm_av_zone = self._get_vm_availability_zone(
                 availability_zone_index, availability_zone_list
             )
 
-            # Manage affinity groups/server groups
-            server_group_id = None
-            scheduller_hints = {}
+            storage_av_zone = (
+                self.storage_availability_zone
+                if self.storage_availability_zone
+                else self.vm_av_zone
+            )
+
+            if disk_list:
+                # Prepare disks
+                self._prepare_disk_for_vminstance(
+                    name=name,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                    storage_av_zone=storage_av_zone,
+                    block_device_mapping=block_device_mapping,
+                    disk_list=disk_list,
+                )
 
             if affinity_group_list:
                 # Only first id on the list will be used. Openstack restriction
@@ -1941,7 +2855,7 @@ class vimconnector(vimconn.VimConnector):
                     flavor_id,
                     net_list_vim,
                     self.config.get("security_groups"),
-                    vm_av_zone,
+                    self.vm_av_zone,
                     self.config.get("keypair"),
                     userdata,
                     config_drive,
@@ -1949,192 +2863,34 @@ class vimconnector(vimconn.VimConnector):
                     server_group_id,
                 )
             )
+            # Create VM
             server = self.nova.servers.create(
-                name,
-                image_id,
-                flavor_id,
+                name=name,
+                image=image_id,
+                flavor=flavor_id,
                 nics=net_list_vim,
                 security_groups=self.config.get("security_groups"),
                 # TODO remove security_groups in future versions. Already at neutron port
-                availability_zone=vm_av_zone,
+                availability_zone=self.vm_av_zone,
                 key_name=self.config.get("keypair"),
                 userdata=userdata,
                 config_drive=config_drive,
                 block_device_mapping=block_device_mapping,
                 scheduler_hints=scheduller_hints,
-            )  # , description=description)
+            )
 
             vm_start_time = time.time()
-            # Previously mentioned workaround to wait until the VM is active and then disable the port-security
-            if no_secured_ports:
-                self.__wait_for_vm(server.id, "ACTIVE")
-
-            for port in no_secured_ports:
-                port_update = {
-                    "port": {"port_security_enabled": False, "security_groups": None}
-                }
 
-                if port[1] == "allow-address-pairs":
-                    port_update = {
-                        "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
-                    }
-
-                try:
-                    self.neutron.update_port(port[0], port_update)
-                except Exception:
-                    raise vimconn.VimConnException(
-                        "It was not possible to disable port security for port {}".format(
-                            port[0]
-                        )
-                    )
-
-            # print "DONE :-)", server
-
-            # pool_id = None
-            for floating_network in external_network:
-                try:
-                    assigned = False
-                    floating_ip_retries = 3
-                    # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
-                    # several times
-                    while not assigned:
-                        floating_ips = self.neutron.list_floatingips().get(
-                            "floatingips", ()
-                        )
-                        random.shuffle(floating_ips)  # randomize
-                        for fip in floating_ips:
-                            if (
-                                fip.get("port_id")
-                                or fip.get("tenant_id") != server.tenant_id
-                            ):
-                                continue
-
-                            if isinstance(floating_network["floating_ip"], str):
-                                if (
-                                    fip.get("floating_network_id")
-                                    != floating_network["floating_ip"]
-                                ):
-                                    continue
-
-                            free_floating_ip = fip["id"]
-                            break
-                        else:
-                            if (
-                                isinstance(floating_network["floating_ip"], str)
-                                and floating_network["floating_ip"].lower() != "true"
-                            ):
-                                pool_id = floating_network["floating_ip"]
-                            else:
-                                # Find the external network
-                                external_nets = list()
-
-                                for net in self.neutron.list_networks()["networks"]:
-                                    if net["router:external"]:
-                                        external_nets.append(net)
-
-                                if len(external_nets) == 0:
-                                    raise vimconn.VimConnException(
-                                        "Cannot create floating_ip automatically since "
-                                        "no external network is present",
-                                        http_code=vimconn.HTTP_Conflict,
-                                    )
-
-                                if len(external_nets) > 1:
-                                    raise vimconn.VimConnException(
-                                        "Cannot create floating_ip automatically since "
-                                        "multiple external networks are present",
-                                        http_code=vimconn.HTTP_Conflict,
-                                    )
-
-                                pool_id = external_nets[0].get("id")
-
-                            param = {
-                                "floatingip": {
-                                    "floating_network_id": pool_id,
-                                    "tenant_id": server.tenant_id,
-                                }
-                            }
-
-                            try:
-                                # self.logger.debug("Creating floating IP")
-                                new_floating_ip = self.neutron.create_floatingip(param)
-                                free_floating_ip = new_floating_ip["floatingip"]["id"]
-                                created_items[
-                                    "floating_ip:" + str(free_floating_ip)
-                                ] = True
-                            except Exception as e:
-                                raise vimconn.VimConnException(
-                                    type(e).__name__
-                                    + ": Cannot create new floating_ip "
-                                    + str(e),
-                                    http_code=vimconn.HTTP_Conflict,
-                                )
-
-                        try:
-                            # for race condition ensure not already assigned
-                            fip = self.neutron.show_floatingip(free_floating_ip)
-
-                            if fip["floatingip"]["port_id"]:
-                                continue
-
-                            # the vim_id key contains the neutron.port_id
-                            self.neutron.update_floatingip(
-                                free_floating_ip,
-                                {"floatingip": {"port_id": floating_network["vim_id"]}},
-                            )
-                            # for race condition ensure not re-assigned to other VM after 5 seconds
-                            time.sleep(5)
-                            fip = self.neutron.show_floatingip(free_floating_ip)
-
-                            if (
-                                fip["floatingip"]["port_id"]
-                                != floating_network["vim_id"]
-                            ):
-                                self.logger.error(
-                                    "floating_ip {} re-assigned to other port".format(
-                                        free_floating_ip
-                                    )
-                                )
-                                continue
-
-                            self.logger.debug(
-                                "Assigned floating_ip {} to VM {}".format(
-                                    free_floating_ip, server.id
-                                )
-                            )
-                            assigned = True
-                        except Exception as e:
-                            # openstack need some time after VM creation to assign an IP. So retry if fails
-                            vm_status = self.nova.servers.get(server.id).status
-
-                            if vm_status not in ("ACTIVE", "ERROR"):
-                                if time.time() - vm_start_time < server_timeout:
-                                    time.sleep(5)
-                                    continue
-                            elif floating_ip_retries > 0:
-                                floating_ip_retries -= 1
-                                continue
-
-                            raise vimconn.VimConnException(
-                                "Cannot create floating_ip: {} {}".format(
-                                    type(e).__name__, e
-                                ),
-                                http_code=vimconn.HTTP_Conflict,
-                            )
-
-                except Exception as e:
-                    if not floating_network["exit_on_floating_ip_error"]:
-                        self.logger.error("Cannot create floating_ip. %s", str(e))
-                        continue
+            self._update_port_security_for_vminstance(no_secured_ports, server)
 
-                    raise
+            self._prepare_external_network_for_vminstance(
+                external_network=external_network,
+                server=server,
+                created_items=created_items,
+                vm_start_time=vm_start_time,
+            )
 
             return server.id, created_items
-        # except nvExceptions.NotFound as e:
-        #     error_value=-vimconn.HTTP_Not_Found
-        #     error_text= "vm instance %s not found" % vm_id
-        # except TypeError as e:
-        #     raise vimconn.VimConnException(type(e).__name__ + ": "+  str(e), http_code=vimconn.HTTP_Bad_Request)
 
         except Exception as e:
             server_id = None
@@ -2142,29 +2898,37 @@ class vimconnector(vimconn.VimConnector):
                 server_id = server.id
 
             try:
+                created_items = self.remove_keep_tag_from_persistent_volumes(
+                    created_items
+                )
+
                 self.delete_vminstance(server_id, created_items)
+
             except Exception as e2:
                 self.logger.error("new_vminstance rollback fail {}".format(e2))
 
             self._format_exception(e)
 
+    @staticmethod
+    def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
+        """Removes the keep flag from persistent volumes. So, those volumes could be removed.
+
+        Args:
+            created_items (dict):       All created items belongs to VM
+
+        Returns:
+            updated_created_items   (dict):     Dict which does not include keep flag for volumes.
+
+        """
+        return {
+            key.replace(":keep", ""): value for (key, value) in created_items.items()
+        }
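+        # Example (added comment, hypothetical ID):
+        #   {"volume:123:keep": True} -> {"volume:123": True}
+        # so the rollback path in new_vminstance can delete even keep-flagged volumes.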
+
     def get_vminstance(self, vm_id):
         """Returns the VM instance information from VIM"""
-        # self.logger.debug("Getting VM from VIM")
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-
-            return server.to_dict()
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        return self._find_nova_server(vm_id)
 
+    @catch_any_exception
     def get_vminstance_console(self, vm_id, console_type="vnc"):
         """
         Get a console for the virtual machine
@@ -2180,138 +2944,284 @@ class vimconnector(vimconn.VimConnector):
                 suffix:   extra text, e.g. the http path and query string
         """
         self.logger.debug("Getting VM CONSOLE from VIM")
+        self._reload_connection()
+        server = self.nova.servers.find(id=vm_id)
+
+        if console_type is None or console_type == "novnc":
+            console_dict = server.get_vnc_console("novnc")
+        elif console_type == "xvpvnc":
+            console_dict = server.get_vnc_console(console_type)
+        elif console_type == "rdp-html5":
+            console_dict = server.get_rdp_console(console_type)
+        elif console_type == "spice-html5":
+            console_dict = server.get_spice_console(console_type)
+        else:
+            raise vimconn.VimConnException(
+                "console type '{}' not allowed".format(console_type),
+                http_code=vimconn.HTTP_Bad_Request,
+            )
+
+        console_dict1 = console_dict.get("console")
+
+        if console_dict1:
+            console_url = console_dict1.get("url")
 
+            if console_url:
+                # parse console_url
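+                # Worked example (added comment, hypothetical URL):
+                #   "http://10.0.0.5:6080/vnc_auto.html?token=abc"
+                # yields protocol "http:", server "10.0.0.5",
+                # port ":6080" and suffix "vnc_auto.html?token=abc".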
+                protocol_index = console_url.find("//")
+                suffix_index = (
+                    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+                )
+                port_index = (
+                    console_url[protocol_index + 2 : suffix_index].find(":")
+                    + protocol_index
+                    + 2
+                )
+
+                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
+                    return (
+                        -vimconn.HTTP_Internal_Server_Error,
+                        "Unexpected response from VIM",
+                    )
+
+                console_dict = {
+                    "protocol": console_url[0:protocol_index],
+                    "server": console_url[protocol_index + 2 : port_index],
+                    "port": console_url[port_index:suffix_index],
+                    "suffix": console_url[suffix_index + 1 :],
+                }
+                protocol_index += 2
+
+                return console_dict
+        raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
+
+    def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
+        """Neutron delete ports by id.
+        Args:
+            k_id    (str):      Port id in the VIM
+        """
         try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
+            self.neutron.delete_port(k_id)
 
-            if console_type is None or console_type == "novnc":
-                console_dict = server.get_vnc_console("novnc")
-            elif console_type == "xvpvnc":
-                console_dict = server.get_vnc_console(console_type)
-            elif console_type == "rdp-html5":
-                console_dict = server.get_rdp_console(console_type)
-            elif console_type == "spice-html5":
-                console_dict = server.get_spice_console(console_type)
-            else:
+        except (neExceptions.ConnectionFailed, ConnectionError) as e:
+            self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+            # If there is connection error, raise.
+            self._format_exception(e)
+        except Exception as e:
+            self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+
+    def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
+        """Cinder delete volume by id.
+        Args:
+            shared_volume_vim_id    (str):                  ID of shared volume in VIM
+        """
+        elapsed_time = 0
+        try:
+            while elapsed_time < server_timeout:
+                vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
+                if vol_status == "available":
+                    self.cinder.volumes.delete(shared_volume_vim_id)
+                    return True
+
+                time.sleep(5)
+                elapsed_time += 5
+
+            if elapsed_time >= server_timeout:
                 raise vimconn.VimConnException(
-                    "console type '{}' not allowed".format(console_type),
-                    http_code=vimconn.HTTP_Bad_Request,
+                    "Timeout waiting for volume "
+                    + shared_volume_vim_id
+                    + " to be available",
+                    http_code=vimconn.HTTP_Request_Timeout,
                 )
 
-            console_dict1 = console_dict.get("console")
+        except Exception as e:
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
+            )
+            self._format_exception(e)
+
+    def _delete_volumes_by_id_wth_cinder(
+        self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
+    ) -> bool:
+        """Cinder delete volume by id.
+        Args:
+            k   (str):                      Full item name in created_items
+            k_id    (str):                  ID of floating ip in VIM
+            volumes_to_hold (list):          Volumes not to delete
+            created_items   (dict):         All created items belongs to VM
+        """
+        try:
+            if k_id in volumes_to_hold:
+                return False
 
-            if console_dict1:
-                console_url = console_dict1.get("url")
+            if self.cinder.volumes.get(k_id).status != "available":
+                return True
 
-                if console_url:
-                    # parse console_url
-                    protocol_index = console_url.find("//")
-                    suffix_index = (
-                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
-                    )
-                    port_index = (
-                        console_url[protocol_index + 2 : suffix_index].find(":")
-                        + protocol_index
-                        + 2
+            else:
+                self.cinder.volumes.delete(k_id)
+                created_items[k] = None
+
+        except (cExceptions.ConnectionError, ConnectionError) as e:
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
+            )
+            self._format_exception(e)
+        except Exception as e:
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
+            )
+
+    def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
+        """Neutron delete floating ip by id.
+        Args:
+            k   (str):                      Full item name in created_items
+            k_id    (str):                  ID of floating ip in VIM
+            created_items   (dict):         All created items belongs to VM
+        """
+        try:
+            self.neutron.delete_floatingip(k_id)
+            created_items[k] = None
+
+        except (neExceptions.ConnectionFailed, ConnectionError) as e:
+            self.logger.error(
+                "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
+            )
+            self._format_exception(e)
+        except Exception as e:
+            self.logger.error(
+                "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
+            )
+
+    @staticmethod
+    def _get_item_name_id(k: str) -> Tuple[str, str]:
+        k_item, _, k_id = k.partition(":")
+        return k_item, k_id
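+        # Example (added comment, hypothetical ID):
+        #   _get_item_name_id("port:4a6b...") -> ("port", "4a6b...")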
+
+    def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
+        """Delete VM ports attached to the networks before deleting virtual machine.
+        Args:
+            created_items   (dict):     All created items belongs to VM
+        """
+
+        for k, v in created_items.items():
+            if not v:  # skip already deleted
+                continue
+
+            try:
+                k_item, k_id = self._get_item_name_id(k)
+                if k_item == "port":
+                    self._delete_ports_by_id_wth_neutron(k_id)
+
+            except (neExceptions.ConnectionFailed, ConnectionError) as e:
+                self.logger.error(
+                    "Error deleting port: {}: {}".format(type(e).__name__, e)
+                )
+                self._format_exception(e)
+            except Exception as e:
+                self.logger.error(
+                    "Error deleting port: {}: {}".format(type(e).__name__, e)
+                )
+
+    def _delete_created_items(
+        self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
+    ) -> bool:
+        """Delete Volumes and floating ip if they exist in created_items."""
+        for k, v in created_items.items():
+            if not v:  # skip already deleted
+                continue
+
+            try:
+                k_item, k_id = self._get_item_name_id(k)
+                if k_item == "volume":
+                    unavailable_vol = self._delete_volumes_by_id_wth_cinder(
+                        k, k_id, volumes_to_hold, created_items
                     )
 
-                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
-                        return (
-                            -vimconn.HTTP_Internal_Server_Error,
-                            "Unexpected response from VIM",
-                        )
+                    if unavailable_vol:
+                        keep_waiting = True
+
+                elif k_item == "floating_ip":
+                    self._delete_floating_ip_by_id(k, k_id, created_items)
+
+            except (
+                cExceptions.ConnectionError,
+                neExceptions.ConnectionFailed,
+                ConnectionError,
+                AttributeError,
+                TypeError,
+            ) as e:
+                self.logger.error("Error deleting {}: {}".format(k, e))
+                self._format_exception(e)
+
+            except Exception as e:
+                self.logger.error("Error deleting {}: {}".format(k, e))
+
+        return keep_waiting
 
-                    console_dict = {
-                        "protocol": console_url[0:protocol_index],
-                        "server": console_url[protocol_index + 2 : port_index],
-                        "port": console_url[port_index:suffix_index],
-                        "suffix": console_url[suffix_index + 1 :],
-                    }
-                    protocol_index += 2
+    @staticmethod
+    def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
+        """Remove the volumes which has key flag from created_items
 
-                    return console_dict
-            raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.BadRequest,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        Args:
+            created_items   (dict):         All created items belonging to the VM
 
-    def delete_vminstance(self, vm_id, created_items=None):
-        """Removes a VM instance from VIM. Returns the old identifier"""
-        # print "osconnector: Getting VM from VIM"
+        Returns:
+            created_items   (dict):         created_items with the keep-flagged persistent volumes removed
+        """
+        return {
+            key: value
+            for (key, value) in created_items.items()
+            if len(key.split(":")) == 2
+        }
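+        # Example (added comment, hypothetical IDs): keys with a ":keep" suffix have
+        # three segments and are dropped, so
+        #   {"volume:123": True, "volume:456:keep": True} -> {"volume:123": True}
+        # and the keep-flagged volume survives a regular delete_vminstance().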
+
+    @catch_any_exception
+    def delete_vminstance(
+        self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
+    ) -> None:
+        """Removes a VM instance from VIM. Returns the old identifier.
+        Args:
+            vm_id   (str):              Identifier of VM instance
+            created_items   (dict):     All created items belongs to VM
+            volumes_to_hold (list):     Volumes_to_hold
+        """
         if created_items is None:
             created_items = {}
+        if volumes_to_hold is None:
+            volumes_to_hold = []
 
         try:
-            self._reload_connection()
-            # delete VM ports attached to this networks before the virtual machine
-            for k, v in created_items.items():
-                if not v:  # skip already deleted
-                    continue
+            created_items = self._extract_items_wth_keep_flag_from_created_items(
+                created_items
+            )
 
-                try:
-                    k_item, _, k_id = k.partition(":")
-                    if k_item == "port":
-                        self.neutron.delete_port(k_id)
-                except Exception as e:
-                    self.logger.error(
-                        "Error deleting port: {}: {}".format(type(e).__name__, e)
-                    )
+            self._reload_connection()
 
-            # #commented because detaching the volumes makes the servers.delete not work properly ?!?
-            # #dettach volumes attached
-            # server = self.nova.servers.get(vm_id)
-            # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"]   #volume["id"]
-            # #for volume in volumes_attached_dict:
-            # #    self.cinder.volumes.detach(volume["id"])
+            # Delete VM ports attached to the networks before the virtual machine
+            if created_items:
+                self._delete_vm_ports_attached_to_network(created_items)
 
             if vm_id:
                 self.nova.servers.delete(vm_id)
 
-            # delete volumes. Although having detached, they should have in active status before deleting
-            # we ensure in this loop
+            # Although detached, volumes should be in "available" status before
+            # being deleted; this loop waits until they are.
             keep_waiting = True
             elapsed_time = 0
 
             while keep_waiting and elapsed_time < volume_timeout:
                 keep_waiting = False
 
-                for k, v in created_items.items():
-                    if not v:  # skip already deleted
-                        continue
-
-                    try:
-                        k_item, _, k_id = k.partition(":")
-                        if k_item == "volume":
-                            if self.cinder.volumes.get(k_id).status != "available":
-                                keep_waiting = True
-                            else:
-                                self.cinder.volumes.delete(k_id)
-                                created_items[k] = None
-                        elif k_item == "floating_ip":  # floating ip
-                            self.neutron.delete_floatingip(k_id)
-                            created_items[k] = None
-
-                    except Exception as e:
-                        self.logger.error("Error deleting {}: {}".format(k, e))
+                # Delete volumes and floating IPs.
+                keep_waiting = self._delete_created_items(
+                    created_items, volumes_to_hold, keep_waiting
+                )
 
                 if keep_waiting:
                     time.sleep(1)
                     elapsed_time += 1
-
-            return None
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
+            # If VM does not exist, it does not raise
+            self.logger.warning(f"Error deleting VM: {vm_id} is not found, {str(e)}")
 
     def refresh_vms_status(self, vm_list):
         """Get the status of the virtual machines and their interfaces/ports
@@ -2343,7 +3253,6 @@ class vimconnector(vimconn.VimConnector):
         self.logger.debug(
             "refresh_vms status: Getting tenant VM instance information from VIM"
         )
-
         for vm_id in vm_list:
             vm = {}
 
@@ -2456,104 +3365,111 @@ class vimconnector(vimconn.VimConnector):
 
         return vm_dict
 
+    @catch_any_exception
     def action_vminstance(self, vm_id, action_dict, created_items={}):
         """Send and action over a VM instance from VIM
-        Returns None or the console dict if the action was successfully sent to the VIM"""
+        Returns None or the console dict if the action was successfully sent to the VIM
+        """
         self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
-
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-
-            if "start" in action_dict:
-                if action_dict["start"] == "rebuild":
-                    server.rebuild()
-                else:
-                    if server.status == "PAUSED":
-                        server.unpause()
-                    elif server.status == "SUSPENDED":
-                        server.resume()
-                    elif server.status == "SHUTOFF":
-                        server.start()
-            elif "pause" in action_dict:
-                server.pause()
-            elif "resume" in action_dict:
-                server.resume()
-            elif "shutoff" in action_dict or "shutdown" in action_dict:
-                server.stop()
-            elif "forceOff" in action_dict:
-                server.stop()  # TODO
-            elif "terminate" in action_dict:
-                server.delete()
-            elif "createImage" in action_dict:
-                server.create_image()
-                # "path":path_schema,
-                # "description":description_schema,
-                # "name":name_schema,
-                # "metadata":metadata_schema,
-                # "imageRef": id_schema,
-                # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
-            elif "rebuild" in action_dict:
-                server.rebuild(server.image["id"])
-            elif "reboot" in action_dict:
-                server.reboot()  # reboot_type="SOFT"
-            elif "console" in action_dict:
-                console_type = action_dict["console"]
-
-                if console_type is None or console_type == "novnc":
-                    console_dict = server.get_vnc_console("novnc")
-                elif console_type == "xvpvnc":
-                    console_dict = server.get_vnc_console(console_type)
-                elif console_type == "rdp-html5":
-                    console_dict = server.get_rdp_console(console_type)
-                elif console_type == "spice-html5":
-                    console_dict = server.get_spice_console(console_type)
+        self._reload_connection()
+        server = self.nova.servers.find(id=vm_id)
+        if "start" in action_dict:
+            if action_dict["start"] == "rebuild":
+                server.rebuild(server.image["id"])
+            else:
+                if server.status == "PAUSED":
+                    server.unpause()
+                elif server.status == "SUSPENDED":
+                    server.resume()
+                elif server.status == "SHUTOFF":
+                    server.start()
                 else:
+                    self.logger.debug(
+                        "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
+                    )
                     raise vimconn.VimConnException(
-                        "console type '{}' not allowed".format(console_type),
+                        "Cannot 'start' instance while it is in active state",
                         http_code=vimconn.HTTP_Bad_Request,
                     )
+        elif "pause" in action_dict:
+            server.pause()
+        elif "resume" in action_dict:
+            server.resume()
+        elif "shutoff" in action_dict or "shutdown" in action_dict:
+            self.logger.debug("server status %s", server.status)
+            if server.status == "ACTIVE":
+                server.stop()
+            else:
+                self.logger.debug("ERROR: VM is not in Active state")
+                raise vimconn.VimConnException(
+                    "VM is not in active state, stop operation is not allowed",
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
+        elif "forceOff" in action_dict:
+            server.stop()  # TODO
+        elif "terminate" in action_dict:
+            server.delete()
+        elif "createImage" in action_dict:
+            server.create_image()
+            # "path":path_schema,
+            # "description":description_schema,
+            # "name":name_schema,
+            # "metadata":metadata_schema,
+            # "imageRef": id_schema,
+            # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
+        elif "rebuild" in action_dict:
+            server.rebuild(server.image["id"])
+        elif "reboot" in action_dict:
+            server.reboot()  # reboot_type="SOFT"
+        elif "console" in action_dict:
+            console_type = action_dict["console"]
 
-                try:
-                    console_url = console_dict["console"]["url"]
-                    # parse console_url
-                    protocol_index = console_url.find("//")
-                    suffix_index = (
-                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
-                    )
-                    port_index = (
-                        console_url[protocol_index + 2 : suffix_index].find(":")
-                        + protocol_index
-                        + 2
-                    )
-
-                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
-                        raise vimconn.VimConnException(
-                            "Unexpected response from VIM " + str(console_dict)
-                        )
+            if console_type is None or console_type == "novnc":
+                console_dict = server.get_vnc_console("novnc")
+            elif console_type == "xvpvnc":
+                console_dict = server.get_vnc_console(console_type)
+            elif console_type == "rdp-html5":
+                console_dict = server.get_rdp_console(console_type)
+            elif console_type == "spice-html5":
+                console_dict = server.get_spice_console(console_type)
+            else:
+                raise vimconn.VimConnException(
+                    "console type '{}' not allowed".format(console_type),
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
 
-                    console_dict2 = {
-                        "protocol": console_url[0:protocol_index],
-                        "server": console_url[protocol_index + 2 : port_index],
-                        "port": int(console_url[port_index + 1 : suffix_index]),
-                        "suffix": console_url[suffix_index + 1 :],
-                    }
+            try:
+                console_url = console_dict["console"]["url"]
+                # parse console_url
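+                # e.g. "http://1.2.3.4:6080/vnc_auto.html?token=..." (illustrative) is
+                # split below into protocol, server, port and suffix components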
+                protocol_index = console_url.find("//")
+                suffix_index = (
+                    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+                )
+                port_index = (
+                    console_url[protocol_index + 2 : suffix_index].find(":")
+                    + protocol_index
+                    + 2
+                )
 
-                    return console_dict2
-                except Exception:
+                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                     raise vimconn.VimConnException(
                         "Unexpected response from VIM " + str(console_dict)
                     )
 
-            return None
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-        # TODO insert exception vimconn.HTTP_Unauthorized
+                console_dict2 = {
+                    "protocol": console_url[0:protocol_index],
+                    "server": console_url[protocol_index + 2 : port_index],
+                    "port": int(console_url[port_index + 1 : suffix_index]),
+                    "suffix": console_url[suffix_index + 1 :],
+                }
+
+                return console_dict2
+            except Exception:
+                raise vimconn.VimConnException(
+                    "Unexpected response from VIM " + str(console_dict)
+                )
+
+        return None
 
     # ###### VIO Specific Changes #########
     def _generate_vlanID(self):
@@ -2685,99 +3601,6 @@ class vimconnector(vimconn.VimConnector):
                     )
                 )
 
-    # NOT USED FUNCTIONS
-
-    def new_external_port(self, port_data):
-        """Adds a external port to VIM
-        Returns the port identifier"""
-        # TODO openstack if needed
-        return (
-            -vimconn.HTTP_Internal_Server_Error,
-            "osconnector.new_external_port() not implemented",
-        )
-
-    def connect_port_network(self, port_id, network_id, admin=False):
-        """Connects a external port to a network
-        Returns status code of the VIM response"""
-        # TODO openstack if needed
-        return (
-            -vimconn.HTTP_Internal_Server_Error,
-            "osconnector.connect_port_network() not implemented",
-        )
-
-    def new_user(self, user_name, user_passwd, tenant_id=None):
-        """Adds a new user to openstack VIM
-        Returns the user identifier"""
-        self.logger.debug("osconnector: Adding a new user to VIM")
-
-        try:
-            self._reload_connection()
-            user = self.keystone.users.create(
-                user_name, password=user_passwd, default_project=tenant_id
-            )
-            # self.keystone.tenants.add_user(self.k_creds["username"], #role)
-
-            return user.id
-        except ksExceptions.ConnectionError as e:
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-        except ksExceptions.ClientException as e:  # TODO remove
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-
-        # TODO insert exception vimconn.HTTP_Unauthorized
-        # if reaching here is because an exception
-        self.logger.debug("new_user " + error_text)
-
-        return error_value, error_text
-
-    def delete_user(self, user_id):
-        """Delete a user from openstack VIM
-        Returns the user identifier"""
-        if self.debug:
-            print("osconnector: Deleting  a  user from VIM")
-
-        try:
-            self._reload_connection()
-            self.keystone.users.delete(user_id)
-
-            return 1, user_id
-        except ksExceptions.ConnectionError as e:
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-        except ksExceptions.NotFound as e:
-            error_value = -vimconn.HTTP_Not_Found
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-        except ksExceptions.ClientException as e:  # TODO remove
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-
-        # TODO insert exception vimconn.HTTP_Unauthorized
-        # if reaching here is because an exception
-        self.logger.debug("delete_tenant " + error_text)
-
-        return error_value, error_text
-
     def get_hosts_info(self):
         """Get the information of deployed hosts
         Returns the hosts content"""
@@ -2875,6 +3698,12 @@ class vimconnector(vimconn.VimConnector):
 
             classification_dict = definition
             classification_dict["name"] = name
+
+            self.logger.info(
+                "Adding a new (Traffic) Classification to VIM, named {} and {}.".format(
+                    name, classification_dict
+                )
+            )
             new_class = self.neutron.create_sfc_flow_classifier(
                 {"flow_classifier": classification_dict}
             )
@@ -2980,6 +3809,7 @@ class vimconnector(vimconn.VimConnector):
                 "egress": egress_ports[0],
                 "service_function_parameters": {"correlation": correlation},
             }
+            self.logger.info("Adding a new SFI to VIM, {}.".format(sfi_dict))
             new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
 
             return new_sfi["port_pair"]["id"]
@@ -3063,12 +3893,10 @@ class vimconnector(vimconn.VimConnector):
     def new_sf(self, name, sfis, sfc_encap=True):
         self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
 
+        new_sf = None
+
         try:
-            new_sf = None
             self._reload_connection()
-            # correlation = None
-            # if sfc_encap:
-            #     correlation = "nsh"
 
             for instance in sfis:
                 sfi = self.get_sfi(instance)
@@ -3080,6 +3908,8 @@ class vimconnector(vimconn.VimConnector):
                     )
 
             sf_dict = {"name": name, "port_pairs": sfis}
+
+            self.logger.info("Adding a new SF to VIM, {}.".format(sf_dict))
             new_sf = self.neutron.create_sfc_port_pair_group(
                 {"port_pair_group": sf_dict}
             )
@@ -3093,9 +3923,8 @@ class vimconnector(vimconn.VimConnector):
         ) as e:
             if new_sf:
                 try:
-                    self.neutron.delete_sfc_port_pair_group(
-                        new_sf["port_pair_group"]["id"]
-                    )
+                    new_sf_id = new_sf.get("port_pair_group").get("id")
+                    self.neutron.delete_sfc_port_pair_group(new_sf_id)
                 except Exception:
                     self.logger.error(
                         "Creation of Service Function failed, with "
@@ -3167,8 +3996,9 @@ class vimconnector(vimconn.VimConnector):
     def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
         self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
 
+        new_sfp = None
+
         try:
-            new_sfp = None
             self._reload_connection()
             # In networking-sfc the MPLS encapsulation is legacy
             # should be used when no full SFC Encapsulation is intended
@@ -3187,6 +4017,7 @@ class vimconnector(vimconn.VimConnector):
             if spi:
                 sfp_dict["chain_id"] = spi
 
+            self.logger.info("Adding a new SFP to VIM, {}.".format(sfp_dict))
             new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
 
             return new_sfp["port_chain"]["id"]
@@ -3198,7 +4029,8 @@ class vimconnector(vimconn.VimConnector):
         ) as e:
             if new_sfp:
                 try:
-                    self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
+                    new_sfp_id = new_sfp.get("port_chain").get("id")
+                    self.neutron.delete_sfc_port_chain(new_sfp_id)
                 except Exception:
                     self.logger.error(
                         "Creation of Service Function Path failed, with "
@@ -3464,6 +4296,7 @@ class vimconnector(vimconn.VimConnector):
 
         return classification_dict
 
+    @catch_any_exception
     def new_affinity_group(self, affinity_group_data):
         """Adds a server group to VIM
             affinity_group_data contains a dictionary with information, keys:
@@ -3472,51 +4305,259 @@ class vimconnector(vimconn.VimConnector):
                 scope: Only nfvi-node allowed
         Returns the server group identifier"""
         self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
+        name = affinity_group_data["name"]
+        policy = affinity_group_data["type"]
+        self._reload_connection()
+        new_server_group = self.nova.server_groups.create(name, policy)
+        return new_server_group.id
 
-        try:
-            name = affinity_group_data["name"]
-            policy = affinity_group_data["type"]
-
-            self._reload_connection()
-            new_server_group = self.nova.server_groups.create(name, policy)
-
-            return new_server_group.id
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-            KeyError,
-        ) as e:
-            self._format_exception(e)
-
+    @catch_any_exception
     def get_affinity_group(self, affinity_group_id):
         """Obtain server group details from the VIM. Returns the server group detais as a dict"""
         self.logger.debug("Getting flavor '%s'", affinity_group_id)
-        try:
-            self._reload_connection()
-            server_group = self.nova.server_groups.find(id=affinity_group_id)
-
-            return server_group.to_dict()
-        except (
-            nvExceptions.NotFound,
-            nvExceptions.ClientException,
-            ksExceptions.ClientException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        self._reload_connection()
+        server_group = self.nova.server_groups.find(id=affinity_group_id)
+        return server_group.to_dict()
 
+    @catch_any_exception
     def delete_affinity_group(self, affinity_group_id):
         """Deletes a server group from the VIM. Returns the old affinity_group_id"""
         self.logger.debug("Getting server group '%s'", affinity_group_id)
+        self._reload_connection()
+        self.nova.server_groups.delete(affinity_group_id)
+        return affinity_group_id
+
+    @catch_any_exception
+    def get_vdu_state(self, vm_id, host_is_required=False) -> list:
+        """Getting the state of a VDU.
+        Args:
+            vm_id   (str): ID of an instance
+            host_is_required    (bool): For a non-admin VIM account, host info is absent from
+                                        server_dict; if this flag is True, a KeyError is raised in that case.
+        Returns:
+            vdu_data    (list): VDU details including state, flavor, host_info, AZ
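+                                e.g. ["ACTIVE", "<flavor-uuid>", "compute-0", "nova"] (illustrative)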
+        """
+        self.logger.debug("Getting the status of VM")
+        self.logger.debug("VIM VM ID %s", vm_id)
+        self._reload_connection()
+        server_dict = self._find_nova_server(vm_id)
+        srv_attr = "OS-EXT-SRV-ATTR:host"
+        host_info = (
+            server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
+        )
+        vdu_data = [
+            server_dict["status"],
+            server_dict["flavor"]["id"],
+            host_info,
+            server_dict["OS-EXT-AZ:availability_zone"],
+        ]
+        self.logger.debug("vdu_data %s", vdu_data)
+        return vdu_data
+
+    def check_compute_availability(self, host, server_flavor_details):
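+        """Return the host if one of its hypervisors has enough free resources.
+
+        Args:
+            host    (str): Name of the compute host to check
+            server_flavor_details   (list): Required resources as [ram_mb, disk_gb, vcpus]
+        Returns:
+            The host name if it has sufficient free RAM, disk and vCPUs, otherwise None
+        """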
+        self._reload_connection()
+        hypervisor_search = self.nova.hypervisors.search(
+            hypervisor_match=host, servers=True
+        )
+        for hypervisor in hypervisor_search:
+            hypervisor_id = hypervisor.to_dict()["id"]
+            hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
+            hypervisor_dict = hypervisor_details.to_dict()
+            resources_available = [
+                hypervisor_dict["free_ram_mb"],
+                hypervisor_dict["disk_available_least"],
+                hypervisor_dict["vcpus"] - hypervisor_dict["vcpus_used"],
+            ]
+            compute_available = all(
+                x > y for x, y in zip(resources_available, server_flavor_details)
+            )
+            if compute_available:
+                return host
+
+    def check_availability_zone(
+        self, old_az, server_flavor_details, old_host, host=None
+    ):
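+        """Check whether the instance can be placed inside its current availability zone.
+
+        Returns:
+            az_check    (dict): "zone_check" is True when the requested host (or any host
+                                other than old_host) belongs to the old AZ; "compute_availability"
+                                holds the host selected by check_compute_availability, or None
+        """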
+        self._reload_connection()
+        az_check = {"zone_check": False, "compute_availability": None}
+        aggregates_list = self.nova.aggregates.list()
+        for aggregate in aggregates_list:
+            aggregate_details = aggregate.to_dict()
+            if aggregate_details["availability_zone"] == old_az:
+                hosts_list = aggregate_details["hosts"]
+                if host is not None:
+                    if host in hosts_list:
+                        az_check["zone_check"] = True
+                        available_compute_id = self.check_compute_availability(
+                            host, server_flavor_details
+                        )
+                        if available_compute_id is not None:
+                            az_check["compute_availability"] = available_compute_id
+                else:
+                    for check_host in hosts_list:
+                        if check_host != old_host:
+                            available_compute_id = self.check_compute_availability(
+                                check_host, server_flavor_details
+                            )
+                            if available_compute_id is not None:
+                                az_check["zone_check"] = True
+                                az_check["compute_availability"] = available_compute_id
+                                break
+                    else:
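+                        # for/else: reached only when the loop was not broken, i.e. no
+                        # alternative host had capacity; the zone itself is still valid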
+                        az_check["zone_check"] = True
+        return az_check
+
+    @catch_any_exception
+    def migrate_instance(self, vm_id, compute_host=None):
+        """
+        Migrate a vdu
+        param:
+            vm_id: ID of an instance
+            compute_host: Host to migrate the vdu to
+        """
+        self._reload_connection()
+        vm_state = False
+        instance_state = self.get_vdu_state(vm_id, host_is_required=True)
+        server_flavor_id = instance_state[1]
+        server_hypervisor_name = instance_state[2]
+        server_availability_zone = instance_state[3]
+        server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
+        server_flavor_details = [
+            server_flavor["ram"],
+            server_flavor["disk"],
+            server_flavor["vcpus"],
+        ]
+        if compute_host == server_hypervisor_name:
+            raise vimconn.VimConnException(
+                "Unable to migrate instance '{}' to the same host '{}'".format(
+                    vm_id, compute_host
+                ),
+                http_code=vimconn.HTTP_Bad_Request,
+            )
+        az_status = self.check_availability_zone(
+            server_availability_zone,
+            server_flavor_details,
+            server_hypervisor_name,
+            compute_host,
+        )
+        availability_zone_check = az_status["zone_check"]
+        available_compute_id = az_status.get("compute_availability")
+
+        if availability_zone_check is False:
+            raise vimconn.VimConnException(
+                "Unable to migrate instance '{}' to a different availability zone".format(
+                    vm_id
+                ),
+                http_code=vimconn.HTTP_Bad_Request,
+            )
+        if available_compute_id is not None:
+            # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
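+            # block_migration=True requests a block live migration (no shared storage assumed)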
+            self.nova.servers.live_migrate(
+                server=vm_id,
+                host=available_compute_id,
+                block_migration=True,
+            )
+            state = "MIGRATING"
+            changed_compute_host = ""
+            if state == "MIGRATING":
+                vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
+                changed_compute_host = self.get_vdu_state(vm_id, host_is_required=True)[
+                    2
+                ]
+            if vm_state and changed_compute_host == available_compute_id:
+                self.logger.debug(
+                    "Instance '{}' migrated to the new compute host '{}'".format(
+                        vm_id, changed_compute_host
+                    )
+                )
+                return state, available_compute_id
+            else:
+                raise vimconn.VimConnException(
+                    "Migration Failed. Instance '{}' not moved to the new host {}".format(
+                        vm_id, available_compute_id
+                    ),
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
+        else:
+            raise vimconn.VimConnException(
+                "Compute '{}' not available or does not have enough resources to migrate the instance".format(
+                    available_compute_id
+                ),
+                http_code=vimconn.HTTP_Bad_Request,
+            )
+
+    @catch_any_exception
+    def resize_instance(self, vm_id, new_flavor_id):
+        """
+        Resize the VM to the given flavor.
+        Args:
+            vm_id (str): ID of an instance
+            new_flavor_id (str): ID of the flavor to resize to
+        Returns:
+            The status of the resized instance
+        """
+        self._reload_connection()
+        self.logger.debug("resize the flavor of an instance")
+        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
+        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
+        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
+        if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
+            if old_flavor_disk > new_flavor_disk:
+                raise nvExceptions.BadRequest(
+                    400,
+                    message="Server disk resize failed. Resize to lower disk flavor is not allowed",
+                )
+            else:
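+                # Nova moves the server to VERIFY_RESIZE; the resize only becomes final
+                # once it is confirmed (see confirm_resize below)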
+                self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
+                vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
+                if vm_state:
+                    instance_resized_status = self.confirm_resize(vm_id)
+                    return instance_resized_status
+                else:
+                    raise nvExceptions.BadRequest(
+                        409,
+                        message="Cannot 'resize' vm_state is in ERROR",
+                    )
+
+        else:
+            self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
+            raise nvExceptions.BadRequest(
+                409,
+                message="Cannot 'resize' instance while it is in vm_state resized",
+            )
+
+    def confirm_resize(self, vm_id):
+        """
+        Confirm the resize of an instance and return its resulting status.
+        Args:
+            vm_id (str): ID of an instance
+        """
+        self._reload_connection()
+        self.nova.servers.confirm_resize(server=vm_id)
+        if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
+            self.__wait_for_vm(vm_id, "ACTIVE")
+        instance_status = self.get_vdu_state(vm_id)[0]
+        return instance_status
+
+    def get_monitoring_data(self):
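+        """Get servers and ports data from the OpenStack VIM for monitoring purposes."""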
         try:
+            self.logger.debug("Getting servers and ports data from Openstack VIMs.")
             self._reload_connection()
-            self.nova.server_groups.delete(affinity_group_id)
-
-            return affinity_group_id
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+            all_servers = self.nova.servers.list(detailed=True)
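+            # From compute API microversion 2.47 onwards the embedded flavor dict carries
+            # "original_name" instead of an id; map it back to a flavor id here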
+            try:
+                for server in all_servers:
+                    if server.flavor.get("original_name"):
+                        server.flavor["id"] = self.nova.flavors.find(
+                            name=server.flavor["original_name"]
+                        ).id
+            except nvExceptions.NotFound as e:
+                self.logger.warning(str(e.message))
+            all_ports = self.neutron.list_ports()
+            return all_servers, all_ports
+        except Exception as e:
+            raise vimconn.VimConnException(
+                f"Exception in monitoring while getting VMs and ports status: {str(e)}"
+            )