Update from master part 2
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
index 3d54903..82a1e37 100644
@@ -32,11 +32,13 @@ to the VIM connector's SFC resources as follows:
 
 import copy
 from http.client import HTTPException
+import json
 import logging
 from pprint import pformat
 import random
 import re
 import time
+from typing import Dict, List, Optional, Tuple
 
 from cinderclient import client as cClient
 from glanceclient import client as glClient
@@ -337,7 +339,7 @@ class vimconnector(vimconn.VimConnector):
             version = self.config.get("microversion")
 
             if not version:
-                version = "2.1"
+                version = "2.60"
 
             # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
             # Titanium cloud and StarlingX
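The default compute API microversion now falls back to 2.60 when the VIM account does not set one. A minimal sketch of the lookup, with a hypothetical account config:

    # Hypothetical VIM account config pinning an explicit microversion
    config = {"microversion": "2.53"}
    version = config.get("microversion") or "2.60"  # "2.60" is the default used above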
@@ -353,12 +355,21 @@ class vimconnector(vimconn.VimConnector):
                 endpoint_type=self.endpoint_type,
                 region_name=region_name,
             )
-            self.cinder = self.session["cinder"] = cClient.Client(
-                2,
-                session=sess,
-                endpoint_type=self.endpoint_type,
-                region_name=region_name,
-            )
+
+            if sess.get_all_version_data(service_type="volumev2"):
+                self.cinder = self.session["cinder"] = cClient.Client(
+                    2,
+                    session=sess,
+                    endpoint_type=self.endpoint_type,
+                    region_name=region_name,
+                )
+            else:
+                self.cinder = self.session["cinder"] = cClient.Client(
+                    3,
+                    session=sess,
+                    endpoint_type=self.endpoint_type,
+                    region_name=region_name,
+                )
 
             try:
                 self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
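The branch above picks the Cinder client version from the service catalog: keystoneauth's Session.get_all_version_data() returns an empty result when no volumev2 endpoint is advertised, so the v3 client is used instead. A standalone sketch, assuming an already authenticated keystoneauth1 session sess:

    from cinderclient import client as cClient

    def pick_cinder_client(sess, endpoint_type, region_name):
        # Keep the legacy v2 client only while the catalog still advertises volumev2
        api_version = 2 if sess.get_all_version_data(service_type="volumev2") else 3
        return cClient.Client(
            api_version,
            session=sess,
            endpoint_type=endpoint_type,
            region_name=region_name,
        )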
@@ -752,7 +763,7 @@ class vimconnector(vimconn.VimConnector):
             self._reload_connection()
             network_dict = {"name": net_name, "admin_state_up": True}
 
-            if net_type in ("data", "ptp"):
+            if net_type in ("data", "ptp") or provider_network_profile:
                 provider_physical_network = None
 
                 if provider_network_profile and provider_network_profile.get(
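With this change a provider network profile alone, not just the data/ptp net types, triggers the provider branch. An illustrative profile (only the physical-network key is visible in this hunk; the other keys are assumptions):

    provider_network_profile = {
        "physical-network": "physnet1",   # looked up by the code above
        "network-type": "vlan",           # assumed key, for illustration only
        "segmentation-id": 400,           # assumed key, for illustration only
    }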
@@ -878,7 +889,7 @@ class vimconnector(vimconn.VimConnector):
 
             if not ip_profile.get("subnet_address"):
                 # Fake subnet is required
-                subnet_rand = random.randint(0, 255)
+                subnet_rand = random.SystemRandom().randint(0, 255)
                 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
 
             if "ip_version" not in ip_profile:
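random.SystemRandom() draws from the OS entropy source instead of the default Mersenne Twister; the fake subnet is otherwise chosen exactly as before:

    import random

    # Pick a fake /24 inside 192.168.0.0/16 when no subnet_address is given
    subnet_rand = random.SystemRandom().randint(0, 255)
    subnet_address = "192.168.{}.0/24".format(subnet_rand)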
@@ -923,6 +934,15 @@ class vimconnector(vimconn.VimConnector):
                 ip_str = str(netaddr.IPAddress(ip_int))
                 subnet["allocation_pools"][0]["end"] = ip_str
 
+            if (
+                ip_profile.get("ipv6_address_mode")
+                and ip_profile["ip_version"] != "IPv4"
+            ):
+                subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
+                # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
+                # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
+                subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
+
             # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
             self.neutron.create_subnet({"subnet": subnet})
 
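For a SLAAC profile the subnet body sent to Neutron would then look roughly like this (a sketch; only the two ipv6_* keys come from this change, the other values are illustrative):

    subnet = {
        "network_id": "net-uuid",        # illustrative
        "ip_version": 6,
        "cidr": "2001:db8::/64",         # illustrative
        "ipv6_address_mode": "slaac",
        "ipv6_ra_mode": "slaac",         # kept equal to ipv6_address_mode, per the comment above
    }
    # self.neutron.create_subnet({"subnet": subnet})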
@@ -1225,11 +1245,14 @@ class vimconnector(vimconn.VimConnector):
         ) as e:
             self._format_exception(e)
 
-    def process_resource_quota(self, quota, prefix, extra_specs):
-        """
-        :param prefix:
-        :param extra_specs:
-        :return:
+    @staticmethod
+    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
+        """Process resource quota and fill up extra_specs.
+        Args:
+            quota       (dict):         Resource quota details
+            prefix      (str):          Prefix for the extra_specs keys
+            extra_specs (dict):         Dict to be filled and used during flavor creation
+
         """
         if "limit" in quota:
             extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
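A hypothetical call showing how the helper fills extra_specs (the limit and shares branches are the ones visible here):

    extra_specs = {}
    vimconnector.process_resource_quota(
        {"limit": 10000, "shares": 2000}, "cpu", extra_specs
    )
    # extra_specs == {"quota:cpu_limit": 10000,
    #                 "quota:cpu_shares_level": "custom",
    #                 "quota:cpu_shares_share": 2000}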
@@ -1241,11 +1264,263 @@ class vimconnector(vimconn.VimConnector):
             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
             extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
 
-    def new_flavor(self, flavor_data, change_name_if_used=True):
-        """Adds a tenant flavor to openstack VIM
-        if change_name_if_used is True, it will change name in case of conflict, because it is not supported name
-         repetition
-        Returns the flavor identifier
+    @staticmethod
+    def process_numa_memory(
+        numa: dict, node_id: Optional[int], extra_specs: dict
+    ) -> None:
+        """Set the memory in extra_specs.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            node_id     (int):          ID of numa node
+            extra_specs (dict):         To be filled.
+
+        """
+        if not numa.get("memory"):
+            return
+        memory_mb = numa["memory"] * 1024
+        memory = "hw:numa_mem.{}".format(node_id)
+        extra_specs[memory] = int(memory_mb)
+
+    @staticmethod
+    def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
+        """Set the cpu in extra_specs.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            node_id     (int):          ID of numa node
+            extra_specs (dict):         To be filled.
+
+        """
+        if not numa.get("vcpu"):
+            return
+        vcpu = numa["vcpu"]
+        cpu = "hw:numa_cpus.{}".format(node_id)
+        vcpu = ",".join(map(str, vcpu))
+        extra_specs[cpu] = vcpu
+
+    @staticmethod
+    def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has paired-threads.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        Returns:
+            threads     (int):          Number of virtual cpus
+
+        """
+        if not numa.get("paired-threads"):
+            return
+
+        # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
+        threads = numa["paired-threads"] * 2
+        extra_specs["hw:cpu_thread_policy"] = "require"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return threads
+
+    @staticmethod
+    def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has cores.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        Returns:
+            cores       (int):          Number of virtual cpus
+
+        """
+        # cpu_thread_policy "isolate" implies that the host must not have an SMT
+        # architecture, or a non-SMT architecture will be emulated
+        if not numa.get("cores"):
+            return
+        cores = numa["cores"]
+        extra_specs["hw:cpu_thread_policy"] = "isolate"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return cores
+
+    @staticmethod
+    def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has threads.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        Returns:
+            threads     (int):          Number of virtual cpus
+
+        """
+        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
+        if not numa.get("threads"):
+            return
+        threads = numa["threads"]
+        extra_specs["hw:cpu_thread_policy"] = "prefer"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return threads
+
+    def _process_numa_parameters_of_flavor(
+        self, numas: List, extra_specs: Dict
+    ) -> None:
+        """Process numa parameters and fill up extra_specs.
+
+        Args:
+            numas   (list):             List of dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        """
+        numa_nodes = len(numas)
+        extra_specs["hw:numa_nodes"] = str(numa_nodes)
+        cpu_cores, cpu_threads = 0, 0
+
+        if self.vim_type == "VIO":
+            self.process_vio_numa_nodes(numa_nodes, extra_specs)
+
+        for numa in numas:
+            if "id" in numa:
+                node_id = numa["id"]
+                # overwrite ram and vcpus
+                # check if key "memory" is present in numa else use ram value at flavor
+                self.process_numa_memory(numa, node_id, extra_specs)
+                self.process_numa_vcpu(numa, node_id, extra_specs)
+
+            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
+            extra_specs["hw:cpu_sockets"] = str(numa_nodes)
+
+            if "paired-threads" in numa:
+                threads = self.process_numa_paired_threads(numa, extra_specs)
+                cpu_threads += threads
+
+            elif "cores" in numa:
+                cores = self.process_numa_cores(numa, extra_specs)
+                cpu_cores += cores
+
+            elif "threads" in numa:
+                threads = self.process_numa_threads(numa, extra_specs)
+                cpu_threads += threads
+
+        if cpu_cores:
+            extra_specs["hw:cpu_cores"] = str(cpu_cores)
+        if cpu_threads:
+            extra_specs["hw:cpu_threads"] = str(cpu_threads)
+
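Taken together, the helpers above map a NUMA descriptor onto Nova extra specs. A worked example under the logic shown (one node, 4 GB of memory, two paired threads, non-VIO):

    numas = [{"id": 0, "memory": 4, "paired-threads": 2}]
    extra_specs = {}
    self._process_numa_parameters_of_flavor(numas, extra_specs)
    # extra_specs == {"hw:numa_nodes": "1",
    #                 "hw:numa_mem.0": 4096,              # 4 GB in MB
    #                 "hw:cpu_sockets": "1",
    #                 "hw:cpu_thread_policy": "require",
    #                 "hw:cpu_policy": "dedicated",
    #                 "hw:cpu_threads": "4"}              # 2 paired threads -> 4 vCPUs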
+    @staticmethod
+    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
+        """Updates extra_specs for VIO according to the number of numa nodes.
+
+        Args:
+
+            numa_nodes      (int):         Number of numa nodes
+            extra_specs     (dict):        Extra specs dict to be updated
+
+        """
+        # If there are several numas, we do not define specific affinity.
+        extra_specs["vmware:latency_sensitivity_level"] = "high"
+
+    def _change_flavor_name(
+        self, name: str, name_suffix: int, flavor_data: dict
+    ) -> str:
+        """Change the flavor name if the name already exists.
+
+        Args:
+            name    (str):          Flavor name to be checked
+            name_suffix (int):      Suffix to be appended to name
+            flavor_data (dict):     Flavor dict
+
+        Returns:
+            name    (str):          New flavor name to be used
+
+        """
+        # Get used names
+        fl = self.nova.flavors.list()
+        fl_names = [f.name for f in fl]
+
+        while name in fl_names:
+            name_suffix += 1
+            name = flavor_data["name"] + "-" + str(name_suffix)
+
+        return name
+
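A hypothetical conflict-resolution run of the helper above (existing flavor names assumed):

    # Assume self.nova.flavors.list() returns flavors named "vnf-flavor" and "vnf-flavor-1"
    name = self._change_flavor_name("vnf-flavor", 0, {"name": "vnf-flavor"})
    # -> "vnf-flavor-2"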
+    def _process_extended_config_of_flavor(
+        self, extended: dict, extra_specs: dict
+    ) -> None:
+        """Process the extended dict to fill up extra_specs.
+        Args:
+
+            extended                    (dict):         Extended flavor specification from the descriptor
+            extra_specs                 (dict):         Dict to be filled and used during flavor creation
+
+        """
+        quotas = {
+            "cpu-quota": "cpu",
+            "mem-quota": "memory",
+            "vif-quota": "vif",
+            "disk-io-quota": "disk_io",
+        }
+
+        page_sizes = {
+            "LARGE": "large",
+            "SMALL": "small",
+            "SIZE_2MB": "2MB",
+            "SIZE_1GB": "1GB",
+            "PREFER_LARGE": "any",
+        }
+
+        policies = {
+            "cpu-pinning-policy": "hw:cpu_policy",
+            "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
+            "mem-policy": "hw:numa_mempolicy",
+        }
+
+        numas = extended.get("numas")
+        if numas:
+            self._process_numa_parameters_of_flavor(numas, extra_specs)
+
+        for quota, item in quotas.items():
+            if quota in extended.keys():
+                self.process_resource_quota(extended.get(quota), item, extra_specs)
+
+        # Set the mempage size as specified in the descriptor
+        if extended.get("mempage-size"):
+            if extended["mempage-size"] in page_sizes.keys():
+                extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
+            else:
+                # Normally, validations in NBI should not allow to this condition.
+                self.logger.debug(
+                    "Invalid mempage-size %s. Will be ignored",
+                    extended.get("mempage-size"),
+                )
+
+        for policy, hw_policy in policies.items():
+            if extended.get(policy):
+                extra_specs[hw_policy] = extended[policy].lower()
+
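For illustration, an extended block exercising the quota, mempage-size and policy mappings above:

    extended = {
        "cpu-quota": {"limit": 8000},
        "mempage-size": "SIZE_2MB",
        "cpu-pinning-policy": "DEDICATED",
    }
    extra_specs = {}
    self._process_extended_config_of_flavor(extended, extra_specs)
    # extra_specs == {"quota:cpu_limit": 8000,
    #                 "hw:mem_page_size": "2MB",
    #                 "hw:cpu_policy": "dedicated"}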
+    @staticmethod
+    def _get_flavor_details(flavor_data: dict) -> Tuple:
+        """Returns the details of the flavor.
+        Args:
+            flavor_data     (dict):     Dictionary that includes required flavor details
+
+        Returns:
+            ram, vcpus, extra_specs, extended   (tuple):    Main items of required flavor
+
+        """
+        return (
+            flavor_data.get("ram", 64),
+            flavor_data.get("vcpus", 1),
+            {},
+            flavor_data.get("extended"),
+        )
+
+    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
+        """Adds a tenant flavor to openstack VIM.
+        If change_name_if_used is True, it will change the name in case of conflict,
+        because name repetition is not supported.
+
+        Args:
+            flavor_data (dict):             Flavor details to be processed
+            change_name_if_used (bool):     Change name in case of conflict
+
+        Returns:
+             flavor_id  (str):     flavor identifier
+
         """
         self.logger.debug("Adding flavor '%s'", str(flavor_data))
         retry = 0
@@ -1260,96 +1535,16 @@ class vimconnector(vimconn.VimConnector):
                     self._reload_connection()
 
                     if change_name_if_used:
-                        # get used names
-                        fl_names = []
-                        fl = self.nova.flavors.list()
-
-                        for f in fl:
-                            fl_names.append(f.name)
+                        name = self._change_flavor_name(name, name_suffix, flavor_data)
 
-                        while name in fl_names:
-                            name_suffix += 1
-                            name = flavor_data["name"] + "-" + str(name_suffix)
-
-                    ram = flavor_data.get("ram", 64)
-                    vcpus = flavor_data.get("vcpus", 1)
-                    extra_specs = {}
-
-                    extended = flavor_data.get("extended")
+                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
+                        flavor_data
+                    )
                     if extended:
-                        numas = extended.get("numas")
-
-                        if numas:
-                            numa_nodes = len(numas)
-
-                            if numa_nodes > 1:
-                                return -1, "Can not add flavor with more than one numa"
-
-                            extra_specs["hw:numa_nodes"] = str(numa_nodes)
-                            extra_specs["hw:mem_page_size"] = "large"
-                            extra_specs["hw:cpu_policy"] = "dedicated"
-                            extra_specs["hw:numa_mempolicy"] = "strict"
-
-                            if self.vim_type == "VIO":
-                                extra_specs[
-                                    "vmware:extra_config"
-                                ] = '{"numa.nodeAffinity":"0"}'
-                                extra_specs["vmware:latency_sensitivity_level"] = "high"
-
-                            for numa in numas:
-                                # overwrite ram and vcpus
-                                # check if key "memory" is present in numa else use ram value at flavor
-                                if "memory" in numa:
-                                    ram = numa["memory"] * 1024
-                                # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
-                                # implemented/virt-driver-cpu-thread-pinning.html
-                                extra_specs["hw:cpu_sockets"] = 1
-
-                                if "paired-threads" in numa:
-                                    vcpus = numa["paired-threads"] * 2
-                                    # cpu_thread_policy "require" implies that the compute node must have an
-                                    # STM architecture
-                                    extra_specs["hw:cpu_thread_policy"] = "require"
-                                    extra_specs["hw:cpu_policy"] = "dedicated"
-                                elif "cores" in numa:
-                                    vcpus = numa["cores"]
-                                    # cpu_thread_policy "prefer" implies that the host must not have an SMT
-                                    # architecture, or a non-SMT architecture will be emulated
-                                    extra_specs["hw:cpu_thread_policy"] = "isolate"
-                                    extra_specs["hw:cpu_policy"] = "dedicated"
-                                elif "threads" in numa:
-                                    vcpus = numa["threads"]
-                                    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
-                                    # architecture
-                                    extra_specs["hw:cpu_thread_policy"] = "prefer"
-                                    extra_specs["hw:cpu_policy"] = "dedicated"
-                                # for interface in numa.get("interfaces",() ):
-                                #     if interface["dedicated"]=="yes":
-                                #         raise vimconn.VimConnException("Passthrough interfaces are not supported
-                                #         for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
-                                #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
-                                #      when a way to connect it is available
-                        elif extended.get("cpu-quota"):
-                            self.process_resource_quota(
-                                extended.get("cpu-quota"), "cpu", extra_specs
-                            )
+                        self._process_extended_config_of_flavor(extended, extra_specs)
 
-                        if extended.get("mem-quota"):
-                            self.process_resource_quota(
-                                extended.get("mem-quota"), "memory", extra_specs
-                            )
-
-                        if extended.get("vif-quota"):
-                            self.process_resource_quota(
-                                extended.get("vif-quota"), "vif", extra_specs
-                            )
-
-                        if extended.get("disk-io-quota"):
-                            self.process_resource_quota(
-                                extended.get("disk-io-quota"), "disk_io", extra_specs
-                            )
+                    # Create flavor
 
-                    # create flavor
                     new_flavor = self.nova.flavors.create(
                         name=name,
                         ram=ram,
@@ -1359,17 +1554,19 @@ class vimconnector(vimconn.VimConnector):
                         swap=flavor_data.get("swap", 0),
                         is_public=flavor_data.get("is_public", True),
                     )
-                    # add metadata
+
+                    # Add metadata
                     if extra_specs:
                         new_flavor.set_keys(extra_specs)
 
                     return new_flavor.id
+
                 except nvExceptions.Conflict as e:
                     if change_name_if_used and retry < max_retries:
                         continue
 
                     self._format_exception(e)
-        # except nvExceptions.BadRequest as e:
+
         except (
             ksExceptions.ClientException,
             nvExceptions.ClientException,
@@ -1674,1813 +1871,1739 @@ class vimconnector(vimconn.VimConnector):
                 "No enough availability zones at VIM for this deployment"
             )
 
-    def new_vminstance(
-        self,
-        name,
-        description,
-        start,
-        image_id,
-        flavor_id,
-        affinity_group_list,
-        net_list,
-        cloud_config=None,
-        disk_list=None,
-        availability_zone_index=None,
-        availability_zone_list=None,
-    ):
-        """Adds a VM instance to VIM
-        Params:
-            start: indicates if VM must start or boot in pause mode. Ignored
-            image_id,flavor_id: image and flavor uuid
-            affinity_group_list: list of affinity groups, each one is a dictionary.
-                Ignore if empty.
-            net_list: list of interfaces, each one is a dictionary with:
-                name:
-                net_id: network uuid to connect
-                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
-                model: interface model, ignored #TODO
-                mac_address: used for  SR-IOV ifaces #TODO for other types
-                use: 'data', 'bridge',  'mgmt'
-                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
-                vim_id: filled/added by this function
-                floating_ip: True/False (or it can be None)
-                port_security: True/False
-            'cloud_config': (optional) dictionary with:
-                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
-                'users': (optional) list of users to be inserted, each item is a dict with:
-                    'name': (mandatory) user name,
-                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
-                'user-data': (optional) string is a text script to be passed directly to cloud-init
-                'config-files': (optional). List of files to be transferred. Each item is a dict with:
-                    'dest': (mandatory) string with the destination absolute path
-                    'encoding': (optional, by default text). Can be one of:
-                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
-                    'content' (mandatory): string with the content of the file
-                    'permissions': (optional) string with file permissions, typically octal notation '0644'
-                    'owner': (optional) file owner, string with the format 'owner:group'
-                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
-            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
-                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
-                'size': (mandatory) string with the size of the disk in GB
-                'vim_id' (optional) should use this existing volume id
-            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
-            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
-                availability_zone_index is None
-                #TODO ip, security groups
-        Returns a tuple with the instance identifier and created_items or raises an exception on error
-            created_items can be None or a dictionary where this method can include key-values that will be passed to
-            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
-            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-            as not present.
-        """
-        self.logger.debug(
-            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
-            image_id,
-            flavor_id,
-            str(net_list),
-        )
+    def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
+        """Fill up the security_groups in the port_dict.
 
-        try:
-            server = None
-            created_items = {}
-            # metadata = {}
-            net_list_vim = []
-            external_network = []
-            # ^list of external networks to be connected to instance, later on used to create floating_ip
-            no_secured_ports = []  # List of port-is with port-security disabled
-            self._reload_connection()
-            # metadata_vpci = {}  # For a specific neutron plugin
-            block_device_mapping = None
+        Args:
+            net (dict):             Network details
+            port_dict   (dict):     Port details
 
-            for net in net_list:
-                if not net.get("net_id"):  # skip non connected iface
-                    continue
+        """
+        if (
+            self.config.get("security_groups")
+            and net.get("port_security") is not False
+            and not self.config.get("no_port_security_extension")
+        ):
+            if not self.security_groups_id:
+                self._get_ids_from_name()
 
-                port_dict = {
-                    "network_id": net["net_id"],
-                    "name": net.get("name"),
-                    "admin_state_up": True,
-                }
+            port_dict["security_groups"] = self.security_groups_id
 
-                if (
-                    self.config.get("security_groups")
-                    and net.get("port_security") is not False
-                    and not self.config.get("no_port_security_extension")
-                ):
-                    if not self.security_groups_id:
-                        self._get_ids_from_name()
+    def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
+        """Fill up the network binding depending on network type in the port_dict.
 
-                    port_dict["security_groups"] = self.security_groups_id
+        Args:
+            net (dict):             Network details
+            port_dict   (dict):     Port details
 
-                if net["type"] == "virtual":
-                    pass
-                    # if "vpci" in net:
-                    #     metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
-                elif net["type"] == "VF" or net["type"] == "SR-IOV":  # for VF
-                    # if "vpci" in net:
-                    #     if "VF" not in metadata_vpci:
-                    #         metadata_vpci["VF"]=[]
-                    #     metadata_vpci["VF"].append([ net["vpci"], "" ])
-                    port_dict["binding:vnic_type"] = "direct"
-
-                    # VIO specific Changes
-                    if self.vim_type == "VIO":
-                        # Need to create port with port_security_enabled = False and no-security-groups
-                        port_dict["port_security_enabled"] = False
-                        port_dict["provider_security_groups"] = []
-                        port_dict["security_groups"] = []
-                else:  # For PT PCI-PASSTHROUGH
-                    # if "vpci" in net:
-                    #     if "PF" not in metadata_vpci:
-                    #         metadata_vpci["PF"]=[]
-                    #     metadata_vpci["PF"].append([ net["vpci"], "" ])
-                    port_dict["binding:vnic_type"] = "direct-physical"
-
-                if not port_dict["name"]:
-                    port_dict["name"] = name
-
-                if net.get("mac_address"):
-                    port_dict["mac_address"] = net["mac_address"]
-
-                if net.get("ip_address"):
-                    port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
-                    # TODO add "subnet_id": <subnet_id>
-
-                new_port = self.neutron.create_port({"port": port_dict})
-                created_items["port:" + str(new_port["port"]["id"])] = True
-                net["mac_adress"] = new_port["port"]["mac_address"]
-                net["vim_id"] = new_port["port"]["id"]
-                # if try to use a network without subnetwork, it will return a emtpy list
-                fixed_ips = new_port["port"].get("fixed_ips")
-
-                if fixed_ips:
-                    net["ip"] = fixed_ips[0].get("ip_address")
-                else:
-                    net["ip"] = None
-
-                port = {"port-id": new_port["port"]["id"]}
-                if float(self.nova.api_version.get_string()) >= 2.32:
-                    port["tag"] = new_port["port"]["name"]
-
-                net_list_vim.append(port)
-
-                if net.get("floating_ip", False):
-                    net["exit_on_floating_ip_error"] = True
-                    external_network.append(net)
-                elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
-                    net["exit_on_floating_ip_error"] = False
-                    external_network.append(net)
-                    net["floating_ip"] = self.config.get("use_floating_ip")
-
-                # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
-                # is dropped.
-                # As a workaround we wait until the VM is active and then disable the port-security
-                if net.get("port_security") is False and not self.config.get(
-                    "no_port_security_extension"
-                ):
-                    no_secured_ports.append(
-                        (
-                            new_port["port"]["id"],
-                            net.get("port_security_disable_strategy"),
-                        )
-                    )
+        """
+        if not net.get("type"):
+            raise vimconn.VimConnException("Type is missing in the network details.")
 
-            # if metadata_vpci:
-            #     metadata = {"pci_assignement": json.dumps(metadata_vpci)}
-            #     if len(metadata["pci_assignement"]) >255:
-            #         #limit the metadata size
-            #         #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
-            #         self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
-            #         metadata = {}
+        if net["type"] == "virtual":
+            pass
 
-            self.logger.debug(
-                "name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
-                name,
-                image_id,
-                flavor_id,
-                str(net_list_vim),
-                description,
-            )
+        # For VF
+        elif net["type"] == "VF" or net["type"] == "SR-IOV":
+            port_dict["binding:vnic_type"] = "direct"
 
-            # cloud config
-            config_drive, userdata = self._create_user_data(cloud_config)
+            # VIO specific Changes
+            if self.vim_type == "VIO":
+                # Need to create port with port_security_enabled = False and no-security-groups
+                port_dict["port_security_enabled"] = False
+                port_dict["provider_security_groups"] = []
+                port_dict["security_groups"] = []
 
-            # get availability Zone
-            vm_av_zone = self._get_vm_availability_zone(
-                availability_zone_index, availability_zone_list
-            )
+        else:
+            # For PT PCI-PASSTHROUGH
+            port_dict["binding:vnic_type"] = "direct-physical"
 
-            # Create additional volumes in case these are present in disk_list
-            existing_vim_volumes = []
-            base_disk_index = ord("b")
-            boot_volume_id = None
-            if disk_list:
-                block_device_mapping = {}
-                for disk in disk_list:
-                    if disk.get("vim_id"):
-                        block_device_mapping["_vd" + chr(base_disk_index)] = disk[
-                            "vim_id"
-                        ]
-                        existing_vim_volumes.append({"id": disk["vim_id"]})
-                    else:
-                        if "image_id" in disk:
-                            base_disk_index = ord("a")
-                            volume = self.cinder.volumes.create(
-                                size=disk["size"],
-                                name=name + "_vd" + chr(base_disk_index),
-                                imageRef=disk["image_id"],
-                                # Make sure volume is in the same AZ as the VM to be attached to
-                                availability_zone=vm_av_zone,
-                            )
-                            boot_volume_id = volume.id
-                        else:
-                            volume = self.cinder.volumes.create(
-                                size=disk["size"],
-                                name=name + "_vd" + chr(base_disk_index),
-                                # Make sure volume is in the same AZ as the VM to be attached to
-                                availability_zone=vm_av_zone,
-                            )
+    @staticmethod
+    def _set_fixed_ip(new_port: dict, net: dict) -> None:
+        """Set the "ip" parameter in net dictionary.
 
-                        created_items["volume:" + str(volume.id)] = True
-                        block_device_mapping["_vd" + chr(base_disk_index)] = volume.id
+        Args:
+            new_port    (dict):     Newly created port
+            net         (dict):     Network details
 
-                    base_disk_index += 1
+        """
+        fixed_ips = new_port["port"].get("fixed_ips")
 
-                # Wait until created volumes are with status available
-                elapsed_time = 0
-                while elapsed_time < volume_timeout:
-                    for created_item in created_items:
-                        v, _, volume_id = created_item.partition(":")
-                        if v == "volume":
-                            if self.cinder.volumes.get(volume_id).status != "available":
-                                break
-                    else:  # all ready: break from while
-                        break
+        if fixed_ips:
+            net["ip"] = fixed_ips[0].get("ip_address")
+        else:
+            net["ip"] = None
 
-                    time.sleep(5)
-                    elapsed_time += 5
+    @staticmethod
+    def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
+        """Fill up the mac_address and fixed_ips in port_dict.
 
-                # Wait until existing volumes in vim are with status available
-                while elapsed_time < volume_timeout:
-                    for volume in existing_vim_volumes:
-                        if self.cinder.volumes.get(volume["id"]).status != "available":
-                            break
-                    else:  # all ready: break from while
-                        break
+        Args:
+            net (dict):             Network details
+            port_dict   (dict):     Port details
 
-                    time.sleep(5)
-                    elapsed_time += 5
+        """
+        if net.get("mac_address"):
+            port_dict["mac_address"] = net["mac_address"]
+
+        ip_dual_list = []
+        if ip_list := net.get("ip_address"):
+            if not isinstance(ip_list, list):
+                ip_list = [ip_list]
+            for ip in ip_list:
+                ip_dict = {"ip_address": ip}
+                ip_dual_list.append(ip_dict)
+            port_dict["fixed_ips"] = ip_dual_list
+            # TODO add "subnet_id": <subnet_id>
+
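The branch above accepts either a single address or a dual-stack list. A quick sketch (values illustrative):

    port_dict = {}
    net = {"ip_address": ["10.0.0.5", "2001:db8::5"]}  # a bare string also works
    vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
    # port_dict["fixed_ips"] == [{"ip_address": "10.0.0.5"},
    #                            {"ip_address": "2001:db8::5"}]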
+    def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
+        """Create new port using neutron.
 
-                # If we exceeded the timeout rollback
-                if elapsed_time >= volume_timeout:
-                    raise vimconn.VimConnException(
-                        "Timeout creating volumes for instance " + name,
-                        http_code=vimconn.HTTP_Request_Timeout,
-                    )
-                if boot_volume_id:
-                    self.cinder.volumes.set_bootable(boot_volume_id, True)
+        Args:
+            port_dict   (dict):         Port details
+            created_items   (dict):     All created items
+            net (dict):                 Network details
 
-            # Manage affinity groups/server groups
-            server_group_id = None
-            scheduller_hints = {}
+        Returns:
+            new_port    (dict):         Newly created port
 
-            if affinity_group_list:
-                # Only first id on the list will be used. Openstack restriction
-                server_group_id = affinity_group_list[0]["affinity_group_id"]
-                scheduller_hints["group"] = server_group_id
+        """
+        new_port = self.neutron.create_port({"port": port_dict})
+        created_items["port:" + str(new_port["port"]["id"])] = True
+        net["mac_address"] = new_port["port"]["mac_address"]
+        net["vim_id"] = new_port["port"]["id"]
 
-            self.logger.debug(
-                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
-                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
-                "block_device_mapping={}, server_group={})".format(
-                    name,
-                    image_id,
-                    flavor_id,
-                    net_list_vim,
-                    self.config.get("security_groups"),
-                    vm_av_zone,
-                    self.config.get("keypair"),
-                    userdata,
-                    config_drive,
-                    block_device_mapping,
-                    server_group_id,
-                )
-            )
-            server = self.nova.servers.create(
-                name,
-                image_id,
-                flavor_id,
-                nics=net_list_vim,
-                security_groups=self.config.get("security_groups"),
-                # TODO remove security_groups in future versions. Already at neutron port
-                availability_zone=vm_av_zone,
-                key_name=self.config.get("keypair"),
-                userdata=userdata,
-                config_drive=config_drive,
-                block_device_mapping=block_device_mapping,
-                scheduler_hints=scheduller_hints,
-            )  # , description=description)
+        return new_port
 
-            vm_start_time = time.time()
-            # Previously mentioned workaround to wait until the VM is active and then disable the port-security
-            if no_secured_ports:
-                self.__wait_for_vm(server.id, "ACTIVE")
+    def _create_port(
+        self, net: dict, name: str, created_items: dict
+    ) -> Tuple[dict, dict]:
+        """Create port using net details.
 
-            for port in no_secured_ports:
-                port_update = {
-                    "port": {"port_security_enabled": False, "security_groups": None}
-                }
+        Args:
+            net (dict):                 Network details
+            name    (str):              Fallback port name when the net dict does not include one
+            created_items   (dict):     All created items
 
-                if port[1] == "allow-address-pairs":
-                    port_update = {
-                        "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
-                    }
+        Returns:
+            new_port, port  (tuple):    Newly created port and its port-id reference dict
 
-                try:
-                    self.neutron.update_port(port[0], port_update)
-                except Exception:
-                    raise vimconn.VimConnException(
-                        "It was not possible to disable port security for port {}".format(
-                            port[0]
-                        )
-                    )
+        """
 
-            # print "DONE :-)", server
+        port_dict = {
+            "network_id": net["net_id"],
+            "name": net.get("name"),
+            "admin_state_up": True,
+        }
 
-            # pool_id = None
-            for floating_network in external_network:
-                try:
-                    assigned = False
-                    floating_ip_retries = 3
-                    # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
-                    # several times
-                    while not assigned:
-                        floating_ips = self.neutron.list_floatingips().get(
-                            "floatingips", ()
-                        )
-                        random.shuffle(floating_ips)  # randomize
-                        for fip in floating_ips:
-                            if (
-                                fip.get("port_id")
-                                or fip.get("tenant_id") != server.tenant_id
-                            ):
-                                continue
+        if not port_dict["name"]:
+            port_dict["name"] = name
 
-                            if isinstance(floating_network["floating_ip"], str):
-                                if (
-                                    fip.get("floating_network_id")
-                                    != floating_network["floating_ip"]
-                                ):
-                                    continue
+        self._prepare_port_dict_security_groups(net, port_dict)
 
-                            free_floating_ip = fip["id"]
-                            break
-                        else:
-                            if (
-                                isinstance(floating_network["floating_ip"], str)
-                                and floating_network["floating_ip"].lower() != "true"
-                            ):
-                                pool_id = floating_network["floating_ip"]
-                            else:
-                                # Find the external network
-                                external_nets = list()
-
-                                for net in self.neutron.list_networks()["networks"]:
-                                    if net["router:external"]:
-                                        external_nets.append(net)
-
-                                if len(external_nets) == 0:
-                                    raise vimconn.VimConnException(
-                                        "Cannot create floating_ip automatically since "
-                                        "no external network is present",
-                                        http_code=vimconn.HTTP_Conflict,
-                                    )
+        self._prepare_port_dict_binding(net, port_dict)
 
-                                if len(external_nets) > 1:
-                                    raise vimconn.VimConnException(
-                                        "Cannot create floating_ip automatically since "
-                                        "multiple external networks are present",
-                                        http_code=vimconn.HTTP_Conflict,
-                                    )
+        vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
 
-                                pool_id = external_nets[0].get("id")
-
-                            param = {
-                                "floatingip": {
-                                    "floating_network_id": pool_id,
-                                    "tenant_id": server.tenant_id,
-                                }
-                            }
-
-                            try:
-                                # self.logger.debug("Creating floating IP")
-                                new_floating_ip = self.neutron.create_floatingip(param)
-                                free_floating_ip = new_floating_ip["floatingip"]["id"]
-                                created_items[
-                                    "floating_ip:" + str(free_floating_ip)
-                                ] = True
-                            except Exception as e:
-                                raise vimconn.VimConnException(
-                                    type(e).__name__
-                                    + ": Cannot create new floating_ip "
-                                    + str(e),
-                                    http_code=vimconn.HTTP_Conflict,
-                                )
+        new_port = self._create_new_port(port_dict, created_items, net)
 
-                        try:
-                            # for race condition ensure not already assigned
-                            fip = self.neutron.show_floatingip(free_floating_ip)
+        vimconnector._set_fixed_ip(new_port, net)
 
-                            if fip["floatingip"]["port_id"]:
-                                continue
+        port = {"port-id": new_port["port"]["id"]}
 
-                            # the vim_id key contains the neutron.port_id
-                            self.neutron.update_floatingip(
-                                free_floating_ip,
-                                {"floatingip": {"port_id": floating_network["vim_id"]}},
-                            )
-                            # for race condition ensure not re-assigned to other VM after 5 seconds
-                            time.sleep(5)
-                            fip = self.neutron.show_floatingip(free_floating_ip)
+        if float(self.nova.api_version.get_string()) >= 2.32:
+            port["tag"] = new_port["port"]["name"]
 
-                            if (
-                                fip["floatingip"]["port_id"]
-                                != floating_network["vim_id"]
-                            ):
-                                self.logger.error(
-                                    "floating_ip {} re-assigned to other port".format(
-                                        free_floating_ip
-                                    )
-                                )
-                                continue
+        return new_port, port
 
-                            self.logger.debug(
-                                "Assigned floating_ip {} to VM {}".format(
-                                    free_floating_ip, server.id
-                                )
-                            )
-                            assigned = True
-                        except Exception as e:
-                            # openstack need some time after VM creation to assign an IP. So retry if fails
-                            vm_status = self.nova.servers.get(server.id).status
-
-                            if vm_status not in ("ACTIVE", "ERROR"):
-                                if time.time() - vm_start_time < server_timeout:
-                                    time.sleep(5)
-                                    continue
-                            elif floating_ip_retries > 0:
-                                floating_ip_retries -= 1
-                                continue
+    def _prepare_network_for_vminstance(
+        self,
+        name: str,
+        net_list: list,
+        created_items: dict,
+        net_list_vim: list,
+        external_network: list,
+        no_secured_ports: list,
+    ) -> None:
+        """Create port and fill up net dictionary for new VM instance creation.
 
-                            raise vimconn.VimConnException(
-                                "Cannot create floating_ip: {} {}".format(
-                                    type(e).__name__, e
-                                ),
-                                http_code=vimconn.HTTP_Conflict,
-                            )
+        Args:
+            name    (str):                  VM instance name, used as a fallback port name
+            net_list    (list):             List of networks
+            created_items   (dict):         All created items belonging to a VM
+            net_list_vim    (list):         List of ports
+            external_network    (list):     List of external-networks
+            no_secured_ports    (list):     Port security disabled ports
+        """
 
-                except Exception as e:
-                    if not floating_network["exit_on_floating_ip_error"]:
-                        self.logger.error("Cannot create floating_ip. %s", str(e))
-                        continue
+        self._reload_connection()
 
-                    raise
+        for net in net_list:
+            # Skip non-connected iface
+            if not net.get("net_id"):
+                continue
 
-            return server.id, created_items
-        # except nvExceptions.NotFound as e:
-        #     error_value=-vimconn.HTTP_Not_Found
-        #     error_text= "vm instance %s not found" % vm_id
-        # except TypeError as e:
-        #     raise vimconn.VimConnException(type(e).__name__ + ": "+  str(e), http_code=vimconn.HTTP_Bad_Request)
+            new_port, port = self._create_port(net, name, created_items)
 
-        except Exception as e:
-            server_id = None
-            if server:
-                server_id = server.id
+            net_list_vim.append(port)
 
-            try:
-                self.delete_vminstance(server_id, created_items)
-            except Exception as e2:
-                self.logger.error("new_vminstance rollback fail {}".format(e2))
+            if net.get("floating_ip", False):
+                net["exit_on_floating_ip_error"] = True
+                external_network.append(net)
 
-            self._format_exception(e)
+            elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
+                net["exit_on_floating_ip_error"] = False
+                external_network.append(net)
+                net["floating_ip"] = self.config.get("use_floating_ip")
 
-    def get_vminstance(self, vm_id):
-        """Returns the VM instance information from VIM"""
-        # self.logger.debug("Getting VM from VIM")
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+            # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
+            # is dropped. As a workaround we wait until the VM is active and then disable the port-security
+            if net.get("port_security") is False and not self.config.get(
+                "no_port_security_extension"
+            ):
+                no_secured_ports.append(
+                    (
+                        new_port["port"]["id"],
+                        net.get("port_security_disable_strategy"),
+                    )
+                )
 
-            return server.to_dict()
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+    def _prepare_persistent_root_volumes(
+        self,
+        name: str,
+        vm_av_zone: list,
+        disk: dict,
+        base_disk_index: int,
+        block_device_mapping: dict,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ) -> Optional[str]:
+        """Prepare persistent root volumes for new VM instance.
 
-    def get_vminstance_console(self, vm_id, console_type="vnc"):
-        """
-        Get a console for the virtual machine
-        Params:
-            vm_id: uuid of the VM
-            console_type, can be:
-                "novnc" (by default), "xvpvnc" for VNC types,
-                "rdp-html5" for RDP types, "spice-html5" for SPICE types
-        Returns dict with the console parameters:
-                protocol: ssh, ftp, http, https, ...
-                server:   usually ip address
-                port:     the http, ssh, ... port
-                suffix:   extra text, e.g. the http path and query string
-        """
-        self.logger.debug("Getting VM CONSOLE from VIM")
+        Args:
+            name    (str):                      Name of VM instance
+            vm_av_zone  (list):                 List of availability zones
+            disk    (dict):                     Disk details
+            base_disk_index (int):              Disk index
+            block_device_mapping    (dict):     Block device details
+            existing_vim_volumes    (list):     Existing disk details
+            created_items   (dict):             All created items belonging to the VM
 
 
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
+        Returns:
+            boot_volume_id  (str):              ID of boot volume
 
 
-            if console_type is None or console_type == "novnc":
-                console_dict = server.get_vnc_console("novnc")
-            elif console_type == "xvpvnc":
-                console_dict = server.get_vnc_console(console_type)
-            elif console_type == "rdp-html5":
-                console_dict = server.get_rdp_console(console_type)
-            elif console_type == "spice-html5":
-                console_dict = server.get_spice_console(console_type)
-            else:
-                raise vimconn.VimConnException(
-                    "console type '{}' not allowed".format(console_type),
-                    http_code=vimconn.HTTP_Bad_Request,
-                )
+        """
+        # Disk may include only vim_volume_id or only vim_id
+        # Reuse an existing persistent root volume, identified by vim_volume_id or vim_id
+        key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
 
 
-            console_dict1 = console_dict.get("console")
+        if disk.get(key_id):
+            block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
+            existing_vim_volumes.append({"id": disk[key_id]})
 
 
-            if console_dict1:
-                console_url = console_dict1.get("url")
+        else:
+            # Create persistent root volume
+            volume = self.cinder.volumes.create(
+                size=disk["size"],
+                name=name + "vd" + chr(base_disk_index),
+                imageRef=disk["image_id"],
+                # Make sure volume is in the same AZ as the VM to be attached to
+                availability_zone=vm_av_zone,
+            )
+            boot_volume_id = volume.id
+            self.update_block_device_mapping(
+                volume=volume,
+                block_device_mapping=block_device_mapping,
+                base_disk_index=base_disk_index,
+                disk=disk,
+                created_items=created_items,
+            )
 
 
-                if console_url:
-                    # parse console_url
-                    protocol_index = console_url.find("//")
-                    suffix_index = (
-                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
-                    )
-                    port_index = (
-                        console_url[protocol_index + 2 : suffix_index].find(":")
-                        + protocol_index
-                        + 2
-                    )
+            return boot_volume_id
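# A minimal sketch (not part of the patch) of the two disk shapes this helper
# accepts; all ids and sizes below are hypothetical.
reused_root_disk = {"vim_volume_id": "vol-0aa0"}      # reused as-is
new_root_disk = {"size": 10, "image_id": "img-9bb9"}  # created through Cinder
block_device_mapping = {}
# With base_disk_index == ord("a"), the reuse branch yields
# block_device_mapping == {"vda": "vol-0aa0"}, while the create branch maps
# "vda" to the id of the newly created bootable volume.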
+
+    @staticmethod
+    def update_block_device_mapping(
+        volume: object,
+        block_device_mapping: dict,
+        base_disk_index: int,
+        disk: dict,
+        created_items: dict,
+    ) -> None:
+        """Add volume information to block device mapping dict.
+        Args:
+            volume  (object):                   Created volume object
+            block_device_mapping    (dict):     Block device details
+            base_disk_index (int):              Disk index
+            disk    (dict):                     Disk details
+            created_items   (dict):             All created items belonging to the VM
+        """
+        if not volume:
+            raise vimconn.VimConnException("Volume is empty.")
 
 
-                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
-                        return (
-                            -vimconn.HTTP_Internal_Server_Error,
-                            "Unexpected response from VIM",
-                        )
+        if not hasattr(volume, "id"):
+            raise vimconn.VimConnException(
+                "Created volume is not valid, does not have id attribute."
+            )
 
 
-                    console_dict = {
-                        "protocol": console_url[0:protocol_index],
-                        "server": console_url[protocol_index + 2 : port_index],
-                        "port": console_url[port_index:suffix_index],
-                        "suffix": console_url[suffix_index + 1 :],
-                    }
-                    protocol_index += 2
+        volume_txt = "volume:" + str(volume.id)
+        if disk.get("keep"):
+            volume_txt += ":keep"
+        created_items[volume_txt] = True
+        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
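# A sketch of the created_items key convention used above (ids are made up):
# keys are "<kind>:<id>", with an optional ":keep" suffix that shields a
# volume from deletion until the tag is stripped.
created_items = {
    "volume:vol-3fd4": True,       # deletable volume
    "volume:vol-5ae6:keep": True,  # volume to preserve on rollback
}
kind, _, tail = "volume:vol-5ae6:keep".partition(":")
assert kind == "volume" and tail == "vol-5ae6:keep"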
 
 
-                    return console_dict
-            raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.BadRequest,
-            ConnectionError,
-        ) as e:
+    def new_shared_volumes(self, shared_volume_data) -> Tuple[str, str]:
+        """Create a multiattach volume that can be shared between several VM instances.
+
+        Args:
+            shared_volume_data  (dict):     Name and size of the volume to create
+
+        Returns:
+            (name, id)  (tuple):            Name and ID of the created volume
+        """
+        try:
+            volume = self.cinder.volumes.create(
+                size=shared_volume_data["size"],
+                name=shared_volume_data["name"],
+                volume_type="multiattach",
+            )
+            return (volume.name, volume.id)
+        except (ConnectionError, KeyError) as e:
             self._format_exception(e)
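# Hypothetical usage of new_shared_volumes (defined above): it expects "name"
# and "size" keys, and it assumes the target cloud exposes a Cinder volume
# type called "multiattach"; vim_conn stands in for a connector instance.
shared_volume_data = {"name": "shared-data", "size": 20}
# volume_name, volume_id = vim_conn.new_shared_volumes(shared_volume_data)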
 
 
-    def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
-        """Removes a VM instance from VIM. Returns the old identifier"""
-        # print "osconnector: Getting VM from VIM"
-        if created_items is None:
-            created_items = {}
+    def _prepare_shared_volumes(
+        self,
+        name: str,
+        disk: dict,
+        base_disk_index: int,
+        block_device_mapping: dict,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ) -> None:
+        """Prepare an existing multiattach (shared) volume to be attached to the new VM instance.
+
+        Args:
+            name    (str):                      Name of VM instance
+            disk    (dict):                     Disk details including the shared volume name
+            base_disk_index (int):              Disk index
+            block_device_mapping    (dict):     Block device details
+            existing_vim_volumes    (list):     Existing disk details
+            created_items   (dict):             All created items belonging to the VM
+        """
+        volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
+        if volumes.get(disk["name"]):
+            sv_id = volumes[disk["name"]]
+            volume = self.cinder.volumes.get(sv_id)
+            self.update_block_device_mapping(
+                volume=volume,
+                block_device_mapping=block_device_mapping,
+                base_disk_index=base_disk_index,
+                disk=disk,
+                created_items=created_items,
+            )
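# Sketch of the lookup performed above: shared volumes are resolved by name
# against the volumes already present in Cinder (sample names are invented).
volumes = {"shared-data": "vol-1111", "scratch": "vol-2222"}  # name -> id
disk = {"name": "shared-data", "multiattach": True}
sv_id = volumes.get(disk["name"])  # "vol-1111"; nothing is created if missing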
 
 
-        try:
-            self._reload_connection()
-            # delete VM ports attached to this networks before the virtual machine
-            for k, v in created_items.items():
-                if not v:  # skip already deleted
-                    continue
+    def _prepare_non_root_persistent_volumes(
+        self,
+        name: str,
+        disk: dict,
+        vm_av_zone: list,
+        block_device_mapping: dict,
+        base_disk_index: int,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ) -> None:
+        """Prepare persistent volumes for new VM instance.
 
 
-                try:
-                    k_item, _, k_id = k.partition(":")
-                    if k_item == "port":
-                        self.neutron.delete_port(k_id)
-                except Exception as e:
-                    self.logger.error(
-                        "Error deleting port: {}: {}".format(type(e).__name__, e)
-                    )
+        Args:
+            name    (str):                      Name of VM instance
+            disk    (dict):                     Disk details
+            vm_av_zone  (list):                 List of availability zones
+            block_device_mapping    (dict):     Block device details
+            base_disk_index (int):              Disk index
+            existing_vim_volumes    (list):     Existing disk details
+            created_items   (dict):             All created items belonging to the VM
+        """
+        # Non-root persistent volumes
+        # Disk may include only vim_volume_id or only vim_id
+        key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
+        if disk.get(key_id):
+            # Use existing persistent volume
+            block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
+            existing_vim_volumes.append({"id": disk[key_id]})
+        else:
+            volume_name = f"{name}vd{chr(base_disk_index)}"
+            volume = self.cinder.volumes.create(
+                size=disk["size"],
+                name=volume_name,
+                # Make sure volume is in the same AZ as the VM to be attached to
+                availability_zone=vm_av_zone,
+            )
+            self.update_block_device_mapping(
+                volume=volume,
+                block_device_mapping=block_device_mapping,
+                base_disk_index=base_disk_index,
+                disk=disk,
+                created_items=created_items,
+            )
 
 
-            # #commented because detaching the volumes makes the servers.delete not work properly ?!?
-            # #dettach volumes attached
-            # server = self.nova.servers.get(vm_id)
-            # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"]   #volume["id"]
-            # #for volume in volumes_attached_dict:
-            # #    self.cinder.volumes.detach(volume["id"])
+    def _wait_for_created_volumes_availability(
+        self, elapsed_time: int, created_items: dict
+    ) -> Optional[int]:
+        """Wait till created volumes become available.
 
 
-            if vm_id:
-                self.nova.servers.delete(vm_id)
+        Args:
+            elapsed_time    (int):          Passed time while waiting
+            created_items   (dict):         All created items belonging to the VM
 
 
-            # delete volumes. Although having detached, they should have in active status before deleting
-            # we ensure in this loop
-            keep_waiting = True
-            elapsed_time = 0
+        Returns:
+            elapsed_time    (int):          Time spent while waiting
 
 
-            while keep_waiting and elapsed_time < volume_timeout:
-                keep_waiting = False
+        """
+        while elapsed_time < volume_timeout:
+            for created_item in created_items:
+                v, volume_id = created_item.split(":")[0:2]
+                if v == "volume":
+                    volume = self.cinder.volumes.get(volume_id)
+                    if (
+                        volume.volume_type == "multiattach"
+                        and volume.status == "in-use"
+                    ):
+                        return elapsed_time
+                    elif volume.status != "available":
+                        break
+            else:
+                # All ready: break from while
+                break
 
 
-                for k, v in created_items.items():
-                    if not v:  # skip already deleted
-                        continue
+            time.sleep(5)
+            elapsed_time += 5
 
 
-                    try:
-                        k_item, _, k_id = k.partition(":")
-                        if k_item == "volume":
-                            if self.cinder.volumes.get(k_id).status != "available":
-                                keep_waiting = True
-                            else:
-                                if k_id not in volumes_to_hold:
-                                    self.cinder.volumes.delete(k_id)
-                                    created_items[k] = None
-                        elif k_item == "floating_ip":  # floating ip
-                            self.neutron.delete_floatingip(k_id)
-                            created_items[k] = None
+        return elapsed_time
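# A reduced, standalone sketch of the for/else polling pattern above: the
# else branch runs only when the for loop was not broken, i.e. when every
# watched item is ready. check_ready is a hypothetical callable.
import time

def wait_all_ready(check_ready, items, timeout, period=5):
    elapsed = 0
    while elapsed < timeout:
        for item in items:
            if not check_ready(item):
                break          # something is still pending: sleep and retry
        else:
            break              # no break happened: all items are ready
        time.sleep(period)
        elapsed += period
    return elapsed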
 
 
-                    except Exception as e:
-                        self.logger.error("Error deleting {}: {}".format(k, e))
+    def _wait_for_existing_volumes_availability(
+        self, elapsed_time: int, existing_vim_volumes: list
+    ) -> Optional[int]:
+        """Wait till existing volumes become available.
 
 
-                if keep_waiting:
-                    time.sleep(1)
-                    elapsed_time += 1
+        Args:
+            elapsed_time    (int):          Passed time while waiting
+            existing_vim_volumes   (list):  Existing volume details
 
 
-            return None
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        Returns:
+            elapsed_time    (int):          Time spent while waiting
 
 
-    def refresh_vms_status(self, vm_list):
-        """Get the status of the virtual machines and their interfaces/ports
-        Params: the list of VM identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this Virtual Machine
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
-                            #  CREATING (on building process), ERROR
-                            #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
-                            #
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-                interfaces:
-                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
-                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
-                    vim_net_id:       #network id where this interface is connected
-                    vim_interface_id: #interface/port VIM id
-                    ip_address:       #null, or text with IPv4, IPv6 address
-                    compute_node:     #identification of compute node where PF,VF interface is allocated
-                    pci:              #PCI address of the NIC that hosts the PF,VF
-                    vlan:             #physical VLAN used for VF
         """
         """
-        vm_dict = {}
-        self.logger.debug(
-            "refresh_vms status: Getting tenant VM instance information from VIM"
-        )
 
 
-        for vm_id in vm_list:
-            vm = {}
+        while elapsed_time < volume_timeout:
+            for volume in existing_vim_volumes:
+                v = self.cinder.volumes.get(volume["id"])
+                if v.volume_type == "multiattach" and v.status == "in-use":
+                    return elapsed_time
+                elif v.status != "available":
+                    break
+            else:  # all ready: break from while
+                break
 
 
-            try:
-                vm_vim = self.get_vminstance(vm_id)
+            time.sleep(5)
+            elapsed_time += 5
 
 
-                if vm_vim["status"] in vmStatus2manoFormat:
-                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
-                else:
-                    vm["status"] = "OTHER"
-                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]
+        return elapsed_time
 
 
-                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
-                vm_vim.pop("user_data", None)
-                vm["vim_info"] = self.serialize(vm_vim)
+    def _prepare_disk_for_vminstance(
+        self,
+        name: str,
+        existing_vim_volumes: list,
+        created_items: dict,
+        vm_av_zone: list,
+        block_device_mapping: dict,
+        disk_list: list = None,
+    ) -> None:
+        """Prepare all volumes for new VM instance.
 
 
-                vm["interfaces"] = []
-                if vm_vim.get("fault"):
-                    vm["error_msg"] = str(vm_vim["fault"])
+        Args:
+            name    (str):                      Name of Instance
+            existing_vim_volumes    (list):     List of existing volumes
+            created_items   (dict):             All created items belonging to the VM
+            vm_av_zone  (list):                 VM availability zone
+            block_device_mapping (dict):        Block devices to be attached to VM
+            disk_list   (list):                 List of disks
 
 
-                # get interfaces
-                try:
-                    self._reload_connection()
-                    port_dict = self.neutron.list_ports(device_id=vm_id)
+        """
+        # Create additional volumes in case these are present in disk_list
+        base_disk_index = ord("b")
+        boot_volume_id = None
+        elapsed_time = 0
+        for disk in disk_list:
+            if "image_id" in disk:
+                # Root persistent volume
+                base_disk_index = ord("a")
+                boot_volume_id = self._prepare_persistent_root_volumes(
+                    name=name,
+                    vm_av_zone=vm_av_zone,
+                    disk=disk,
+                    base_disk_index=base_disk_index,
+                    block_device_mapping=block_device_mapping,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
+            elif disk.get("multiattach"):
+                self._prepare_shared_volumes(
+                    name=name,
+                    disk=disk,
+                    base_disk_index=base_disk_index,
+                    block_device_mapping=block_device_mapping,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
+            else:
+                # Non-root persistent volume
+                self._prepare_non_root_persistent_volumes(
+                    name=name,
+                    disk=disk,
+                    vm_av_zone=vm_av_zone,
+                    block_device_mapping=block_device_mapping,
+                    base_disk_index=base_disk_index,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
+            base_disk_index += 1
 
 
-                    for port in port_dict["ports"]:
-                        interface = {}
-                        interface["vim_info"] = self.serialize(port)
-                        interface["mac_address"] = port.get("mac_address")
-                        interface["vim_net_id"] = port["network_id"]
-                        interface["vim_interface_id"] = port["id"]
-                        # check if OS-EXT-SRV-ATTR:host is there,
-                        # in case of non-admin credentials, it will be missing
+        # Wait until created volumes reach 'available' status
+        elapsed_time = self._wait_for_created_volumes_availability(
+            elapsed_time, created_items
+        )
+        # Wait until existing volumes in the VIM reach 'available' status
+        elapsed_time = self._wait_for_existing_volumes_availability(
+            elapsed_time, existing_vim_volumes
+        )
+        # If we exceeded the timeout, roll back
+        if elapsed_time >= volume_timeout:
+            raise vimconn.VimConnException(
+                "Timeout creating volumes for instance " + name,
+                http_code=vimconn.HTTP_Request_Timeout,
+            )
+        if boot_volume_id:
+            self.cinder.volumes.set_bootable(boot_volume_id, True)
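# Illustrative disk_list (hypothetical ids) and the device names the loop
# above derives: a disk carrying "image_id" resets the index to "vda"; every
# iteration then advances it by one letter.
disk_list = [
    {"size": 10, "image_id": "img-1111"},          # root volume -> "vda"
    {"size": 5},                                   # persistent  -> "vdb"
    {"name": "shared-data", "multiattach": True},  # shared      -> "vdc"
]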
 
 
-                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
-                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
+    def _find_the_external_network_for_floating_ip(self):
+        """Get the external network ip in order to create floating IP.
 
 
-                        interface["pci"] = None
+        Returns:
+            pool_id (str):      External network pool ID
 
 
-                        # check if binding:profile is there,
-                        # in case of non-admin credentials, it will be missing
-                        if port.get("binding:profile"):
-                            if port["binding:profile"].get("pci_slot"):
-                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
-                                #  the slot to 0x00
-                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
-                                #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
-                                pci = port["binding:profile"]["pci_slot"]
-                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
-                                interface["pci"] = pci
+        """
 
 
-                        interface["vlan"] = None
+        # Find the external network
+        external_nets = list()
 
 
-                        if port.get("binding:vif_details"):
-                            interface["vlan"] = port["binding:vif_details"].get("vlan")
+        for net in self.neutron.list_networks()["networks"]:
+            if net["router:external"]:
+                external_nets.append(net)
 
 
-                        # Get vlan from network in case not present in port for those old openstacks and cases where
-                        # it is needed vlan at PT
-                        if not interface["vlan"]:
-                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
-                            network = self.neutron.show_network(port["network_id"])
+        if len(external_nets) == 0:
+            raise vimconn.VimConnException(
+                "Cannot create floating_ip automatically since "
+                "no external network is present",
+                http_code=vimconn.HTTP_Conflict,
+            )
 
 
-                            if (
-                                network["network"].get("provider:network_type")
-                                == "vlan"
-                            ):
-                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
-                                interface["vlan"] = network["network"].get(
-                                    "provider:segmentation_id"
-                                )
+        if len(external_nets) > 1:
+            raise vimconn.VimConnException(
+                "Cannot create floating_ip automatically since "
+                "multiple external networks are present",
+                http_code=vimconn.HTTP_Conflict,
+            )
 
 
-                        ips = []
-                        # look for floating ip address
-                        try:
-                            floating_ip_dict = self.neutron.list_floatingips(
-                                port_id=port["id"]
-                            )
+        # Pool ID
+        return external_nets[0].get("id")
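# Sketch of the selection rule above: exactly one "router:external" network
# may exist, otherwise the method raises (sample data is invented).
nets = [{"id": "ext-1111", "router:external": True}]
external = [n for n in nets if n["router:external"]]
assert len(external) == 1
pool_id = external[0]["id"]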
 
 
-                            if floating_ip_dict.get("floatingips"):
-                                ips.append(
-                                    floating_ip_dict["floatingips"][0].get(
-                                        "floating_ip_address"
-                                    )
-                                )
-                        except Exception:
-                            pass
+    def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
+        """Trigger neutron to create a new floating IP using external network ID.
 
 
-                        for subnet in port["fixed_ips"]:
-                            ips.append(subnet["ip_address"])
+        Args:
+            param   (dict):             Input parameters to create a floating IP
+            created_items   (dict):     All created items belonging to the new VM instance
 
 
-                        interface["ip_address"] = ";".join(ips)
-                        vm["interfaces"].append(interface)
-                except Exception as e:
-                    self.logger.error(
-                        "Error getting vm interface information {}: {}".format(
-                            type(e).__name__, e
-                        ),
-                        exc_info=True,
-                    )
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting vm status: %s", str(e))
-                vm["status"] = "DELETED"
-                vm["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting vm status: %s", str(e))
-                vm["status"] = "VIM_ERROR"
-                vm["error_msg"] = str(e)
+        Raises:
 
 
-            vm_dict[vm_id] = vm
+            VimConnException
+        """
+        try:
+            self.logger.debug("Creating floating IP")
+            new_floating_ip = self.neutron.create_floatingip(param)
+            free_floating_ip = new_floating_ip["floatingip"]["id"]
+            created_items["floating_ip:" + str(free_floating_ip)] = True
 
 
-        return vm_dict
+        except Exception as e:
+            raise vimconn.VimConnException(
+                type(e).__name__ + ": Cannot create new floating_ip " + str(e),
+                http_code=vimconn.HTTP_Conflict,
+            )
 
 
-    def action_vminstance(self, vm_id, action_dict, created_items={}):
-        """Send and action over a VM instance from VIM
-        Returns None or the console dict if the action was successfully sent to the VIM"""
-        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
+    def _create_floating_ip(
+        self, floating_network: dict, server: object, created_items: dict
+    ) -> None:
+        """Get the available Pool ID and create a new floating IP.
 
 
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
+        Args:
+            floating_network    (dict):         Dict including external network ID
+            server   (object):                  Server object
+            created_items   (dict):             All created items belonging to the new VM instance
 
 
-            if "start" in action_dict:
-                if action_dict["start"] == "rebuild":
-                    server.rebuild()
-                else:
-                    if server.status == "PAUSED":
-                        server.unpause()
-                    elif server.status == "SUSPENDED":
-                        server.resume()
-                    elif server.status == "SHUTOFF":
-                        server.start()
-            elif "pause" in action_dict:
-                server.pause()
-            elif "resume" in action_dict:
-                server.resume()
-            elif "shutoff" in action_dict or "shutdown" in action_dict:
-                server.stop()
-            elif "forceOff" in action_dict:
-                server.stop()  # TODO
-            elif "terminate" in action_dict:
-                server.delete()
-            elif "createImage" in action_dict:
-                server.create_image()
-                # "path":path_schema,
-                # "description":description_schema,
-                # "name":name_schema,
-                # "metadata":metadata_schema,
-                # "imageRef": id_schema,
-                # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
-            elif "rebuild" in action_dict:
-                server.rebuild(server.image["id"])
-            elif "reboot" in action_dict:
-                server.reboot()  # reboot_type="SOFT"
-            elif "console" in action_dict:
-                console_type = action_dict["console"]
+        """
 
 
-                if console_type is None or console_type == "novnc":
-                    console_dict = server.get_vnc_console("novnc")
-                elif console_type == "xvpvnc":
-                    console_dict = server.get_vnc_console(console_type)
-                elif console_type == "rdp-html5":
-                    console_dict = server.get_rdp_console(console_type)
-                elif console_type == "spice-html5":
-                    console_dict = server.get_spice_console(console_type)
-                else:
-                    raise vimconn.VimConnException(
-                        "console type '{}' not allowed".format(console_type),
-                        http_code=vimconn.HTTP_Bad_Request,
-                    )
+        # Pool_id is available
+        if (
+            isinstance(floating_network["floating_ip"], str)
+            and floating_network["floating_ip"].lower() != "true"
+        ):
+            pool_id = floating_network["floating_ip"]
 
 
-                try:
-                    console_url = console_dict["console"]["url"]
-                    # parse console_url
-                    protocol_index = console_url.find("//")
-                    suffix_index = (
-                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
-                    )
-                    port_index = (
-                        console_url[protocol_index + 2 : suffix_index].find(":")
-                        + protocol_index
-                        + 2
-                    )
+        # Find the Pool_id
+        else:
+            pool_id = self._find_the_external_network_for_floating_ip()
 
 
-                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
-                        raise vimconn.VimConnException(
-                            "Unexpected response from VIM " + str(console_dict)
-                        )
+        param = {
+            "floatingip": {
+                "floating_network_id": pool_id,
+                "tenant_id": server.tenant_id,
+            }
+        }
 
 
-                    console_dict2 = {
-                        "protocol": console_url[0:protocol_index],
-                        "server": console_url[protocol_index + 2 : port_index],
-                        "port": int(console_url[port_index + 1 : suffix_index]),
-                        "suffix": console_url[suffix_index + 1 :],
-                    }
+        self._neutron_create_float_ip(param, created_items)
 
 
-                    return console_dict2
-                except Exception:
-                    raise vimconn.VimConnException(
-                        "Unexpected response from VIM " + str(console_dict)
-                    )
+    def _find_floating_ip(
+        self,
+        server: object,
+        floating_ips: list,
+        floating_network: dict,
+    ) -> Optional[str]:
+        """Find the available free floating IPs if there are.
 
 
-            return None
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-        # TODO insert exception vimconn.HTTP_Unauthorized
+        Args:
+            server  (object):                   Server object
+            floating_ips    (list):             List of floating IPs
+            floating_network    (dict):         Details of floating network such as ID
+
+        Returns:
+            free_floating_ip    (str):          Free floating ip address
 
 
-    # ###### VIO Specific Changes #########
-    def _generate_vlanID(self):
-        """
-        Method to get unused vlanID
-            Args:
-                None
-            Returns:
-                vlanID
         """
         """
-        # Get used VLAN IDs
-        usedVlanIDs = []
-        networks = self.get_network_list()
+        for fip in floating_ips:
+            if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
+                continue
 
 
-        for net in networks:
-            if net.get("provider:segmentation_id"):
-                usedVlanIDs.append(net.get("provider:segmentation_id"))
+            if isinstance(floating_network["floating_ip"], str):
+                if fip.get("floating_network_id") != floating_network["floating_ip"]:
+                    continue
 
 
-        used_vlanIDs = set(usedVlanIDs)
+            return fip["id"]
 
 
-        # find unused VLAN ID
-        for vlanID_range in self.config.get("dataplane_net_vlan_range"):
-            try:
-                start_vlanid, end_vlanid = map(
-                    int, vlanID_range.replace(" ", "").split("-")
-                )
+    def _assign_floating_ip(
+        self, free_floating_ip: str, floating_network: dict
+    ) -> Dict:
+        """Assign the free floating ip address to port.
 
 
-                for vlanID in range(start_vlanid, end_vlanid + 1):
-                    if vlanID not in used_vlanIDs:
-                        return vlanID
-            except Exception as exp:
-                raise vimconn.VimConnException(
-                    "Exception {} occurred while generating VLAN ID.".format(exp)
-                )
-        else:
-            raise vimconn.VimConnConflictException(
-                "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
-                    self.config.get("dataplane_net_vlan_range")
-                )
-            )
+        Args:
+            free_floating_ip    (str):          Floating IP to be assigned
+            floating_network    (dict):         Floating network details
+
+        Returns:
+            fip (dict):         Floating IP details
 
 
-    def _generate_multisegment_vlanID(self):
         """
         """
-        Method to get unused vlanID
+        # The vim_id key contains the neutron.port_id
+        self.neutron.update_floatingip(
+            free_floating_ip,
+            {"floatingip": {"port_id": floating_network["vim_id"]}},
+        )
+        # To guard against a race condition, wait 5 seconds so a re-assignment to another VM can be detected
+        time.sleep(5)
+
+        return self.neutron.show_floatingip(free_floating_ip)
+
+    def _get_free_floating_ip(
+        self, server: object, floating_network: dict
+    ) -> Optional[str]:
+        """Get the free floating IP address.
+
         Args:
-            None
+            server  (object):               Server Object
+            floating_network    (dict):     Floating network details
+
         Returns:
-            vlanID
+            free_floating_ip    (str):      Free floating IP address
+
         """
         """
-        # Get used VLAN IDs
-        usedVlanIDs = []
-        networks = self.get_network_list()
-        for net in networks:
-            if net.get("provider:network_type") == "vlan" and net.get(
-                "provider:segmentation_id"
-            ):
-                usedVlanIDs.append(net.get("provider:segmentation_id"))
-            elif net.get("segments"):
-                for segment in net.get("segments"):
-                    if segment.get("provider:network_type") == "vlan" and segment.get(
-                        "provider:segmentation_id"
-                    ):
-                        usedVlanIDs.append(segment.get("provider:segmentation_id"))
 
 
-        used_vlanIDs = set(usedVlanIDs)
+        floating_ips = self.neutron.list_floatingips().get("floatingips", ())
 
 
-        # find unused VLAN ID
-        for vlanID_range in self.config.get("multisegment_vlan_range"):
-            try:
-                start_vlanid, end_vlanid = map(
-                    int, vlanID_range.replace(" ", "").split("-")
-                )
+        # Randomize
+        random.shuffle(floating_ips)
 
 
-                for vlanID in range(start_vlanid, end_vlanid + 1):
-                    if vlanID not in used_vlanIDs:
-                        return vlanID
-            except Exception as exp:
-                raise vimconn.VimConnException(
-                    "Exception {} occurred while generating VLAN ID.".format(exp)
-                )
-        else:
-            raise vimconn.VimConnConflictException(
-                "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
-                    self.config.get("multisegment_vlan_range")
-                )
-            )
+        return self._find_floating_ip(server, floating_ips, floating_network)
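# Why the shuffle above matters with RO in HA: two instances walking the same
# list in the same order would contend for the same first free IP; a random
# order makes such collisions less likely (illustrative sketch only).
import random

candidates = ["fip-1", "fip-2", "fip-3"]
random.shuffle(candidates)  # each RO instance now probes in a different order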
+
+    def _prepare_external_network_for_vminstance(
+        self,
+        external_network: list,
+        server: object,
+        created_items: dict,
+        vm_start_time: float,
+    ) -> None:
+        """Assign floating IP address for VM instance.
+
+        Args:
+            external_network    (list):         List of external network details
+            server  (object):                   Server Object
+            created_items   (dict):             All created items belonging to the new VM instance
+            vm_start_time   (float):            Time as a floating point number expressed in seconds since the epoch, in UTC
+
+        Raises:
+            VimConnException
 
 
-    def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
-        """
-        Method to validate user given vlanID ranges
-            Args:  None
-            Returns: None
         """
         """
-        for vlanID_range in input_vlan_range:
-            vlan_range = vlanID_range.replace(" ", "")
-            # validate format
-            vlanID_pattern = r"(\d)*-(\d)*$"
-            match_obj = re.match(vlanID_pattern, vlan_range)
-            if not match_obj:
-                raise vimconn.VimConnConflictException(
-                    "Invalid VLAN range for {}: {}.You must provide "
-                    "'{}' in format [start_ID - end_ID].".format(
-                        text_vlan_range, vlanID_range, text_vlan_range
+        for floating_network in external_network:
+            try:
+                assigned = False
+                floating_ip_retries = 3
+                # In case of RO in HA there can be conflicts: two RO instances may try to assign the same floating
+                # IP, so retry several times
+                while not assigned:
+                    free_floating_ip = self._get_free_floating_ip(
+                        server, floating_network
                     )
-                )
 
 
-            start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
-            if start_vlanid <= 0:
-                raise vimconn.VimConnConflictException(
-                    "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
-                    "networks valid IDs are 1 to 4094 ".format(
-                        text_vlan_range, vlanID_range
-                    )
-                )
+                    if not free_floating_ip:
+                        self._create_floating_ip(
+                            floating_network, server, created_items
+                        )
+
+                    try:
+                        # Guard against a race condition: ensure it is not already assigned
+                        fip = self.neutron.show_floatingip(free_floating_ip)
+
+                        if fip["floatingip"].get("port_id"):
+                            continue
+
+                        # Assign floating ip
+                        fip = self._assign_floating_ip(
+                            free_floating_ip, floating_network
+                        )
+
+                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
+                            self.logger.warning(
+                                "floating_ip {} re-assigned to other port".format(
+                                    free_floating_ip
+                                )
+                            )
+                            continue
+
+                        self.logger.debug(
+                            "Assigned floating_ip {} to VM {}".format(
+                                free_floating_ip, server.id
+                            )
+                        )
+
+                        assigned = True
+
+                    except Exception as e:
+                        # OpenStack needs some time after VM creation to assign an IP, so retry if it fails
+                        vm_status = self.nova.servers.get(server.id).status
+
+                        if vm_status not in ("ACTIVE", "ERROR"):
+                            if time.time() - vm_start_time < server_timeout:
+                                time.sleep(5)
+                                continue
+                        elif floating_ip_retries > 0:
+                            floating_ip_retries -= 1
+                            continue
+
+                        raise vimconn.VimConnException(
+                            "Cannot create floating_ip: {} {}".format(
+                                type(e).__name__, e
+                            ),
+                            http_code=vimconn.HTTP_Conflict,
+                        )
+
+            except Exception as e:
+                if not floating_network["exit_on_floating_ip_error"]:
+                    self.logger.error("Cannot create floating_ip. %s", str(e))
+                    continue
+
+                raise
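# A compact, self-contained sketch of the optimistic assign-and-verify loop
# above; find_free, create, assign and owns are hypothetical stand-ins for
# the neutron calls used by the method.
import time

def assign_with_retry(find_free, create, assign, owns, retries=3):
    while retries:
        fip = find_free()
        if fip is None:
            create()        # the next find_free() call should pick it up
            continue
        assign(fip)
        time.sleep(5)       # allow a concurrent re-assignment to surface
        if owns(fip):
            return fip
        retries -= 1        # lost the race: try another floating IP
    raise RuntimeError("floating IP kept being re-assigned by a peer")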
+
+    def _update_port_security_for_vminstance(
+        self,
+        no_secured_ports: list,
+        server: object,
+    ) -> None:
+        """Updates the port security according to no_secured_ports list.
+
+        Args:
+            no_secured_ports    (list):     List of ports whose port security will be disabled
+            server  (object):               Server Object
+
+        Raises:
+            VimConnException
+
+        """
+        # Wait until the VM is active and then disable the port-security
+        if no_secured_ports:
+            self.__wait_for_vm(server.id, "ACTIVE")
+
+        for port in no_secured_ports:
+            port_update = {
+                "port": {"port_security_enabled": False, "security_groups": None}
+            }
+
+            if port[1] == "allow-address-pairs":
+                port_update = {
+                    "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
+                }
+
+            try:
+                self.neutron.update_port(port[0], port_update)
 
 
-            if end_vlanid > 4094:
-                raise vimconn.VimConnConflictException(
-                    "Invalid VLAN range for {}: {}. End VLAN ID can not be "
-                    "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
-                        text_vlan_range, vlanID_range
+            except Exception:
+                raise vimconn.VimConnException(
+                    "It was not possible to disable port security for port {}".format(
+                        port[0]
                     )
                 )
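# The two neutron port-update payloads used above, side by side: either fully
# disable port security, or (strategy "allow-address-pairs") keep it enabled
# while allowing traffic from any address.
disable_all = {"port": {"port_security_enabled": False, "security_groups": None}}
allow_any = {"port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}}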
 
 
-            if start_vlanid > end_vlanid:
-                raise vimconn.VimConnConflictException(
-                    "Invalid VLAN range for {}: {}. You must provide '{}'"
-                    " in format start_ID - end_ID and start_ID < end_ID ".format(
-                        text_vlan_range, vlanID_range, text_vlan_range
-                    )
-                )
+    def new_vminstance(
+        self,
+        name: str,
+        description: str,
+        start: bool,
+        image_id: str,
+        flavor_id: str,
+        affinity_group_list: list,
+        net_list: list,
+        cloud_config=None,
+        disk_list=None,
+        availability_zone_index=None,
+        availability_zone_list=None,
+    ) -> tuple:
+        """Adds a VM instance to VIM.
 
 
-    # NOT USED FUNCTIONS
+        Args:
+            name    (str):          name of VM
+            description (str):      description
+            start   (bool):         indicates if VM must start or boot in pause mode. Ignored
+            image_id    (str):      image uuid
+            flavor_id   (str):      flavor uuid
+            affinity_group_list (list):     list of affinity groups, each one is a dictionary. Ignored if empty.
+            net_list    (list):         list of interfaces, each one is a dictionary with:
+                name:   name of network
+                net_id:     network uuid to connect
+                vpci:   virtual PCI to assign, ignored because OpenStack lacks support #TODO
+                model:  interface model, ignored #TODO
+                mac_address:    used for  SR-IOV ifaces #TODO for other types
+                use:    'data', 'bridge',  'mgmt'
+                type:   'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
+                vim_id:     filled/added by this function
+                floating_ip:    True/False (or it can be None)
+                port_security:  True/False
+            cloud_config    (dict): (optional) dictionary with:
+                key-pairs:      (optional) list of strings with the public key to be inserted to the default user
+                users:      (optional) list of users to be inserted, each item is a dict with:
+                    name:   (mandatory) user name,
+                    key-pairs: (optional) list of strings with the public key to be inserted to the user
+                user-data:  (optional) string is a text script to be passed directly to cloud-init
+                config-files:   (optional). List of files to be transferred. Each item is a dict with:
+                    dest:   (mandatory) string with the destination absolute path
+                    encoding:   (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    content :    (mandatory) string with the content of the file
+                    permissions:    (optional) string with file permissions, typically octal notation '0644'
+                    owner:  (optional) file owner, string with the format 'owner:group'
+                boot-data-drive:    boolean to indicate if user-data must be passed using a boot drive (hard disk)
+            disk_list:  (optional) list with additional disks to the VM. Each item is a dict with:
+                image_id:   (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                size:   (mandatory) string with the size of the disk in GB
+                vim_id:  (optional) should use this existing volume id
+            availability_zone_index:    Index of availability_zone_list to use for this VM. None if no AZ is required
+            availability_zone_list:     list of availability zones given by user in the VNFD descriptor. Ignored if
+                availability_zone_index is None
+                #TODO ip, security groups
 
 
-    def new_external_port(self, port_data):
-        """Adds a external port to VIM
-        Returns the port identifier"""
-        # TODO openstack if needed
-        return (
-            -vimconn.HTTP_Internal_Server_Error,
-            "osconnector.new_external_port() not implemented",
-        )
+        Returns:
+            A tuple with the instance identifier and created_items or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
 
 
-    def connect_port_network(self, port_id, network_id, admin=False):
-        """Connects a external port to a network
-        Returns status code of the VIM response"""
-        # TODO openstack if needed
-        return (
-            -vimconn.HTTP_Internal_Server_Error,
-            "osconnector.connect_port_network() not implemented",
+        """
+        self.logger.debug(
+            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
+            image_id,
+            flavor_id,
+            str(net_list),
         )
 
         )
 
-    def new_user(self, user_name, user_passwd, tenant_id=None):
-        """Adds a new user to openstack VIM
-        Returns the user identifier"""
-        self.logger.debug("osconnector: Adding a new user to VIM")
-
         try:
+            server = None
+            created_items = {}
+            net_list_vim = []
+            # list of external networks to be connected to instance, later on used to create floating_ip
+            external_network = []
+            # List of ports with port-security disabled
+            no_secured_ports = []
+            block_device_mapping = {}
+            existing_vim_volumes = []
+            server_group_id = None
+            scheduler_hints = {}
+
+            # Check the Openstack Connection
             self._reload_connection()
-            user = self.keystone.users.create(
-                user_name, password=user_passwd, default_project=tenant_id
-            )
-            # self.keystone.tenants.add_user(self.k_creds["username"], #role)
 
 
-            return user.id
-        except ksExceptions.ConnectionError as e:
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-        except ksExceptions.ClientException as e:  # TODO remove
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
+            # Prepare network list
+            self._prepare_network_for_vminstance(
+                name=name,
+                net_list=net_list,
+                created_items=created_items,
+                net_list_vim=net_list_vim,
+                external_network=external_network,
+                no_secured_ports=no_secured_ports,
             )
 
 
-        # TODO insert exception vimconn.HTTP_Unauthorized
-        # if reaching here is because an exception
-        self.logger.debug("new_user " + error_text)
+            # Cloud config
+            config_drive, userdata = self._create_user_data(cloud_config)
 
 
-        return error_value, error_text
+            # Get availability Zone
+            vm_av_zone = self._get_vm_availability_zone(
+                availability_zone_index, availability_zone_list
+            )
 
 
-    def delete_user(self, user_id):
-        """Delete a user from openstack VIM
-        Returns the user identifier"""
-        if self.debug:
-            print("osconnector: Deleting  a  user from VIM")
+            if disk_list:
+                # Prepare disks
+                self._prepare_disk_for_vminstance(
+                    name=name,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                    vm_av_zone=vm_av_zone,
+                    block_device_mapping=block_device_mapping,
+                    disk_list=disk_list,
+                )
 
 
-        try:
-            self._reload_connection()
-            self.keystone.users.delete(user_id)
+            if affinity_group_list:
+                # Only the first id in the list will be used. OpenStack restriction
+                server_group_id = affinity_group_list[0]["affinity_group_id"]
+                scheduler_hints["group"] = server_group_id
 
 
-            return 1, user_id
-        except ksExceptions.ConnectionError as e:
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-        except ksExceptions.NotFound as e:
-            error_value = -vimconn.HTTP_Not_Found
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
+            self.logger.debug(
+                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
+                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
+                "block_device_mapping={}, server_group={})".format(
+                    name,
+                    image_id,
+                    flavor_id,
+                    net_list_vim,
+                    self.config.get("security_groups"),
+                    vm_av_zone,
+                    self.config.get("keypair"),
+                    userdata,
+                    config_drive,
+                    block_device_mapping,
+                    server_group_id,
+                )
             )
-        except ksExceptions.ClientException as e:  # TODO remove
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
+            # Create VM
+            server = self.nova.servers.create(
+                name=name,
+                image=image_id,
+                flavor=flavor_id,
+                nics=net_list_vim,
+                security_groups=self.config.get("security_groups"),
+                # TODO remove security_groups in future versions. Already at neutron port
+                availability_zone=vm_av_zone,
+                key_name=self.config.get("keypair"),
+                userdata=userdata,
+                config_drive=config_drive,
+                block_device_mapping=block_device_mapping,
+                scheduler_hints=scheduler_hints,
             )
 
 
-        # TODO insert exception vimconn.HTTP_Unauthorized
-        # if reaching here is because an exception
-        self.logger.debug("delete_tenant " + error_text)
+            vm_start_time = time.time()
 
 
-        return error_value, error_text
+            self._update_port_security_for_vminstance(no_secured_ports, server)
 
 
-    def get_hosts_info(self):
-        """Get the information of deployed hosts
-        Returns the hosts content"""
-        if self.debug:
-            print("osconnector: Getting Host info from VIM")
+            self._prepare_external_network_for_vminstance(
+                external_network=external_network,
+                server=server,
+                created_items=created_items,
+                vm_start_time=vm_start_time,
+            )
 
 
-        try:
-            h_list = []
-            self._reload_connection()
-            hypervisors = self.nova.hypervisors.list()
+            return server.id, created_items
 
 
-            for hype in hypervisors:
-                h_list.append(hype.to_dict())
+        except Exception as e:
+            server_id = None
+            if server:
+                server_id = server.id
 
 
-            return 1, {"hosts": h_list}
-        except nvExceptions.NotFound as e:
-            error_value = -vimconn.HTTP_Not_Found
-            error_text = str(e) if len(e.args) == 0 else str(e.args[0])
-        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
+            try:
+                created_items = self.remove_keep_tag_from_persistent_volumes(
+                    created_items
+                )
 
 
-        # TODO insert exception vimconn.HTTP_Unauthorized
-        # if reaching here is because an exception
-        self.logger.debug("get_hosts_info " + error_text)
+                self.delete_vminstance(server_id, created_items)
 
 
-        return error_value, error_text
+            except Exception as e2:
+                self.logger.error("new_vminstance rollback fail {}".format(e2))
 
 
-    def get_hosts(self, vim_tenant):
-        """Get the hosts and deployed instances
-        Returns the hosts content"""
-        r, hype_dict = self.get_hosts_info()
+            self._format_exception(e)
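# Hypothetical caller sketch: new_vminstance returns (server_id, created_items),
# and created_items is exactly what delete_vminstance consumes on rollback.
# All names and uuids below are invented; conn stands in for a connector.
net_list = [
    {"name": "mgmt0", "net_id": "net-1111", "use": "mgmt",
     "type": "virtual", "port_security": True},
]
disk_list = [{"size": 10, "image_id": "img-2222"}]
# server_id, created_items = conn.new_vminstance(
#     name="vnf-vm-01", description="", start=True, image_id="img-2222",
#     flavor_id="flv-3333", affinity_group_list=[], net_list=net_list,
#     disk_list=disk_list,
# )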
 
 
-        if r < 0:
-            return r, hype_dict
+    @staticmethod
+    def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
+        """Removes the keep flag from persistent volumes, so that those volumes can be removed.
 
-        hypervisors = hype_dict["hosts"]
+        Args:
+            created_items (dict):       All created items belonging to the VM
+
+        Returns:
+            updated_created_items   (dict):     created_items with the keep flag removed from volume keys.
 
+        """
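+        # e.g. {"volume:<id>:keep": True} becomes {"volume:<id>": True}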
+        return {
+            key.replace(":keep", ""): value for (key, value) in created_items.items()
+        }
+
+    def get_vminstance(self, vm_id):
+        """Returns the VM instance information from VIM"""
+        # self.logger.debug("Getting VM from VIM")
         try:
-            servers = self.nova.servers.list()
-            for hype in hypervisors:
-                for server in servers:
-                    if (
-                        server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
-                        == hype["hypervisor_hostname"]
-                    ):
-                        if "vm" in hype:
-                            hype["vm"].append(server.id)
-                        else:
-                            hype["vm"] = [server.id]
+            self._reload_connection()
+            server = self.nova.servers.find(id=vm_id)
+            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
 
-            return 1, hype_dict
-        except nvExceptions.NotFound as e:
-            error_value = -vimconn.HTTP_Not_Found
-            error_text = str(e) if len(e.args) == 0 else str(e.args[0])
-        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
+            return server.to_dict()
+        except (
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
 
-        # TODO insert exception vimconn.HTTP_Unauthorized
-        # if reaching here is because an exception
-        self.logger.debug("get_hosts " + error_text)
+    def get_vminstance_console(self, vm_id, console_type="vnc"):
+        """
+        Get a console for the virtual machine
+        Params:
+            vm_id: uuid of the VM
+            console_type, can be:
+                "novnc" (by default), "xvpvnc" for VNC types,
+                "rdp-html5" for RDP types, "spice-html5" for SPICE types
+        Returns dict with the console parameters:
+                protocol: ssh, ftp, http, https, ...
+                server:   usually ip address
+                port:     the http, ssh, ... port
+                suffix:   extra text, e.g. the http path and query string
+        """
+        self.logger.debug("Getting VM CONSOLE from VIM")
 
-        return error_value, error_text
+        try:
+            self._reload_connection()
+            server = self.nova.servers.find(id=vm_id)
 
-    def new_classification(self, name, ctype, definition):
-        self.logger.debug(
-            "Adding a new (Traffic) Classification to VIM, named %s", name
-        )
+            if console_type is None or console_type == "novnc":
+                console_dict = server.get_vnc_console("novnc")
+            elif console_type == "xvpvnc":
+                console_dict = server.get_vnc_console(console_type)
+            elif console_type == "rdp-html5":
+                console_dict = server.get_rdp_console(console_type)
+            elif console_type == "spice-html5":
+                console_dict = server.get_spice_console(console_type)
+            else:
+                raise vimconn.VimConnException(
+                    "console type '{}' not allowed".format(console_type),
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
+
+            console_dict1 = console_dict.get("console")
 
-        try:
-            new_class = None
-            self._reload_connection()
+            if console_dict1:
+                console_url = console_dict1.get("url")
 
-            if ctype not in supportedClassificationTypes:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector does not support provided "
-                    "Classification Type {}, supported ones are: {}".format(
-                        ctype, supportedClassificationTypes
+                if console_url:
+                    # parse console_url
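+                    # locate "//" after the scheme, the first "/" after the host
+                    # (start of the path suffix) and the ":" that precedes the port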
+                    protocol_index = console_url.find("//")
+                    suffix_index = (
+                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+                    )
+                    port_index = (
+                        console_url[protocol_index + 2 : suffix_index].find(":")
+                        + protocol_index
+                        + 2
                     )
-                )
 
-            if not self._validate_classification(ctype, definition):
-                raise vimconn.VimConnException(
-                    "Incorrect Classification definition for the type specified."
-                )
+                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
+                        raise vimconn.VimConnUnexpectedResponse(
+                            "Unexpected response from VIM"
+                        )
 
-            classification_dict = definition
-            classification_dict["name"] = name
-            new_class = self.neutron.create_sfc_flow_classifier(
-                {"flow_classifier": classification_dict}
-            )
+                    console_dict = {
+                        "protocol": console_url[0:protocol_index],
+                        "server": console_url[protocol_index + 2 : port_index],
+                        "port": console_url[port_index:suffix_index],
+                        "suffix": console_url[suffix_index + 1 :],
+                    }
 
-            return new_class["flow_classifier"]["id"]
+                    return console_dict
+            raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
         except (
-            neExceptions.ConnectionFailed,
+            nvExceptions.NotFound,
             ksExceptions.ClientException,
-            neExceptions.NeutronException,
+            nvExceptions.ClientException,
+            nvExceptions.BadRequest,
             ConnectionError,
         ) as e:
-            self.logger.error("Creation of Classification failed.")
             self._format_exception(e)
 
-    def get_classification(self, class_id):
-        self.logger.debug(" Getting Classification %s from VIM", class_id)
-        filter_dict = {"id": class_id}
-        class_list = self.get_classification_list(filter_dict)
-
-        if len(class_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Classification '{}' not found".format(class_id)
-            )
-        elif len(class_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Classification with this criteria"
-            )
-
-        classification = class_list[0]
+    def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
+        """Delete a port by id, using Neutron.
+        Args:
+            k_id    (str):      Port id in the VIM
+        """
+        try:
+            port_dict = self.neutron.list_ports()
+            existing_ports = [port["id"] for port in port_dict.get("ports", [])]
 
-        return classification
+            if k_id in existing_ports:
+                self.neutron.delete_port(k_id)
 
-    def get_classification_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Classifications from VIM filter: '%s'", str(filter_dict)
-        )
+        except Exception as e:
+            self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
 
+    def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
+        """Delete a shared volume by id, using Cinder.
+        Args:
+            shared_volume_vim_id    (str):      ID of the shared volume in the VIM
+        """
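+        # a True return means the volume is not yet "available" (still detaching)
+        # and was therefore not deleted in this attempt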
         try:
-            filter_dict_os = filter_dict.copy()
-            self._reload_connection()
+            if self.cinder.volumes.get(shared_volume_vim_id).status != "available":
+                return True
 
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+            else:
+                self.cinder.volumes.delete(shared_volume_vim_id)
 
-            classification_dict = self.neutron.list_sfc_flow_classifiers(
-                **filter_dict_os
+        except Exception as e:
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
             )
-            classification_list = classification_dict["flow_classifiers"]
-            self.__classification_os2mano(classification_list)
-
-            return classification_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def delete_classification(self, class_id):
-        self.logger.debug("Deleting Classification '%s' from VIM", class_id)
 
+    def _delete_volumes_by_id_wth_cinder(
+        self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
+    ) -> bool:
+        """Delete a volume by id, using Cinder.
+        Args:
+            k   (str):                      Full item name in created_items
+            k_id    (str):                  ID of the volume in the VIM
+            volumes_to_hold (list):         IDs of volumes that must not be deleted
+            created_items   (dict):         All created items belonging to the VM
+        Returns:
+            True if the volume is not yet "available" and could not be deleted.
+        """
         try:
-            self._reload_connection()
-            self.neutron.delete_sfc_flow_classifier(class_id)
+            if k_id in volumes_to_hold:
+                return
 
-            return class_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+            if self.cinder.volumes.get(k_id).status != "available":
+                return True
 
-    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
-        self.logger.debug(
-            "Adding a new Service Function Instance to VIM, named '%s'", name
-        )
+            else:
+                self.cinder.volumes.delete(k_id)
+                created_items[k] = None
 
-        try:
-            new_sfi = None
-            self._reload_connection()
-            correlation = None
+        except Exception as e:
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
+            )
 
-            if sfc_encap:
-                correlation = "nsh"
+    def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
+        """Delete a floating IP by id, using Neutron.
+        Args:
+            k   (str):                      Full item name in created_items
+            k_id    (str):                  ID of the floating IP in the VIM
+            created_items   (dict):         All created items belonging to the VM
+        """
+        try:
+            self.neutron.delete_floatingip(k_id)
+            created_items[k] = None
 
-            if len(ingress_ports) != 1:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector can only have 1 ingress port per SFI"
-                )
+        except Exception as e:
+            self.logger.error(
+                "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
+            )
 
-            if len(egress_ports) != 1:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector can only have 1 egress port per SFI"
-                )
+    @staticmethod
+    def _get_item_name_id(k: str) -> Tuple[str, str]:
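+        # split "<item>:<id>" at the first ":", e.g. "port:<uuid>" -> ("port", "<uuid>")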
+        k_item, _, k_id = k.partition(":")
+        return k_item, k_id
 
-            sfi_dict = {
-                "name": name,
-                "ingress": ingress_ports[0],
-                "egress": egress_ports[0],
-                "service_function_parameters": {"correlation": correlation},
-            }
-            new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
+    def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
+        """Delete VM ports attached to the networks before deleting virtual machine.
+        Args:
+            created_items   (dict):     All created items belonging to the VM
+        """
 
-            return new_sfi["port_pair"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            if new_sfi:
-                try:
-                    self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
-                except Exception:
-                    self.logger.error(
-                        "Creation of Service Function Instance failed, with "
-                        "subsequent deletion failure as well."
-                    )
+        for k, v in created_items.items():
+            if not v:  # skip already deleted
+                continue
 
-            self._format_exception(e)
+            try:
+                k_item, k_id = self._get_item_name_id(k)
+                if k_item == "port":
+                    self._delete_ports_by_id_wth_neutron(k_id)
 
-    def get_sfi(self, sfi_id):
-        self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
-        filter_dict = {"id": sfi_id}
-        sfi_list = self.get_sfi_list(filter_dict)
+            except Exception as e:
+                self.logger.error(
+                    "Error deleting port: {}: {}".format(type(e).__name__, e)
+                )
 
-        if len(sfi_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Service Function Instance '{}' not found".format(sfi_id)
-            )
-        elif len(sfi_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Service Function Instance with this criteria"
-            )
+    def _delete_created_items(
+        self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
+    ) -> bool:
+        """Delete volumes and floating IPs if they exist in created_items."""
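+        # returns True while some volume is still not "available", so the caller
+        # keeps polling until volume_timeout expires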
+        for k, v in created_items.items():
+            if not v:  # skip already deleted
+                continue
 
-        sfi = sfi_list[0]
+            try:
+                k_item, k_id = self._get_item_name_id(k)
+                if k_item == "volume":
+                    unavailable_vol = self._delete_volumes_by_id_wth_cinder(
+                        k, k_id, volumes_to_hold, created_items
+                    )
 
-        return sfi
+                    if unavailable_vol:
+                        keep_waiting = True
 
-    def get_sfi_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
-        )
+                elif k_item == "floating_ip":
+                    self._delete_floating_ip_by_id(k, k_id, created_items)
 
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
+            except Exception as e:
+                self.logger.error("Error deleting {}: {}".format(k, e))
 
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+        return keep_waiting
 
-            sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
-            sfi_list = sfi_dict["port_pairs"]
-            self.__sfi_os2mano(sfi_list)
+    @staticmethod
+    def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
+        """Remove the volumes that have the keep flag from created_items
 
-            return sfi_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        Args:
+            created_items   (dict):         All created items belonging to the VM
 
-    def delete_sfi(self, sfi_id):
-        self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
+        Returns:
+            created_items   (dict):         created_items without the volumes flagged to keep
+        """
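+        # keys look like "<item>:<id>" or "<item>:<id>:keep"; keeping only the
+        # two-part keys drops every volume flagged to be kept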
+        return {
+            key: value
+            for (key, value) in created_items.items()
+            if len(key.split(":")) == 2
+        }
+
+    def delete_vminstance(
+        self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
+    ) -> None:
+        """Removes a VM instance from VIM, together with the items created for it.
+        Args:
+            vm_id   (str):              Identifier of VM instance
+            created_items   (dict):     All created items belonging to the VM
+            volumes_to_hold (list):     IDs of volumes that must not be deleted
+        """
+        if created_items is None:
+            created_items = {}
+        if volumes_to_hold is None:
+            volumes_to_hold = []
 
         try:
+            created_items = self._extract_items_wth_keep_flag_from_created_items(
+                created_items
+            )
+
             self._reload_connection()
-            self.neutron.delete_sfc_port_pair(sfi_id)
 
-            return sfi_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+            # Delete VM ports attached to the networks before the virtual machine
+            if created_items:
+                self._delete_vm_ports_attached_to_network(created_items)
 
-    def new_sf(self, name, sfis, sfc_encap=True):
-        self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
+            if vm_id:
+                self.nova.servers.delete(vm_id)
 
-        try:
-            new_sf = None
-            self._reload_connection()
-            # correlation = None
-            # if sfc_encap:
-            #     correlation = "nsh"
+            # Although already detached, volumes must reach "available" status before
+            # they can be deleted; this loop waits for that (up to volume_timeout).
+            keep_waiting = True
+            elapsed_time = 0
 
-            for instance in sfis:
-                sfi = self.get_sfi(instance)
+            while keep_waiting and elapsed_time < volume_timeout:
+                keep_waiting = False
 
-                if sfi.get("sfc_encap") != sfc_encap:
-                    raise vimconn.VimConnNotSupportedException(
-                        "OpenStack VIM connector requires all SFIs of the "
-                        "same SF to share the same SFC Encapsulation"
-                    )
+                # Delete volumes and floating IP.
+                keep_waiting = self._delete_created_items(
+                    created_items, volumes_to_hold, keep_waiting
+                )
 
-            sf_dict = {"name": name, "port_pairs": sfis}
-            new_sf = self.neutron.create_sfc_port_pair_group(
-                {"port_pair_group": sf_dict}
-            )
+                if keep_waiting:
+                    time.sleep(1)
+                    elapsed_time += 1
 
-            return new_sf["port_pair_group"]["id"]
         except (
-            neExceptions.ConnectionFailed,
+            nvExceptions.NotFound,
             ksExceptions.ClientException,
-            neExceptions.NeutronException,
+            nvExceptions.ClientException,
             ConnectionError,
         ) as e:
-            if new_sf:
-                try:
-                    self.neutron.delete_sfc_port_pair_group(
-                        new_sf["port_pair_group"]["id"]
-                    )
-                except Exception:
-                    self.logger.error(
-                        "Creation of Service Function failed, with "
-                        "subsequent deletion failure as well."
-                    )
-
             self._format_exception(e)
 
-    def get_sf(self, sf_id):
-        self.logger.debug("Getting Service Function %s from VIM", sf_id)
-        filter_dict = {"id": sf_id}
-        sf_list = self.get_sf_list(filter_dict)
+    def refresh_vms_status(self, vm_list):
+        """Get the status of the virtual machines and their interfaces/ports
+        Params: the list of VM identifiers
+        Returns a dictionary with:
+            vm_id:          #VIM id of this Virtual Machine
+                status:     #Mandatory. Text with one of:
+                            #  DELETED (not found at vim)
+                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                            #  OTHER (Vim reported other status not understood)
+                            #  ERROR (VIM indicates an ERROR status)
+                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+                            #  CREATING (on building process),
+                            #  ACTIVE:NoMgmtIP (active but none of its interfaces has an IP address)
+                            #
+                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                interfaces:
+                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
+                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
+                    vim_net_id:       #network id where this interface is connected
+                    vim_interface_id: #interface/port VIM id
+                    ip_address:       #null, or text with IPv4, IPv6 address
+                    compute_node:     #identification of compute node where PF,VF interface is allocated
+                    pci:              #PCI address of the NIC that hosts the PF,VF
+                    vlan:             #physical VLAN used for VF
+        """
+        vm_dict = {}
+        self.logger.debug(
+            "refresh_vms status: Getting tenant VM instance information from VIM"
+        )
+
+        for vm_id in vm_list:
+            vm = {}
 
-        if len(sf_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Service Function '{}' not found".format(sf_id)
-            )
-        elif len(sf_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Service Function with this criteria"
-            )
+            try:
+                vm_vim = self.get_vminstance(vm_id)
 
-        sf = sf_list[0]
+                if vm_vim["status"] in vmStatus2manoFormat:
+                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
+                else:
+                    vm["status"] = "OTHER"
+                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]
 
-        return sf
+                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
+                vm_vim.pop("user_data", None)
+                vm["vim_info"] = self.serialize(vm_vim)
 
-    def get_sf_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Service Function from VIM filter: '%s'", str(filter_dict)
-        )
+                vm["interfaces"] = []
+                if vm_vim.get("fault"):
+                    vm["error_msg"] = str(vm_vim["fault"])
 
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
+                # get interfaces
+                try:
+                    self._reload_connection()
+                    port_dict = self.neutron.list_ports(device_id=vm_id)
 
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+                    for port in port_dict["ports"]:
+                        interface = {}
+                        interface["vim_info"] = self.serialize(port)
+                        interface["mac_address"] = port.get("mac_address")
+                        interface["vim_net_id"] = port["network_id"]
+                        interface["vim_interface_id"] = port["id"]
+                        # check if OS-EXT-SRV-ATTR:host is there,
+                        # in case of non-admin credentials, it will be missing
 
-            sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
-            sf_list = sf_dict["port_pair_groups"]
-            self.__sf_os2mano(sf_list)
+                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
+                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
 
-            return sf_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+                        interface["pci"] = None
+
+                        # check if binding:profile is there,
+                        # in case of non-admin credentials, it will be missing
+                        if port.get("binding:profile"):
+                            if port["binding:profile"].get("pci_slot"):
+                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
+                                #  the slot to 0x00
+                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
+                                #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
+                                pci = port["binding:profile"]["pci_slot"]
+                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
+                                interface["pci"] = pci
 
-    def delete_sf(self, sf_id):
-        self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
+                        interface["vlan"] = None
 
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_port_pair_group(sf_id)
+                        if port.get("binding:vif_details"):
+                            interface["vlan"] = port["binding:vif_details"].get("vlan")
 
-            return sf_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+                        # Get the VLAN from the network when the port does not carry it,
+                        # for old OpenStack releases and cases where the PT needs the VLAN
+                        if not interface["vlan"]:
+                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
+                            network = self.neutron.show_network(port["network_id"])
 
-    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
-        self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
+                            if (
+                                network["network"].get("provider:network_type")
+                                == "vlan"
+                            ):
+                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
+                                interface["vlan"] = network["network"].get(
+                                    "provider:segmentation_id"
+                                )
 
-        try:
-            new_sfp = None
-            self._reload_connection()
-            # In networking-sfc the MPLS encapsulation is legacy
-            # should be used when no full SFC Encapsulation is intended
-            correlation = "mpls"
-
-            if sfc_encap:
-                correlation = "nsh"
-
-            sfp_dict = {
-                "name": name,
-                "flow_classifiers": classifications,
-                "port_pair_groups": sfs,
-                "chain_parameters": {"correlation": correlation},
-            }
+                        ips = []
+                        # look for floating ip address
+                        try:
+                            floating_ip_dict = self.neutron.list_floatingips(
+                                port_id=port["id"]
+                            )
 
-            if spi:
-                sfp_dict["chain_id"] = spi
+                            if floating_ip_dict.get("floatingips"):
+                                ips.append(
+                                    floating_ip_dict["floatingips"][0].get(
+                                        "floating_ip_address"
+                                    )
+                                )
+                        except Exception:
+                            pass
 
-            new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
+                        for subnet in port["fixed_ips"]:
+                            ips.append(subnet["ip_address"])
 
-            return new_sfp["port_chain"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            if new_sfp:
-                try:
-                    self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
-                except Exception:
+                        interface["ip_address"] = ";".join(ips)
+                        vm["interfaces"].append(interface)
+                except Exception as e:
                     self.logger.error(
-                        "Creation of Service Function Path failed, with "
-                        "subsequent deletion failure as well."
+                        "Error getting vm interface information {}: {}".format(
+                            type(e).__name__, e
+                        ),
+                        exc_info=True,
                     )
+            except vimconn.VimConnNotFoundException as e:
+                self.logger.error("Exception getting vm status: %s", str(e))
+                vm["status"] = "DELETED"
+                vm["error_msg"] = str(e)
+            except vimconn.VimConnException as e:
+                self.logger.error("Exception getting vm status: %s", str(e))
+                vm["status"] = "VIM_ERROR"
+                vm["error_msg"] = str(e)
 
-            self._format_exception(e)
-
-    def get_sfp(self, sfp_id):
-        self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
-
-        filter_dict = {"id": sfp_id}
-        sfp_list = self.get_sfp_list(filter_dict)
-
-        if len(sfp_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Service Function Path '{}' not found".format(sfp_id)
-            )
-        elif len(sfp_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Service Function Path with this criteria"
-            )
-
-        sfp = sfp_list[0]
+            vm_dict[vm_id] = vm
 
-        return sfp
+        return vm_dict
 
-    def get_sfp_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
-        )
+    def action_vminstance(self, vm_id, action_dict, created_items={}):
+        """Send an action to a VM instance in the VIM.
+        Returns None, or the console dict if the action was successfully sent to the VIM.
+        """
+        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
 
         try:
             self._reload_connection()
-            filter_dict_os = filter_dict.copy()
+            server = self.nova.servers.find(id=vm_id)
 
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+            if "start" in action_dict:
+                if action_dict["start"] == "rebuild":
+                    server.rebuild()
+                else:
+                    if server.status == "PAUSED":
+                        server.unpause()
+                    elif server.status == "SUSPENDED":
+                        server.resume()
+                    elif server.status == "SHUTOFF":
+                        server.start()
+                    else:
+                        self.logger.debug(
+                            "ERROR: Instance is not in SHUTOFF/PAUSED/SUSPENDED state"
+                        )
+                        raise vimconn.VimConnException(
+                            "Cannot 'start' instance while it is in active state",
+                            http_code=vimconn.HTTP_Bad_Request,
+                        )
+
+            elif "pause" in action_dict:
+                server.pause()
+            elif "resume" in action_dict:
+                server.resume()
+            elif "shutoff" in action_dict or "shutdown" in action_dict:
+                self.logger.debug("server status %s", server.status)
+                if server.status == "ACTIVE":
+                    server.stop()
+                else:
+                    self.logger.debug("ERROR: VM is not in Active state")
+                    raise vimconn.VimConnException(
+                        "VM is not in active state, stop operation is not allowed",
+                        http_code=vimconn.HTTP_Bad_Request,
+                    )
+            elif "forceOff" in action_dict:
+                server.stop()  # TODO
+            elif "terminate" in action_dict:
+                server.delete()
+            elif "createImage" in action_dict:
+                server.create_image()
+                # "path":path_schema,
+                # "description":description_schema,
+                # "name":name_schema,
+                # "metadata":metadata_schema,
+                # "imageRef": id_schema,
+                # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
+            elif "rebuild" in action_dict:
+                server.rebuild(server.image["id"])
+            elif "reboot" in action_dict:
+                server.reboot()  # reboot_type="SOFT"
+            elif "console" in action_dict:
+                console_type = action_dict["console"]
 
-            sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
-            sfp_list = sfp_dict["port_chains"]
-            self.__sfp_os2mano(sfp_list)
+                if console_type is None or console_type == "novnc":
+                    console_dict = server.get_vnc_console("novnc")
+                elif console_type == "xvpvnc":
+                    console_dict = server.get_vnc_console(console_type)
+                elif console_type == "rdp-html5":
+                    console_dict = server.get_rdp_console(console_type)
+                elif console_type == "spice-html5":
+                    console_dict = server.get_spice_console(console_type)
+                else:
+                    raise vimconn.VimConnException(
+                        "console type '{}' not allowed".format(console_type),
+                        http_code=vimconn.HTTP_Bad_Request,
+                    )
 
-            return sfp_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+                try:
+                    console_url = console_dict["console"]["url"]
+                    # parse console_url
+                    protocol_index = console_url.find("//")
+                    suffix_index = (
+                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+                    )
+                    port_index = (
+                        console_url[protocol_index + 2 : suffix_index].find(":")
+                        + protocol_index
+                        + 2
+                    )
+
+                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
+                        raise vimconn.VimConnException(
+                            "Unexpected response from VIM " + str(console_dict)
+                        )
 
-    def delete_sfp(self, sfp_id):
-        self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
+                    console_dict2 = {
+                        "protocol": console_url[0:protocol_index],
+                        "server": console_url[protocol_index + 2 : port_index],
+                        "port": int(console_url[port_index + 1 : suffix_index]),
+                        "suffix": console_url[suffix_index + 1 :],
+                    }
 
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_port_chain(sfp_id)
+                    return console_dict2
+                except Exception:
+                    raise vimconn.VimConnException(
+                        "Unexpected response from VIM " + str(console_dict)
+                    )
 
-            return sfp_id
+            return None
         except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
             ksExceptions.ClientException,
-            neExceptions.NeutronException,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
             ConnectionError,
         ) as e:
             self._format_exception(e)
             ConnectionError,
         ) as e:
             self._format_exception(e)
+        # TODO insert exception vimconn.HTTP_Unauthorized
 
 
-        """Get the status of the service function path
-        Params: the list of sfp identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function path
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)F
+    # ###### VIO Specific Changes #########
+    def _generate_vlanID(self):
         """
-        sfp_dict = {}
-        self.logger.debug(
-            "refresh_sfps status: Getting tenant SFP information from VIM"
-        )
-
-        for sfp_id in sfp_list:
-            sfp = {}
-
-            try:
-                sfp_vim = self.get_sfp(sfp_id)
-
-                if sfp_vim["spi"]:
-                    sfp["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    sfp["status"] = "OTHER"
-                    sfp["error_msg"] = "VIM status reported " + sfp["status"]
+        Method to get unused vlanID
+            Args:
+                None
+            Returns:
+                vlanID
+        """
+        # Get used VLAN IDs
+        usedVlanIDs = []
+        networks = self.get_network_list()
 
-                sfp["vim_info"] = self.serialize(sfp_vim)
+        for net in networks:
+            if net.get("provider:segmentation_id"):
+                usedVlanIDs.append(net.get("provider:segmentation_id"))
 
-                if sfp_vim.get("fault"):
-                    sfp["error_msg"] = str(sfp_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting sfp status: %s", str(e))
-                sfp["status"] = "DELETED"
-                sfp["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting sfp status: %s", str(e))
-                sfp["status"] = "VIM_ERROR"
-                sfp["error_msg"] = str(e)
+        used_vlanIDs = set(usedVlanIDs)
 
-            sfp_dict[sfp_id] = sfp
+        # find unused VLAN ID
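+        # each entry of "dataplane_net_vlan_range" is a string like "<start>-<end>";
+        # both endpoints are usable, since the range end is inclusive below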
+        for vlanID_range in self.config.get("dataplane_net_vlan_range"):
+            try:
+                start_vlanid, end_vlanid = map(
+                    int, vlanID_range.replace(" ", "").split("-")
+                )
 
-        return sfp_dict
+                for vlanID in range(start_vlanid, end_vlanid + 1):
+                    if vlanID not in used_vlanIDs:
+                        return vlanID
+            except Exception as exp:
+                raise vimconn.VimConnException(
+                    "Exception {} occurred while generating VLAN ID.".format(exp)
+                )
+        else:
+            raise vimconn.VimConnConflictException(
+                "Unable to create the SRIOV VLAN network. All given VLAN IDs {} are in use.".format(
+                    self.config.get("dataplane_net_vlan_range")
+                )
+            )
 
-    def refresh_sfis_status(self, sfi_list):
-        """Get the status of the service function instances
-        Params: the list of sfi identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function instance
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+    def _generate_multisegment_vlanID(self):
         """
-        sfi_dict = {}
-        self.logger.debug(
-            "refresh_sfis status: Getting tenant sfi information from VIM"
-        )
+        Method to get unused vlanID
+        Args:
+            None
+        Returns:
+            vlanID
+        """
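+        # unlike _generate_vlanID, this also inspects the "segments" list of
+        # multiprovider networks when collecting the VLAN IDs already in use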
+        # Get used VLAN IDs
+        usedVlanIDs = []
+        networks = self.get_network_list()
+        for net in networks:
+            if net.get("provider:network_type") == "vlan" and net.get(
+                "provider:segmentation_id"
+            ):
+                usedVlanIDs.append(net.get("provider:segmentation_id"))
+            elif net.get("segments"):
+                for segment in net.get("segments"):
+                    if segment.get("provider:network_type") == "vlan" and segment.get(
+                        "provider:segmentation_id"
+                    ):
+                        usedVlanIDs.append(segment.get("provider:segmentation_id"))
 
-        for sfi_id in sfi_list:
-            sfi = {}
+        used_vlanIDs = set(usedVlanIDs)
 
+        # find unused VLAN ID
+        for vlanID_range in self.config.get("multisegment_vlan_range"):
             try:
-                sfi_vim = self.get_sfi(sfi_id)
-
-                if sfi_vim:
-                    sfi["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    sfi["status"] = "OTHER"
-                    sfi["error_msg"] = "VIM status reported " + sfi["status"]
-
-                sfi["vim_info"] = self.serialize(sfi_vim)
-
-                if sfi_vim.get("fault"):
-                    sfi["error_msg"] = str(sfi_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting sfi status: %s", str(e))
-                sfi["status"] = "DELETED"
-                sfi["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting sfi status: %s", str(e))
-                sfi["status"] = "VIM_ERROR"
-                sfi["error_msg"] = str(e)
-
-            sfi_dict[sfi_id] = sfi
+                start_vlanid, end_vlanid = map(
+                    int, vlanID_range.replace(" ", "").split("-")
+                )
 
-        return sfi_dict
+                for vlanID in range(start_vlanid, end_vlanid + 1):
+                    if vlanID not in used_vlanIDs:
+                        return vlanID
+            except Exception as exp:
+                raise vimconn.VimConnException(
+                    "Exception {} occurred while generating VLAN ID.".format(exp)
+                )
+        else:
+            raise vimconn.VimConnConflictException(
+                "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
+                    self.config.get("multisegment_vlan_range")
+                )
+            )
 
-    def refresh_sfs_status(self, sf_list):
-        """Get the status of the service functions
-        Params: the list of sf identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+    def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
+        """
+        Method to validate user given vlanID ranges
+            Args:
+                input_vlan_range: list of VLAN range strings, e.g. ["100-200"]
+                text_vlan_range: name of the related config option, used in error messages
+            Returns: None
         """
-        sf_dict = {}
-        self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
+        for vlanID_range in input_vlan_range:
+            vlan_range = vlanID_range.replace(" ", "")
+            # validate format
+            vlanID_pattern = r"(\d)+-(\d)+$"
+            match_obj = re.match(vlanID_pattern, vlan_range)
+            if not match_obj:
+                raise vimconn.VimConnConflictException(
+                    "Invalid VLAN range for {}: {}. You must provide "
+                    "'{}' in format [start_ID - end_ID].".format(
+                        text_vlan_range, vlanID_range, text_vlan_range
+                    )
+                )
 
-        for sf_id in sf_list:
-            sf = {}
+            start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
+            if start_vlanid <= 0:
+                raise vimconn.VimConnConflictException(
+                    "Invalid VLAN range for {}: {}. Start ID cannot be zero. For VLAN "
+                    "networks valid IDs are 1 to 4094.".format(
+                        text_vlan_range, vlanID_range
+                    )
+                )
 
-            try:
-                sf_vim = self.get_sf(sf_id)
+            if end_vlanid > 4094:
+                raise vimconn.VimConnConflictException(
+                    "Invalid VLAN range for {}: {}. End VLAN ID cannot be "
+                    "greater than 4094. For VLAN networks valid IDs are 1 to 4094.".format(
+                        text_vlan_range, vlanID_range
+                    )
+                )
 
-                if sf_vim:
-                    sf["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    sf["status"] = "OTHER"
-                    sf["error_msg"] = "VIM status reported " + sf_vim["status"]
+            if start_vlanid > end_vlanid:
+                raise vimconn.VimConnConflictException(
+                    "Invalid VLAN range for {}: {}. You must provide '{}'"
+                    " in format start_ID - end_ID and start_ID < end_ID ".format(
+                        text_vlan_range, vlanID_range, text_vlan_range
+                    )
+                )
 
-                sf["vim_info"] = self.serialize(sf_vim)
+    def get_hosts_info(self):
+        """Get the information of deployed hosts
+        Returns the hosts content"""
+        if self.debug:
+            print("osconnector: Getting Host info from VIM")
 
-                if sf_vim.get("fault"):
-                    sf["error_msg"] = str(sf_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting sf status: %s", str(e))
-                sf["status"] = "DELETED"
-                sf["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting sf status: %s", str(e))
-                sf["status"] = "VIM_ERROR"
-                sf["error_msg"] = str(e)
+        try:
+            h_list = []
+            self._reload_connection()
+            hypervisors = self.nova.hypervisors.list()
 
-            sf_dict[sf_id] = sf
+            for hype in hypervisors:
+                h_list.append(hype.to_dict())
 
-        return sf_dict
+            return 1, {"hosts": h_list}
+        except nvExceptions.NotFound as e:
+            error_value = -vimconn.HTTP_Not_Found
+            error_text = str(e) if len(e.args) == 0 else str(e.args[0])
+        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
+            error_value = -vimconn.HTTP_Bad_Request
+            error_text = (
+                type(e).__name__
+                + ": "
+                + (str(e) if len(e.args) == 0 else str(e.args[0]))
+            )
 
-    def refresh_classifications_status(self, classification_list):
-        """Get the status of the classifications
-        Params: the list of classification identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this classifier
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-        """
-        classification_dict = {}
-        self.logger.debug(
-            "refresh_classifications status: Getting tenant classification information from VIM"
-        )
+        # TODO insert exception vimconn.HTTP_Unauthorized
+        # if reaching here is because an exception
+        self.logger.debug("get_hosts_info " + error_text)
 
-        for classification_id in classification_list:
-            classification = {}
+        return error_value, error_text
 
-            try:
-                classification_vim = self.get_classification(classification_id)
+    def get_hosts(self, vim_tenant):
+        """Get the hosts and deployed instances
+        Returns the hosts content"""
+        r, hype_dict = self.get_hosts_info()
 
-                if classification_vim:
-                    classification["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    classification["status"] = "OTHER"
-                    classification["error_msg"] = (
-                        "VIM status reported " + classification["status"]
-                    )
+        if r < 0:
+            return r, hype_dict
 
-                classification["vim_info"] = self.serialize(classification_vim)
+        hypervisors = hype_dict["hosts"]
 
-                if classification_vim.get("fault"):
-                    classification["error_msg"] = str(classification_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting classification status: %s", str(e))
-                classification["status"] = "DELETED"
-                classification["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting classification status: %s", str(e))
-                classification["status"] = "VIM_ERROR"
-                classification["error_msg"] = str(e)
+        try:
+            servers = self.nova.servers.list()
+            for hype in hypervisors:
+                for server in servers:
+                    if (
+                        server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
+                        == hype["hypervisor_hostname"]
+                    ):
+                        if "vm" in hype:
+                            hype["vm"].append(server.id)
+                        else:
+                            hype["vm"] = [server.id]
 
-            classification_dict[classification_id] = classification
+            return 1, hype_dict
+        except nvExceptions.NotFound as e:
+            error_value = -vimconn.HTTP_Not_Found
+            error_text = str(e) if len(e.args) == 0 else str(e.args[0])
+        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
+            error_value = -vimconn.HTTP_Bad_Request
+            error_text = (
+                type(e).__name__
+                + ": "
+                + (str(e) if len(e.args) == 0 else str(e.args[0]))
+            )
+
+        # TODO insert exception vimconn.HTTP_Unauthorized
+        # if reaching here is because an exception
+        self.logger.debug("get_hosts " + error_text)
 
-        return classification_dict
+        return error_value, error_text
 
     def new_affinity_group(self, affinity_group_data):
         """Adds a server group to VIM
@@ -3538,3 +3661,237 @@ class vimconnector(vimconn.VimConnector):
             ConnectionError,
         ) as e:
             self._format_exception(e)
+
+    def get_vdu_state(self, vm_id):
+        """
+        Getting the state of a vdu
+        param:
+            vm_id: ID of an instance
+        """
+        self.logger.debug("Getting the status of VM")
+        self.logger.debug("VIM VM ID %s", vm_id)
+        self._reload_connection()
+        server = self.nova.servers.find(id=vm_id)
+        server_dict = server.to_dict()
+        vdu_data = [
+            server_dict["status"],
+            server_dict["flavor"]["id"],
+            server_dict["OS-EXT-SRV-ATTR:host"],
+            server_dict["OS-EXT-AZ:availability_zone"],
+        ]
+        self.logger.debug("vdu_data %s", vdu_data)
+        return vdu_data
+
+    def check_compute_availability(self, host, server_flavor_details):
+        self._reload_connection()
+        hypervisor_search = self.nova.hypervisors.search(
+            hypervisor_match=host, servers=True
+        )
+        for hypervisor in hypervisor_search:
+            hypervisor_id = hypervisor.to_dict()["id"]
+            hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
+            # to_dict() already returns JSON-serializable data; no dumps/loads
+            # round-trip is needed
+            hypervisor_json = hypervisor_details.to_dict()
+            resources_available = [
+                hypervisor_json["free_ram_mb"],
+                hypervisor_json["disk_available_least"],
+                hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
+            ]
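+            # The host qualifies only if every free resource strictly exceeds
+            # the flavor's requirement, compared pairwise as
+            # (free_ram_mb, disk_available_least, free vcpus) vs (ram, disk, vcpus)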
+            compute_available = all(
+                x > y for x, y in zip(resources_available, server_flavor_details)
+            )
+            if compute_available:
+                return host
+
+    def check_availability_zone(
+        self, old_az, server_flavor_details, old_host, host=None
+    ):
+        self._reload_connection()
+        az_check = {"zone_check": False, "compute_availability": None}
+        aggregates_list = self.nova.aggregates.list()
+        for aggregate in aggregates_list:
+            aggregate_json = aggregate.to_dict()
+            if aggregate_json["availability_zone"] == old_az:
+                hosts_list = aggregate_json["hosts"]
+                if host is not None:
+                    if host in hosts_list:
+                        az_check["zone_check"] = True
+                        available_compute_id = self.check_compute_availability(
+                            host, server_flavor_details
+                        )
+                        if available_compute_id is not None:
+                            az_check["compute_availability"] = available_compute_id
+                else:
+                    for check_host in hosts_list:
+                        if check_host != old_host:
+                            available_compute_id = self.check_compute_availability(
+                                check_host, server_flavor_details
+                            )
+                            if available_compute_id is not None:
+                                az_check["zone_check"] = True
+                                az_check["compute_availability"] = available_compute_id
+                                break
+                    else:
+                        az_check["zone_check"] = True
+        return az_check
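+    # Note on the inner for/else above: the else branch runs only when the
+    # loop completed without break, so zone_check is set True even when no
+    # alternative host in the zone had spare capacity.
+    # Illustrative return value (hypothetical host name):
+    #   {"zone_check": True, "compute_availability": "compute-0"}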
+
+    def migrate_instance(self, vm_id, compute_host=None):
+        """
+        Migrate a vdu
+        param:
+            vm_id: ID of an instance
+            compute_host: Host to migrate the vdu to
+        """
+        self._reload_connection()
+        vm_state = False
+        instance_state = self.get_vdu_state(vm_id)
+        server_flavor_id = instance_state[1]
+        server_hypervisor_name = instance_state[2]
+        server_availability_zone = instance_state[3]
+        try:
+            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
+            server_flavor_details = [
+                server_flavor["ram"],
+                server_flavor["disk"],
+                server_flavor["vcpus"],
+            ]
+            if compute_host == server_hypervisor_name:
+                raise vimconn.VimConnException(
+                    "Unable to migrate instance '{}' to the same host '{}'".format(
+                        vm_id, compute_host
+                    ),
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
+            az_status = self.check_availability_zone(
+                server_availability_zone,
+                server_flavor_details,
+                server_hypervisor_name,
+                compute_host,
+            )
+            availability_zone_check = az_status["zone_check"]
+            available_compute_id = az_status.get("compute_availability")
+
+            if availability_zone_check is False:
+                raise vimconn.VimConnException(
+                    "Unable to migrate instance '{}' to a different availability zone".format(
+                        vm_id
+                    ),
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
+            if available_compute_id is not None:
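+                # block_migration=True copies local disks over the network
+                # (no shared storage assumed); disk_over_commit=False rejects
+                # destinations that would overcommit their disk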
+                self.nova.servers.live_migrate(
+                    server=vm_id,
+                    host=available_compute_id,
+                    block_migration=True,
+                    disk_over_commit=False,
+                )
+                state = "MIGRATING"
+                changed_compute_host = ""
+                if state == "MIGRATING":
+                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
+                    changed_compute_host = self.get_vdu_state(vm_id)[2]
+                if vm_state and changed_compute_host == available_compute_id:
+                    self.logger.debug(
+                        "Instance '{}' migrated to the new compute host '{}'".format(
+                            vm_id, changed_compute_host
+                        )
+                    )
+                    return state, available_compute_id
+                else:
+                    raise vimconn.VimConnException(
+                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
+                            vm_id, available_compute_id
+                        ),
+                        http_code=vimconn.HTTP_Bad_Request,
+                    )
+            else:
+                raise vimconn.VimConnException(
+                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
+                        available_compute_id
+                    ),
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
+        except (
+            nvExceptions.BadRequest,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
+        ) as e:
+            self._format_exception(e)
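+
+    # Illustrative call (hypothetical host name):
+    #   state, host = vim.migrate_instance(vm_id, compute_host="compute-1")
+    # returns ("MIGRATING", "<target host>") once the VM is ACTIVE on the target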
+
+    def resize_instance(self, vm_id, new_flavor_id):
+        """
+        For resizing the vm based on the given
+        flavor details
+        param:
+            vm_id : ID of an instance
+            new_flavor_id : Flavor id to be resized
+        Return the status of a resized instance
+        """
+        self._reload_connection()
+        self.logger.debug("resize the flavor of an instance")
+        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
+        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
+        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
+        try:
+            if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
+                if old_flavor_disk > new_flavor_disk:
+                    raise nvExceptions.BadRequest(
+                        400,
+                        message="Server disk resize failed. Resize to lower disk flavor is not allowed",
+                    )
+                else:
+                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
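+                    # Nova leaves the server in VERIFY_RESIZE until the resize
+                    # is confirmed or reverted; confirm_resize() below finalizes it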
+                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
+                    if vm_state:
+                        instance_resized_status = self.confirm_resize(vm_id)
+                        return instance_resized_status
+                    else:
+                        raise nvExceptions.BadRequest(
+                            409,
+                            message="Cannot 'resize' vm_state is in ERROR",
+                        )
+
+            else:
+                self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
+                raise nvExceptions.BadRequest(
+                    409,
+                    message="Cannot 'resize' instance while it is in vm_state resized",
+                )
+        except (
+            nvExceptions.BadRequest,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
+        ) as e:
+            self._format_exception(e)
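+
+    # Illustrative call (hypothetical flavor ID):
+    #   status = vim.resize_instance(vm_id, "new-flavor-id")
+    # returns "ACTIVE" once the resize has been confirmed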
+
+    def confirm_resize(self, vm_id):
+        """
+        Confirm the resize of an instance
+        param:
+            vm_id: ID of an instance
+        """
+        self._reload_connection()
+        self.nova.servers.confirm_resize(server=vm_id)
+        if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
+            self.__wait_for_vm(vm_id, "ACTIVE")
+        instance_status = self.get_vdu_state(vm_id)[0]
+        return instance_status
+
+    def get_monitoring_data(self):
+        try:
+            self.logger.debug("Getting servers and ports data from Openstack VIMs.")
+            self._reload_connection()
+            all_servers = self.nova.servers.list(detailed=True)
+            all_ports = self.neutron.list_ports()
+            return all_servers, all_ports
+        except (
+            vimconn.VimConnException,
+            vimconn.VimConnNotFoundException,
+            vimconn.VimConnConnectionException,
+        ) as e:
+            raise vimconn.VimConnException(
+                f"Exception in monitoring while getting VMs and ports status: {str(e)}"
+            )
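+
+    # Illustrative consumption (field names follow the standard novaclient and
+    # neutronclient return shapes):
+    #   servers, ports = vim.get_monitoring_data()
+    #   vm_status = {s.id: s.status for s in servers}
+    #   port_status = {p["id"]: p["status"] for p in ports["ports"]}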