Revert "Revert "Removing unused methods from RO module""
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
index 6b46dc1..9faf98d 100644 (file)
@@ -38,6 +38,7 @@ from pprint import pformat
 import random
 import re
 import time
 import random
 import re
 import time
+from typing import Dict, List, Optional, Tuple
 
 from cinderclient import client as cClient
 from glanceclient import client as glClient
 
 from cinderclient import client as cClient
 from glanceclient import client as glClient
@@ -753,7 +754,7 @@ class vimconnector(vimconn.VimConnector):
             self._reload_connection()
             network_dict = {"name": net_name, "admin_state_up": True}
 
             self._reload_connection()
             network_dict = {"name": net_name, "admin_state_up": True}
 
-            if net_type in ("data", "ptp"):
+            if net_type in ("data", "ptp") or provider_network_profile:
                 provider_physical_network = None
 
                 if provider_network_profile and provider_network_profile.get(
                 provider_physical_network = None
 
                 if provider_network_profile and provider_network_profile.get(
@@ -1226,11 +1227,14 @@ class vimconnector(vimconn.VimConnector):
         ) as e:
             self._format_exception(e)
 
         ) as e:
             self._format_exception(e)
 
-    def process_resource_quota(self, quota, prefix, extra_specs):
-        """
-        :param prefix:
-        :param extra_specs:
-        :return:
+    @staticmethod
+    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
+        """Process resource quota and fill up extra_specs.
+        Args:
+            quota       (dict):         Keeping the quota of resources
+            prefix      (str):          Prefix
+            extra_specs (dict):         Dict to be filled to be used during flavor creation
+
         """
         if "limit" in quota:
             extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
         """
         if "limit" in quota:
             extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
@@ -1242,11 +1246,267 @@ class vimconnector(vimconn.VimConnector):
             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
             extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
 
             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
             extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
 
-    def new_flavor(self, flavor_data, change_name_if_used=True):
-        """Adds a tenant flavor to openstack VIM
-        if change_name_if_used is True, it will change name in case of conflict, because it is not supported name
-         repetition
-        Returns the flavor identifier
+    @staticmethod
+    def process_numa_memory(
+        numa: dict, node_id: Optional[int], extra_specs: dict
+    ) -> None:
+        """Set the memory in extra_specs.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            node_id     (int):          ID of numa node
+            extra_specs (dict):         To be filled.
+
+        """
+        if not numa.get("memory"):
+            return
+        memory_mb = numa["memory"] * 1024
+        memory = "hw:numa_mem.{}".format(node_id)
+        extra_specs[memory] = int(memory_mb)
+
+    @staticmethod
+    def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
+        """Set the cpu in extra_specs.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            node_id     (int):          ID of numa node
+            extra_specs (dict):         To be filled.
+
+        """
+        if not numa.get("vcpu"):
+            return
+        vcpu = numa["vcpu"]
+        cpu = "hw:numa_cpus.{}".format(node_id)
+        vcpu = ",".join(map(str, vcpu))
+        extra_specs[cpu] = vcpu
+
+    @staticmethod
+    def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has paired-threads.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        Returns:
+            threads       (int)           Number of virtual cpus
+
+        """
+        if not numa.get("paired-threads"):
+            return
+
+        # cpu_thread_policy "require" implies that compute node must have an STM architecture
+        threads = numa["paired-threads"] * 2
+        extra_specs["hw:cpu_thread_policy"] = "require"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return threads
+
+    @staticmethod
+    def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has cores.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        Returns:
+            cores       (int)           Number of virtual cpus
+
+        """
+        # cpu_thread_policy "isolate" implies that the host must not have an SMT
+        # architecture, or a non-SMT architecture will be emulated
+        if not numa.get("cores"):
+            return
+        cores = numa["cores"]
+        extra_specs["hw:cpu_thread_policy"] = "isolate"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return cores
+
+    @staticmethod
+    def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has threads.
+        Args:
+            numa        (dict):         A dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        Returns:
+            threads       (int)           Number of virtual cpus
+
+        """
+        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
+        if not numa.get("threads"):
+            return
+        threads = numa["threads"]
+        extra_specs["hw:cpu_thread_policy"] = "prefer"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return threads
+
+    def _process_numa_parameters_of_flavor(
+        self, numas: List, extra_specs: Dict
+    ) -> None:
+        """Process numa parameters and fill up extra_specs.
+
+        Args:
+            numas   (list):             List of dictionary which includes numa information
+            extra_specs (dict):         To be filled.
+
+        """
+        numa_nodes = len(numas)
+        extra_specs["hw:numa_nodes"] = str(numa_nodes)
+        cpu_cores, cpu_threads = 0, 0
+
+        if self.vim_type == "VIO":
+            self.process_vio_numa_nodes(numa_nodes, extra_specs)
+
+        for numa in numas:
+            if "id" in numa:
+                node_id = numa["id"]
+                # overwrite ram and vcpus
+                # check if key "memory" is present in numa else use ram value at flavor
+                self.process_numa_memory(numa, node_id, extra_specs)
+                self.process_numa_vcpu(numa, node_id, extra_specs)
+
+            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
+            extra_specs["hw:cpu_sockets"] = str(numa_nodes)
+
+            if "paired-threads" in numa:
+                threads = self.process_numa_paired_threads(numa, extra_specs)
+                cpu_threads += threads
+
+            elif "cores" in numa:
+                cores = self.process_numa_cores(numa, extra_specs)
+                cpu_cores += cores
+
+            elif "threads" in numa:
+                threads = self.process_numa_threads(numa, extra_specs)
+                cpu_threads += threads
+
+        if cpu_cores:
+            extra_specs["hw:cpu_cores"] = str(cpu_cores)
+        if cpu_threads:
+            extra_specs["hw:cpu_threads"] = str(cpu_threads)
+
+    @staticmethod
+    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
+        """According to number of numa nodes, updates the extra_specs for VIO.
+
+        Args:
+
+            numa_nodes      (int):         List keeps the numa node numbers
+            extra_specs     (dict):        Extra specs dict to be updated
+
+        """
+        # If there is not any numa, numas_nodes equals to 0.
+        if not numa_nodes:
+            extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
+
+        # If there are several numas, we do not define specific affinity.
+        extra_specs["vmware:latency_sensitivity_level"] = "high"
+
+    def _change_flavor_name(
+        self, name: str, name_suffix: int, flavor_data: dict
+    ) -> str:
+        """Change the flavor name if the name already exists.
+
+        Args:
+            name    (str):          Flavor name to be checked
+            name_suffix (int):      Suffix to be appended to name
+            flavor_data (dict):     Flavor dict
+
+        Returns:
+            name    (str):          New flavor name to be used
+
+        """
+        # Get used names
+        fl = self.nova.flavors.list()
+        fl_names = [f.name for f in fl]
+
+        while name in fl_names:
+            name_suffix += 1
+            name = flavor_data["name"] + "-" + str(name_suffix)
+
+        return name
+
+    def _process_extended_config_of_flavor(
+        self, extended: dict, extra_specs: dict
+    ) -> None:
+        """Process the extended dict to fill up extra_specs.
+        Args:
+
+            extended                    (dict):         Keeping the extra specification of flavor
+            extra_specs                 (dict)          Dict to be filled to be used during flavor creation
+
+        """
+        quotas = {
+            "cpu-quota": "cpu",
+            "mem-quota": "memory",
+            "vif-quota": "vif",
+            "disk-io-quota": "disk_io",
+        }
+
+        page_sizes = {
+            "LARGE": "large",
+            "SMALL": "small",
+            "SIZE_2MB": "2MB",
+            "SIZE_1GB": "1GB",
+            "PREFER_LARGE": "any",
+        }
+
+        policies = {
+            "cpu-pinning-policy": "hw:cpu_policy",
+            "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
+            "mem-policy": "hw:numa_mempolicy",
+        }
+
+        numas = extended.get("numas")
+        if numas:
+            self._process_numa_parameters_of_flavor(numas, extra_specs)
+
+        for quota, item in quotas.items():
+            if quota in extended.keys():
+                self.process_resource_quota(extended.get(quota), item, extra_specs)
+
+        # Set the mempage size as specified in the descriptor
+        if extended.get("mempage-size"):
+            if extended["mempage-size"] in page_sizes.keys():
+                extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
+            else:
+                # Normally, validations in NBI should not allow to this condition.
+                self.logger.debug(
+                    "Invalid mempage-size %s. Will be ignored",
+                    extended.get("mempage-size"),
+                )
+
+        for policy, hw_policy in policies.items():
+            if extended.get(policy):
+                extra_specs[hw_policy] = extended[policy].lower()
+
+    @staticmethod
+    def _get_flavor_details(flavor_data: dict) -> Tuple:
+        """Returns the details of flavor
+        Args:
+            flavor_data     (dict):     Dictionary that includes required flavor details
+
+        Returns:
+            ram, vcpus, extra_specs, extended   (tuple):    Main items of required flavor
+
+        """
+        return (
+            flavor_data.get("ram", 64),
+            flavor_data.get("vcpus", 1),
+            {},
+            flavor_data.get("extended"),
+        )
+
+    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
+        """Adds a tenant flavor to openstack VIM.
+        if change_name_if_used is True, it will change name in case of conflict,
+        because it is not supported name repetition.
+
+        Args:
+            flavor_data (dict):             Flavor details to be processed
+            change_name_if_used (bool):     Change name in case of conflict
+
+        Returns:
+             flavor_id  (str):     flavor identifier
+
         """
         self.logger.debug("Adding flavor '%s'", str(flavor_data))
         retry = 0
         """
         self.logger.debug("Adding flavor '%s'", str(flavor_data))
         retry = 0
@@ -1261,116 +1521,16 @@ class vimconnector(vimconn.VimConnector):
                     self._reload_connection()
 
                     if change_name_if_used:
                     self._reload_connection()
 
                     if change_name_if_used:
-                        # get used names
-                        fl_names = []
-                        fl = self.nova.flavors.list()
+                        name = self._change_flavor_name(name, name_suffix, flavor_data)
 
 
-                        for f in fl:
-                            fl_names.append(f.name)
-
-                        while name in fl_names:
-                            name_suffix += 1
-                            name = flavor_data["name"] + "-" + str(name_suffix)
-
-                    ram = flavor_data.get("ram", 64)
-                    vcpus = flavor_data.get("vcpus", 1)
-                    extra_specs = {}
-
-                    extended = flavor_data.get("extended")
+                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
+                        flavor_data
+                    )
                     if extended:
                     if extended:
-                        numas = extended.get("numas")
-
-                        if numas:
-                            numa_nodes = len(numas)
-
-                            if numa_nodes > 1:
-                                return -1, "Can not add flavor with more than one numa"
-
-                            extra_specs["hw:numa_nodes"] = str(numa_nodes)
-                            extra_specs["hw:mem_page_size"] = "large"
-                            extra_specs["hw:cpu_policy"] = "dedicated"
-                            extra_specs["hw:numa_mempolicy"] = "strict"
-
-                            if self.vim_type == "VIO":
-                                extra_specs[
-                                    "vmware:extra_config"
-                                ] = '{"numa.nodeAffinity":"0"}'
-                                extra_specs["vmware:latency_sensitivity_level"] = "high"
-
-                            for numa in numas:
-                                # overwrite ram and vcpus
-                                # check if key "memory" is present in numa else use ram value at flavor
-                                if "memory" in numa:
-                                    ram = numa["memory"] * 1024
-                                # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
-                                # implemented/virt-driver-cpu-thread-pinning.html
-                                extra_specs["hw:cpu_sockets"] = 1
-
-                                if "paired-threads" in numa:
-                                    vcpus = numa["paired-threads"] * 2
-                                    # cpu_thread_policy "require" implies that the compute node must have an
-                                    # STM architecture
-                                    extra_specs["hw:cpu_thread_policy"] = "require"
-                                    extra_specs["hw:cpu_policy"] = "dedicated"
-                                elif "cores" in numa:
-                                    vcpus = numa["cores"]
-                                    # cpu_thread_policy "prefer" implies that the host must not have an SMT
-                                    # architecture, or a non-SMT architecture will be emulated
-                                    extra_specs["hw:cpu_thread_policy"] = "isolate"
-                                    extra_specs["hw:cpu_policy"] = "dedicated"
-                                elif "threads" in numa:
-                                    vcpus = numa["threads"]
-                                    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
-                                    # architecture
-                                    extra_specs["hw:cpu_thread_policy"] = "prefer"
-                                    extra_specs["hw:cpu_policy"] = "dedicated"
-                                # for interface in numa.get("interfaces",() ):
-                                #     if interface["dedicated"]=="yes":
-                                #         raise vimconn.VimConnException("Passthrough interfaces are not supported
-                                #         for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
-                                #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
-                                #      when a way to connect it is available
-                        elif extended.get("cpu-quota"):
-                            self.process_resource_quota(
-                                extended.get("cpu-quota"), "cpu", extra_specs
-                            )
-
-                        if extended.get("mem-quota"):
-                            self.process_resource_quota(
-                                extended.get("mem-quota"), "memory", extra_specs
-                            )
-
-                        if extended.get("vif-quota"):
-                            self.process_resource_quota(
-                                extended.get("vif-quota"), "vif", extra_specs
-                            )
-
-                        if extended.get("disk-io-quota"):
-                            self.process_resource_quota(
-                                extended.get("disk-io-quota"), "disk_io", extra_specs
-                            )
+                        self._process_extended_config_of_flavor(extended, extra_specs)
 
 
-                        # Set the mempage size as specified in the descriptor
-                        if extended.get("mempage-size"):
-                            if extended.get("mempage-size") == "LARGE":
-                                extra_specs["hw:mem_page_size"] = "large"
-                            elif extended.get("mempage-size") == "SMALL":
-                                extra_specs["hw:mem_page_size"] = "small"
-                            elif extended.get("mempage-size") == "SIZE_2MB":
-                                extra_specs["hw:mem_page_size"] = "2MB"
-                            elif extended.get("mempage-size") == "SIZE_1GB":
-                                extra_specs["hw:mem_page_size"] = "1GB"
-                            elif extended.get("mempage-size") == "PREFER_LARGE":
-                                extra_specs["hw:mem_page_size"] = "any"
-                            else:
-                                # The validations in NBI should make reaching here not possible.
-                                # If this message is shown, check validations
-                                self.logger.debug(
-                                    "Invalid mempage-size %s. Will be ignored",
-                                    extended.get("mempage-size"),
-                                )
+                    # Create flavor
 
 
-                    # create flavor
                     new_flavor = self.nova.flavors.create(
                         name=name,
                         ram=ram,
                     new_flavor = self.nova.flavors.create(
                         name=name,
                         ram=ram,
@@ -1380,17 +1540,19 @@ class vimconnector(vimconn.VimConnector):
                         swap=flavor_data.get("swap", 0),
                         is_public=flavor_data.get("is_public", True),
                     )
                         swap=flavor_data.get("swap", 0),
                         is_public=flavor_data.get("is_public", True),
                     )
-                    # add metadata
+
+                    # Add metadata
                     if extra_specs:
                         new_flavor.set_keys(extra_specs)
 
                     return new_flavor.id
                     if extra_specs:
                         new_flavor.set_keys(extra_specs)
 
                     return new_flavor.id
+
                 except nvExceptions.Conflict as e:
                     if change_name_if_used and retry < max_retries:
                         continue
 
                     self._format_exception(e)
                 except nvExceptions.Conflict as e:
                     if change_name_if_used and retry < max_retries:
                         continue
 
                     self._format_exception(e)
-        # except nvExceptions.BadRequest as e:
+
         except (
             ksExceptions.ClientException,
             nvExceptions.ClientException,
         except (
             ksExceptions.ClientException,
             nvExceptions.ClientException,
@@ -1695,1813 +1857,1672 @@ class vimconnector(vimconn.VimConnector):
                 "No enough availability zones at VIM for this deployment"
             )
 
                 "No enough availability zones at VIM for this deployment"
             )
 
-    def new_vminstance(
-        self,
-        name,
-        description,
-        start,
-        image_id,
-        flavor_id,
-        affinity_group_list,
-        net_list,
-        cloud_config=None,
-        disk_list=None,
-        availability_zone_index=None,
-        availability_zone_list=None,
-    ):
-        """Adds a VM instance to VIM
-        Params:
-            start: indicates if VM must start or boot in pause mode. Ignored
-            image_id,flavor_id: image and flavor uuid
-            affinity_group_list: list of affinity groups, each one is a dictionary.
-                Ignore if empty.
-            net_list: list of interfaces, each one is a dictionary with:
-                name:
-                net_id: network uuid to connect
-                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
-                model: interface model, ignored #TODO
-                mac_address: used for  SR-IOV ifaces #TODO for other types
-                use: 'data', 'bridge',  'mgmt'
-                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
-                vim_id: filled/added by this function
-                floating_ip: True/False (or it can be None)
-                port_security: True/False
-            'cloud_config': (optional) dictionary with:
-                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
-                'users': (optional) list of users to be inserted, each item is a dict with:
-                    'name': (mandatory) user name,
-                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
-                'user-data': (optional) string is a text script to be passed directly to cloud-init
-                'config-files': (optional). List of files to be transferred. Each item is a dict with:
-                    'dest': (mandatory) string with the destination absolute path
-                    'encoding': (optional, by default text). Can be one of:
-                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
-                    'content' (mandatory): string with the content of the file
-                    'permissions': (optional) string with file permissions, typically octal notation '0644'
-                    'owner': (optional) file owner, string with the format 'owner:group'
-                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
-            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
-                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
-                'size': (mandatory) string with the size of the disk in GB
-                'vim_id' (optional) should use this existing volume id
-            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
-            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
-                availability_zone_index is None
-                #TODO ip, security groups
-        Returns a tuple with the instance identifier and created_items or raises an exception on error
-            created_items can be None or a dictionary where this method can include key-values that will be passed to
-            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
-            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
-            as not present.
+    def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
+        """Fill up the security_groups in the port_dict.
+
+        Args:
+            net (dict):             Network details
+            port_dict   (dict):     Port details
+
         """
         """
-        self.logger.debug(
-            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
-            image_id,
-            flavor_id,
-            str(net_list),
-        )
+        if (
+            self.config.get("security_groups")
+            and net.get("port_security") is not False
+            and not self.config.get("no_port_security_extension")
+        ):
+            if not self.security_groups_id:
+                self._get_ids_from_name()
 
 
-        try:
-            server = None
-            created_items = {}
-            # metadata = {}
-            net_list_vim = []
-            external_network = []
-            # ^list of external networks to be connected to instance, later on used to create floating_ip
-            no_secured_ports = []  # List of port-is with port-security disabled
-            self._reload_connection()
-            # metadata_vpci = {}  # For a specific neutron plugin
-            block_device_mapping = None
+            port_dict["security_groups"] = self.security_groups_id
 
 
-            for net in net_list:
-                if not net.get("net_id"):  # skip non connected iface
-                    continue
+    def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
+        """Fill up the network binding depending on network type in the port_dict.
 
 
-                port_dict = {
-                    "network_id": net["net_id"],
-                    "name": net.get("name"),
-                    "admin_state_up": True,
-                }
+        Args:
+            net (dict):             Network details
+            port_dict   (dict):     Port details
 
 
-                if (
-                    self.config.get("security_groups")
-                    and net.get("port_security") is not False
-                    and not self.config.get("no_port_security_extension")
-                ):
-                    if not self.security_groups_id:
-                        self._get_ids_from_name()
+        """
+        if not net.get("type"):
+            raise vimconn.VimConnException("Type is missing in the network details.")
 
 
-                    port_dict["security_groups"] = self.security_groups_id
+        if net["type"] == "virtual":
+            pass
 
 
-                if net["type"] == "virtual":
-                    pass
-                    # if "vpci" in net:
-                    #     metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
-                elif net["type"] == "VF" or net["type"] == "SR-IOV":  # for VF
-                    # if "vpci" in net:
-                    #     if "VF" not in metadata_vpci:
-                    #         metadata_vpci["VF"]=[]
-                    #     metadata_vpci["VF"].append([ net["vpci"], "" ])
-                    port_dict["binding:vnic_type"] = "direct"
-
-                    # VIO specific Changes
-                    if self.vim_type == "VIO":
-                        # Need to create port with port_security_enabled = False and no-security-groups
-                        port_dict["port_security_enabled"] = False
-                        port_dict["provider_security_groups"] = []
-                        port_dict["security_groups"] = []
-                else:  # For PT PCI-PASSTHROUGH
-                    # if "vpci" in net:
-                    #     if "PF" not in metadata_vpci:
-                    #         metadata_vpci["PF"]=[]
-                    #     metadata_vpci["PF"].append([ net["vpci"], "" ])
-                    port_dict["binding:vnic_type"] = "direct-physical"
-
-                if not port_dict["name"]:
-                    port_dict["name"] = name
-
-                if net.get("mac_address"):
-                    port_dict["mac_address"] = net["mac_address"]
-
-                if net.get("ip_address"):
-                    port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
-                    # TODO add "subnet_id": <subnet_id>
-
-                new_port = self.neutron.create_port({"port": port_dict})
-                created_items["port:" + str(new_port["port"]["id"])] = True
-                net["mac_adress"] = new_port["port"]["mac_address"]
-                net["vim_id"] = new_port["port"]["id"]
-                # if try to use a network without subnetwork, it will return a emtpy list
-                fixed_ips = new_port["port"].get("fixed_ips")
-
-                if fixed_ips:
-                    net["ip"] = fixed_ips[0].get("ip_address")
-                else:
-                    net["ip"] = None
-
-                port = {"port-id": new_port["port"]["id"]}
-                if float(self.nova.api_version.get_string()) >= 2.32:
-                    port["tag"] = new_port["port"]["name"]
-
-                net_list_vim.append(port)
-
-                if net.get("floating_ip", False):
-                    net["exit_on_floating_ip_error"] = True
-                    external_network.append(net)
-                elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
-                    net["exit_on_floating_ip_error"] = False
-                    external_network.append(net)
-                    net["floating_ip"] = self.config.get("use_floating_ip")
-
-                # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
-                # is dropped.
-                # As a workaround we wait until the VM is active and then disable the port-security
-                if net.get("port_security") is False and not self.config.get(
-                    "no_port_security_extension"
-                ):
-                    no_secured_ports.append(
-                        (
-                            new_port["port"]["id"],
-                            net.get("port_security_disable_strategy"),
-                        )
-                    )
+        # For VF
+        elif net["type"] == "VF" or net["type"] == "SR-IOV":
+            port_dict["binding:vnic_type"] = "direct"
 
 
-            # if metadata_vpci:
-            #     metadata = {"pci_assignement": json.dumps(metadata_vpci)}
-            #     if len(metadata["pci_assignement"]) >255:
-            #         #limit the metadata size
-            #         #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
-            #         self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
-            #         metadata = {}
+            # VIO specific Changes
+            if self.vim_type == "VIO":
+                # Need to create port with port_security_enabled = False and no-security-groups
+                port_dict["port_security_enabled"] = False
+                port_dict["provider_security_groups"] = []
+                port_dict["security_groups"] = []
 
 
-            self.logger.debug(
-                "name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
-                name,
-                image_id,
-                flavor_id,
-                str(net_list_vim),
-                description,
-            )
+        else:
+            # For PT PCI-PASSTHROUGH
+            port_dict["binding:vnic_type"] = "direct-physical"
 
 
-            # cloud config
-            config_drive, userdata = self._create_user_data(cloud_config)
+    @staticmethod
+    def _set_fixed_ip(new_port: dict, net: dict) -> None:
+        """Set the "ip" parameter in net dictionary.
 
 
-            # get availability Zone
-            vm_av_zone = self._get_vm_availability_zone(
-                availability_zone_index, availability_zone_list
-            )
+        Args:
+            new_port    (dict):     New created port
+            net         (dict):     Network details
 
 
-            # Create additional volumes in case these are present in disk_list
-            existing_vim_volumes = []
-            base_disk_index = ord("b")
-            boot_volume_id = None
-            if disk_list:
-                block_device_mapping = {}
-                for disk in disk_list:
-                    if disk.get("vim_id"):
-                        block_device_mapping["_vd" + chr(base_disk_index)] = disk[
-                            "vim_id"
-                        ]
-                        existing_vim_volumes.append({"id": disk["vim_id"]})
-                    else:
-                        if "image_id" in disk:
-                            base_disk_index = ord("a")
-                            volume = self.cinder.volumes.create(
-                                size=disk["size"],
-                                name=name + "_vd" + chr(base_disk_index),
-                                imageRef=disk["image_id"],
-                                # Make sure volume is in the same AZ as the VM to be attached to
-                                availability_zone=vm_av_zone,
-                            )
-                            boot_volume_id = volume.id
-                        else:
-                            volume = self.cinder.volumes.create(
-                                size=disk["size"],
-                                name=name + "_vd" + chr(base_disk_index),
-                                # Make sure volume is in the same AZ as the VM to be attached to
-                                availability_zone=vm_av_zone,
-                            )
+        """
+        fixed_ips = new_port["port"].get("fixed_ips")
 
 
-                        created_items["volume:" + str(volume.id)] = True
-                        block_device_mapping["_vd" + chr(base_disk_index)] = volume.id
+        if fixed_ips:
+            net["ip"] = fixed_ips[0].get("ip_address")
+        else:
+            net["ip"] = None
 
 
-                    base_disk_index += 1
+    @staticmethod
+    def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
+        """Fill up the mac_address and fixed_ips in port_dict.
 
 
-                # Wait until created volumes are with status available
-                elapsed_time = 0
-                while elapsed_time < volume_timeout:
-                    for created_item in created_items:
-                        v, _, volume_id = created_item.partition(":")
-                        if v == "volume":
-                            if self.cinder.volumes.get(volume_id).status != "available":
-                                break
-                    else:  # all ready: break from while
-                        break
+        Args:
+            net (dict):             Network details
+            port_dict   (dict):     Port details
 
 
-                    time.sleep(5)
-                    elapsed_time += 5
+        """
+        if net.get("mac_address"):
+            port_dict["mac_address"] = net["mac_address"]
 
 
-                # Wait until existing volumes in vim are with status available
-                while elapsed_time < volume_timeout:
-                    for volume in existing_vim_volumes:
-                        if self.cinder.volumes.get(volume["id"]).status != "available":
-                            break
-                    else:  # all ready: break from while
-                        break
+        if net.get("ip_address"):
+            port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
+            # TODO add "subnet_id": <subnet_id>
 
 
-                    time.sleep(5)
-                    elapsed_time += 5
+    def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
+        """Create new port using neutron.
 
 
-                # If we exceeded the timeout rollback
-                if elapsed_time >= volume_timeout:
-                    raise vimconn.VimConnException(
-                        "Timeout creating volumes for instance " + name,
-                        http_code=vimconn.HTTP_Request_Timeout,
-                    )
-                if boot_volume_id:
-                    self.cinder.volumes.set_bootable(boot_volume_id, True)
+        Args:
+            port_dict   (dict):         Port details
+            created_items   (dict):     All created items
+            net (dict):                 Network details
 
 
-            # Manage affinity groups/server groups
-            server_group_id = None
-            scheduller_hints = {}
+        Returns:
+            new_port    (dict):         New created port
 
 
-            if affinity_group_list:
-                # Only first id on the list will be used. Openstack restriction
-                server_group_id = affinity_group_list[0]["affinity_group_id"]
-                scheduller_hints["group"] = server_group_id
+        """
+        new_port = self.neutron.create_port({"port": port_dict})
+        created_items["port:" + str(new_port["port"]["id"])] = True
+        net["mac_adress"] = new_port["port"]["mac_address"]
+        net["vim_id"] = new_port["port"]["id"]
 
 
-            self.logger.debug(
-                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
-                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
-                "block_device_mapping={}, server_group={})".format(
-                    name,
-                    image_id,
-                    flavor_id,
-                    net_list_vim,
-                    self.config.get("security_groups"),
-                    vm_av_zone,
-                    self.config.get("keypair"),
-                    userdata,
-                    config_drive,
-                    block_device_mapping,
-                    server_group_id,
-                )
-            )
-            server = self.nova.servers.create(
-                name,
-                image_id,
-                flavor_id,
-                nics=net_list_vim,
-                security_groups=self.config.get("security_groups"),
-                # TODO remove security_groups in future versions. Already at neutron port
-                availability_zone=vm_av_zone,
-                key_name=self.config.get("keypair"),
-                userdata=userdata,
-                config_drive=config_drive,
-                block_device_mapping=block_device_mapping,
-                scheduler_hints=scheduller_hints,
-            )  # , description=description)
+        return new_port
 
 
-            vm_start_time = time.time()
-            # Previously mentioned workaround to wait until the VM is active and then disable the port-security
-            if no_secured_ports:
-                self.__wait_for_vm(server.id, "ACTIVE")
+    def _create_port(
+        self, net: dict, name: str, created_items: dict
+    ) -> Tuple[dict, dict]:
+        """Create port using net details.
 
 
-            for port in no_secured_ports:
-                port_update = {
-                    "port": {"port_security_enabled": False, "security_groups": None}
-                }
+        Args:
+            net (dict):                 Network details
+            name    (str):              Name to be used as network name if net dict does not include name
+            created_items   (dict):     All created items
 
 
-                if port[1] == "allow-address-pairs":
-                    port_update = {
-                        "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
-                    }
+        Returns:
+            new_port, port              New created port, port dictionary
 
 
-                try:
-                    self.neutron.update_port(port[0], port_update)
-                except Exception:
-                    raise vimconn.VimConnException(
-                        "It was not possible to disable port security for port {}".format(
-                            port[0]
-                        )
-                    )
+        """
 
 
-            # print "DONE :-)", server
+        port_dict = {
+            "network_id": net["net_id"],
+            "name": net.get("name"),
+            "admin_state_up": True,
+        }
 
 
-            # pool_id = None
-            for floating_network in external_network:
-                try:
-                    assigned = False
-                    floating_ip_retries = 3
-                    # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
-                    # several times
-                    while not assigned:
-                        floating_ips = self.neutron.list_floatingips().get(
-                            "floatingips", ()
-                        )
-                        random.shuffle(floating_ips)  # randomize
-                        for fip in floating_ips:
-                            if (
-                                fip.get("port_id")
-                                or fip.get("tenant_id") != server.tenant_id
-                            ):
-                                continue
+        if not port_dict["name"]:
+            port_dict["name"] = name
 
 
-                            if isinstance(floating_network["floating_ip"], str):
-                                if (
-                                    fip.get("floating_network_id")
-                                    != floating_network["floating_ip"]
-                                ):
-                                    continue
+        self._prepare_port_dict_security_groups(net, port_dict)
 
 
-                            free_floating_ip = fip["id"]
-                            break
-                        else:
-                            if (
-                                isinstance(floating_network["floating_ip"], str)
-                                and floating_network["floating_ip"].lower() != "true"
-                            ):
-                                pool_id = floating_network["floating_ip"]
-                            else:
-                                # Find the external network
-                                external_nets = list()
-
-                                for net in self.neutron.list_networks()["networks"]:
-                                    if net["router:external"]:
-                                        external_nets.append(net)
-
-                                if len(external_nets) == 0:
-                                    raise vimconn.VimConnException(
-                                        "Cannot create floating_ip automatically since "
-                                        "no external network is present",
-                                        http_code=vimconn.HTTP_Conflict,
-                                    )
+        self._prepare_port_dict_binding(net, port_dict)
 
 
-                                if len(external_nets) > 1:
-                                    raise vimconn.VimConnException(
-                                        "Cannot create floating_ip automatically since "
-                                        "multiple external networks are present",
-                                        http_code=vimconn.HTTP_Conflict,
-                                    )
+        vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
 
 
-                                pool_id = external_nets[0].get("id")
-
-                            param = {
-                                "floatingip": {
-                                    "floating_network_id": pool_id,
-                                    "tenant_id": server.tenant_id,
-                                }
-                            }
-
-                            try:
-                                # self.logger.debug("Creating floating IP")
-                                new_floating_ip = self.neutron.create_floatingip(param)
-                                free_floating_ip = new_floating_ip["floatingip"]["id"]
-                                created_items[
-                                    "floating_ip:" + str(free_floating_ip)
-                                ] = True
-                            except Exception as e:
-                                raise vimconn.VimConnException(
-                                    type(e).__name__
-                                    + ": Cannot create new floating_ip "
-                                    + str(e),
-                                    http_code=vimconn.HTTP_Conflict,
-                                )
+        new_port = self._create_new_port(port_dict, created_items, net)
 
 
-                        try:
-                            # for race condition ensure not already assigned
-                            fip = self.neutron.show_floatingip(free_floating_ip)
+        vimconnector._set_fixed_ip(new_port, net)
 
 
-                            if fip["floatingip"]["port_id"]:
-                                continue
+        port = {"port-id": new_port["port"]["id"]}
 
 
-                            # the vim_id key contains the neutron.port_id
-                            self.neutron.update_floatingip(
-                                free_floating_ip,
-                                {"floatingip": {"port_id": floating_network["vim_id"]}},
-                            )
-                            # for race condition ensure not re-assigned to other VM after 5 seconds
-                            time.sleep(5)
-                            fip = self.neutron.show_floatingip(free_floating_ip)
+        if float(self.nova.api_version.get_string()) >= 2.32:
+            port["tag"] = new_port["port"]["name"]
 
 
-                            if (
-                                fip["floatingip"]["port_id"]
-                                != floating_network["vim_id"]
-                            ):
-                                self.logger.error(
-                                    "floating_ip {} re-assigned to other port".format(
-                                        free_floating_ip
-                                    )
-                                )
-                                continue
+        return new_port, port
 
 
-                            self.logger.debug(
-                                "Assigned floating_ip {} to VM {}".format(
-                                    free_floating_ip, server.id
-                                )
-                            )
-                            assigned = True
-                        except Exception as e:
-                            # openstack need some time after VM creation to assign an IP. So retry if fails
-                            vm_status = self.nova.servers.get(server.id).status
-
-                            if vm_status not in ("ACTIVE", "ERROR"):
-                                if time.time() - vm_start_time < server_timeout:
-                                    time.sleep(5)
-                                    continue
-                            elif floating_ip_retries > 0:
-                                floating_ip_retries -= 1
-                                continue
+    def _prepare_network_for_vminstance(
+        self,
+        name: str,
+        net_list: list,
+        created_items: dict,
+        net_list_vim: list,
+        external_network: list,
+        no_secured_ports: list,
+    ) -> None:
+        """Create port and fill up net dictionary for new VM instance creation.
 
 
-                            raise vimconn.VimConnException(
-                                "Cannot create floating_ip: {} {}".format(
-                                    type(e).__name__, e
-                                ),
-                                http_code=vimconn.HTTP_Conflict,
-                            )
+        Args:
+            name    (str):                  Name to be used as a fallback port name if net has no name
+            net_list    (list):             List of networks
+            created_items   (dict):         All created items belongs to a VM
+            net_list_vim    (list):         List of ports
+            external_network    (list):     List of external-networks
+            no_secured_ports    (list):     Port security disabled ports
+        """
 
 
-                except Exception as e:
-                    if not floating_network["exit_on_floating_ip_error"]:
-                        self.logger.error("Cannot create floating_ip. %s", str(e))
-                        continue
+        self._reload_connection()
 
 
-                    raise
+        for net in net_list:
+            # Skip non-connected iface
+            if not net.get("net_id"):
+                continue
 
 
-            return server.id, created_items
-        # except nvExceptions.NotFound as e:
-        #     error_value=-vimconn.HTTP_Not_Found
-        #     error_text= "vm instance %s not found" % vm_id
-        # except TypeError as e:
-        #     raise vimconn.VimConnException(type(e).__name__ + ": "+  str(e), http_code=vimconn.HTTP_Bad_Request)
+            new_port, port = self._create_port(net, name, created_items)
 
 
-        except Exception as e:
-            server_id = None
-            if server:
-                server_id = server.id
+            net_list_vim.append(port)
 
 
-            try:
-                self.delete_vminstance(server_id, created_items)
-            except Exception as e2:
-                self.logger.error("new_vminstance rollback fail {}".format(e2))
+            if net.get("floating_ip", False):
+                net["exit_on_floating_ip_error"] = True
+                external_network.append(net)
 
 
-            self._format_exception(e)
+            elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
+                net["exit_on_floating_ip_error"] = False
+                external_network.append(net)
+                net["floating_ip"] = self.config.get("use_floating_ip")
 
 
-    def get_vminstance(self, vm_id):
-        """Returns the VM instance information from VIM"""
-        # self.logger.debug("Getting VM from VIM")
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+            # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
+            # is dropped. As a workaround we wait until the VM is active and then disable the port-security
+            if net.get("port_security") is False and not self.config.get(
+                "no_port_security_extension"
+            ):
+                no_secured_ports.append(
+                    (
+                        new_port["port"]["id"],
+                        net.get("port_security_disable_strategy"),
+                    )
+                )
 
 
-            return server.to_dict()
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+    def _prepare_persistent_root_volumes(
+        self,
+        name: str,
+        vm_av_zone: list,
+        disk: dict,
+        base_disk_index: int,
+        block_device_mapping: dict,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ) -> Optional[str]:
+        """Prepare persistent root volumes for new VM instance.
+
+        Args:
+            name    (str):                      Name of VM instance
+            vm_av_zone  (list):                 List of availability zones
+            disk    (dict):                     Disk details
+            base_disk_index (int):              Disk index
+            block_device_mapping    (dict):     Block device details
+            existing_vim_volumes    (list):     Existing disk details
+            created_items   (dict):             All created items belongs to VM
+
+        Returns:
+            boot_volume_id  (Optional[str]):    ID of newly created boot volume, or None if an existing volume is reused
 
 
-    def get_vminstance_console(self, vm_id, console_type="vnc"):
         """
         """
-        Get a console for the virtual machine
-        Params:
-            vm_id: uuid of the VM
-            console_type, can be:
-                "novnc" (by default), "xvpvnc" for VNC types,
-                "rdp-html5" for RDP types, "spice-html5" for SPICE types
-        Returns dict with the console parameters:
-                protocol: ssh, ftp, http, https, ...
-                server:   usually ip address
-                port:     the http, ssh, ... port
-                suffix:   extra text, e.g. the http path and query string
+        # Disk may include only vim_volume_id or only vim_id.
+        # Use existing persistent root volume finding with volume_id or vim_id
+        key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
+
+        if disk.get(key_id):
+            block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
+            existing_vim_volumes.append({"id": disk[key_id]})
+
+        else:
+            # Create persistent root volume
+            volume = self.cinder.volumes.create(
+                size=disk["size"],
+                name=name + "vd" + chr(base_disk_index),
+                imageRef=disk["image_id"],
+                # Make sure volume is in the same AZ as the VM to be attached to
+                availability_zone=vm_av_zone,
+            )
+            boot_volume_id = volume.id
+            self.update_block_device_mapping(
+                volume=volume,
+                block_device_mapping=block_device_mapping,
+                base_disk_index=base_disk_index,
+                disk=disk,
+                created_items=created_items,
+            )
+
+            return boot_volume_id
+
+    @staticmethod
+    def update_block_device_mapping(
+        volume: object,
+        block_device_mapping: dict,
+        base_disk_index: int,
+        disk: dict,
+        created_items: dict,
+    ) -> None:
+        """Add volume information to block device mapping dict.
+        Args:
+            volume  (object):                   Created volume object
+            block_device_mapping    (dict):     Block device details
+            base_disk_index (int):              Disk index
+            disk    (dict):                     Disk details
+            created_items   (dict):             All created items belongs to VM
         """
         """
-        self.logger.debug("Getting VM CONSOLE from VIM")
+        if not volume:
+            raise vimconn.VimConnException("Volume is empty.")
 
 
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
+        if not hasattr(volume, "id"):
+            raise vimconn.VimConnException(
+                "Created volume is not valid, does not have id attribute."
+            )
 
 
-            if console_type is None or console_type == "novnc":
-                console_dict = server.get_vnc_console("novnc")
-            elif console_type == "xvpvnc":
-                console_dict = server.get_vnc_console(console_type)
-            elif console_type == "rdp-html5":
-                console_dict = server.get_rdp_console(console_type)
-            elif console_type == "spice-html5":
-                console_dict = server.get_spice_console(console_type)
-            else:
-                raise vimconn.VimConnException(
-                    "console type '{}' not allowed".format(console_type),
-                    http_code=vimconn.HTTP_Bad_Request,
-                )
+        volume_txt = "volume:" + str(volume.id)
+        if disk.get("keep"):
+            volume_txt += ":keep"
+        created_items[volume_txt] = True
+        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
 
 
-            console_dict1 = console_dict.get("console")
+    def _prepare_non_root_persistent_volumes(
+        self,
+        name: str,
+        disk: dict,
+        vm_av_zone: list,
+        block_device_mapping: dict,
+        base_disk_index: int,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ) -> None:
+        """Prepare persistent volumes for new VM instance.
 
 
-            if console_dict1:
-                console_url = console_dict1.get("url")
+        Args:
+            name    (str):                      Name of VM instance
+            disk    (dict):                     Disk details
+            vm_av_zone  (list):                 List of availability zones
+            block_device_mapping    (dict):     Block device details
+            base_disk_index (int):              Disk index
+            existing_vim_volumes    (list):     Existing disk details
+            created_items   (dict):             All created items belongs to VM
+        """
+        # Non-root persistent volumes
+        # Disk may include only vim_volume_id or only vim_id.
+        key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
 
 
-                if console_url:
-                    # parse console_url
-                    protocol_index = console_url.find("//")
-                    suffix_index = (
-                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
-                    )
-                    port_index = (
-                        console_url[protocol_index + 2 : suffix_index].find(":")
-                        + protocol_index
-                        + 2
-                    )
+        if disk.get(key_id):
+            # Use existing persistent volume
+            block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
+            existing_vim_volumes.append({"id": disk[key_id]})
 
 
-                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
-                        return (
-                            -vimconn.HTTP_Internal_Server_Error,
-                            "Unexpected response from VIM",
-                        )
+        else:
+            # Create persistent volume
+            volume = self.cinder.volumes.create(
+                size=disk["size"],
+                name=name + "vd" + chr(base_disk_index),
+                # Make sure volume is in the same AZ as the VM to be attached to
+                availability_zone=vm_av_zone,
+            )
+            self.update_block_device_mapping(
+                volume=volume,
+                block_device_mapping=block_device_mapping,
+                base_disk_index=base_disk_index,
+                disk=disk,
+                created_items=created_items,
+            )
 
 
-                    console_dict = {
-                        "protocol": console_url[0:protocol_index],
-                        "server": console_url[protocol_index + 2 : port_index],
-                        "port": console_url[port_index:suffix_index],
-                        "suffix": console_url[suffix_index + 1 :],
-                    }
-                    protocol_index += 2
+    def _wait_for_created_volumes_availability(
+        self, elapsed_time: int, created_items: dict
+    ) -> Optional[int]:
+        """Wait till created volumes become available.
 
 
-                    return console_dict
-            raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.BadRequest,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        Args:
+            elapsed_time    (int):          Time already spent waiting so far
+            created_items   (dict):         All created items belongs to VM
 
 
-    def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
-        """Removes a VM instance from VIM. Returns the old identifier"""
-        # print "osconnector: Getting VM from VIM"
-        if created_items is None:
-            created_items = {}
+        Returns:
+            elapsed_time    (int):          Time spent while waiting
 
 
-        try:
-            self._reload_connection()
-            # delete VM ports attached to this networks before the virtual machine
-            for k, v in created_items.items():
-                if not v:  # skip already deleted
-                    continue
+        """
 
 
-                try:
-                    k_item, _, k_id = k.partition(":")
-                    if k_item == "port":
-                        self.neutron.delete_port(k_id)
-                except Exception as e:
-                    self.logger.error(
-                        "Error deleting port: {}: {}".format(type(e).__name__, e)
-                    )
+        while elapsed_time < volume_timeout:
+            for created_item in created_items:
+                v, volume_id = (
+                    created_item.split(":")[0],
+                    created_item.split(":")[1],
+                )
+                if v == "volume":
+                    if self.cinder.volumes.get(volume_id).status != "available":
+                        break
+            else:
+                # All ready: break from while
+                break
 
 
-            # #commented because detaching the volumes makes the servers.delete not work properly ?!?
-            # #dettach volumes attached
-            # server = self.nova.servers.get(vm_id)
-            # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"]   #volume["id"]
-            # #for volume in volumes_attached_dict:
-            # #    self.cinder.volumes.detach(volume["id"])
+            time.sleep(5)
+            elapsed_time += 5
 
 
-            if vm_id:
-                self.nova.servers.delete(vm_id)
+        return elapsed_time
 
 
-            # delete volumes. Although having detached, they should have in active status before deleting
-            # we ensure in this loop
-            keep_waiting = True
-            elapsed_time = 0
+    def _wait_for_existing_volumes_availability(
+        self, elapsed_time: int, existing_vim_volumes: list
+    ) -> Optional[int]:
+        """Wait till existing volumes become available.
 
 
-            while keep_waiting and elapsed_time < volume_timeout:
-                keep_waiting = False
+        Args:
+            elapsed_time    (int):          Passed time while waiting
+            existing_vim_volumes   (list):  Existing volume details
 
 
-                for k, v in created_items.items():
-                    if not v:  # skip already deleted
-                        continue
+        Returns:
+            elapsed_time    (int):          Time spent while waiting
 
 
-                    try:
-                        k_item, _, k_id = k.partition(":")
-                        if k_item == "volume":
-                            if self.cinder.volumes.get(k_id).status != "available":
-                                keep_waiting = True
-                            else:
-                                if k_id not in volumes_to_hold:
-                                    self.cinder.volumes.delete(k_id)
-                                    created_items[k] = None
-                        elif k_item == "floating_ip":  # floating ip
-                            self.neutron.delete_floatingip(k_id)
-                            created_items[k] = None
+        """
 
 
-                    except Exception as e:
-                        self.logger.error("Error deleting {}: {}".format(k, e))
+        while elapsed_time < volume_timeout:
+            for volume in existing_vim_volumes:
+                if self.cinder.volumes.get(volume["id"]).status != "available":
+                    break
+            else:  # all ready: break from while
+                break
 
 
-                if keep_waiting:
-                    time.sleep(1)
-                    elapsed_time += 1
+            time.sleep(5)
+            elapsed_time += 5
 
 
-            return None
-        except (
-            nvExceptions.NotFound,
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        return elapsed_time
+
+    def _prepare_disk_for_vminstance(
+        self,
+        name: str,
+        existing_vim_volumes: list,
+        created_items: dict,
+        vm_av_zone: list,
+        block_device_mapping: dict,
+        disk_list: list = None,
+    ) -> None:
+        """Prepare all volumes for new VM instance.
+
+        Args:
+            name    (str):                      Name of Instance
+            existing_vim_volumes    (list):     List of existing volumes
+            created_items   (dict):             All created items belonging to VM
+            vm_av_zone  (list):                 VM availability zone
+            block_device_mapping (dict):        Block devices to be attached to VM
+            disk_list   (list):                 List of disks
 
 
-    def refresh_vms_status(self, vm_list):
-        """Get the status of the virtual machines and their interfaces/ports
-        Params: the list of VM identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this Virtual Machine
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
-                            #  CREATING (on building process), ERROR
-                            #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
-                            #
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-                interfaces:
-                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
-                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
-                    vim_net_id:       #network id where this interface is connected
-                    vim_interface_id: #interface/port VIM id
-                    ip_address:       #null, or text with IPv4, IPv6 address
-                    compute_node:     #identification of compute node where PF,VF interface is allocated
-                    pci:              #PCI address of the NIC that hosts the PF,VF
-                    vlan:             #physical VLAN used for VF
         """
         """
-        vm_dict = {}
-        self.logger.debug(
-            "refresh_vms status: Getting tenant VM instance information from VIM"
+        # Create additional volumes in case these are present in disk_list
+        base_disk_index = ord("b")
+        boot_volume_id = None
+        elapsed_time = 0
+
+        for disk in disk_list:
+            if "image_id" in disk:
+                # Root persistent volume
+                base_disk_index = ord("a")
+                boot_volume_id = self._prepare_persistent_root_volumes(
+                    name=name,
+                    vm_av_zone=vm_av_zone,
+                    disk=disk,
+                    base_disk_index=base_disk_index,
+                    block_device_mapping=block_device_mapping,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
+            else:
+                # Non-root persistent volume
+                self._prepare_non_root_persistent_volumes(
+                    name=name,
+                    disk=disk,
+                    vm_av_zone=vm_av_zone,
+                    block_device_mapping=block_device_mapping,
+                    base_disk_index=base_disk_index,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
+            base_disk_index += 1
+
+        # Wait until created volumes are with status available
+        elapsed_time = self._wait_for_created_volumes_availability(
+            elapsed_time, created_items
+        )
+        # Wait until existing volumes in vim are with status available
+        elapsed_time = self._wait_for_existing_volumes_availability(
+            elapsed_time, existing_vim_volumes
         )
         )
+        # If we exceeded the timeout rollback
+        if elapsed_time >= volume_timeout:
+            raise vimconn.VimConnException(
+                "Timeout creating volumes for instance " + name,
+                http_code=vimconn.HTTP_Request_Timeout,
+            )
+        if boot_volume_id:
+            self.cinder.volumes.set_bootable(boot_volume_id, True)
 
 
-        for vm_id in vm_list:
-            vm = {}
+    def _find_the_external_network_for_floating_ip(self):
+        """Get the external network ID in order to create a floating IP.
 
 
-            try:
-                vm_vim = self.get_vminstance(vm_id)
+        Returns:
+            pool_id (str):      External network pool ID
 
 
-                if vm_vim["status"] in vmStatus2manoFormat:
-                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
-                else:
-                    vm["status"] = "OTHER"
-                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]
+        """
 
 
-                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
-                vm_vim.pop("user_data", None)
-                vm["vim_info"] = self.serialize(vm_vim)
+        # Find the external network
+        external_nets = list()
 
 
-                vm["interfaces"] = []
-                if vm_vim.get("fault"):
-                    vm["error_msg"] = str(vm_vim["fault"])
+        for net in self.neutron.list_networks()["networks"]:
+            if net["router:external"]:
+                external_nets.append(net)
 
 
-                # get interfaces
-                try:
-                    self._reload_connection()
-                    port_dict = self.neutron.list_ports(device_id=vm_id)
+        if len(external_nets) == 0:
+            raise vimconn.VimConnException(
+                "Cannot create floating_ip automatically since "
+                "no external network is present",
+                http_code=vimconn.HTTP_Conflict,
+            )
 
 
-                    for port in port_dict["ports"]:
-                        interface = {}
-                        interface["vim_info"] = self.serialize(port)
-                        interface["mac_address"] = port.get("mac_address")
-                        interface["vim_net_id"] = port["network_id"]
-                        interface["vim_interface_id"] = port["id"]
-                        # check if OS-EXT-SRV-ATTR:host is there,
-                        # in case of non-admin credentials, it will be missing
+        if len(external_nets) > 1:
+            raise vimconn.VimConnException(
+                "Cannot create floating_ip automatically since "
+                "multiple external networks are present",
+                http_code=vimconn.HTTP_Conflict,
+            )
 
 
-                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
-                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
+        # Pool ID
+        return external_nets[0].get("id")
 
 
-                        interface["pci"] = None
+    def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
+        """Trigger neutron to create a new floating IP using external network ID.
 
 
-                        # check if binding:profile is there,
-                        # in case of non-admin credentials, it will be missing
-                        if port.get("binding:profile"):
-                            if port["binding:profile"].get("pci_slot"):
-                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
-                                #  the slot to 0x00
-                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
-                                #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
-                                pci = port["binding:profile"]["pci_slot"]
-                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
-                                interface["pci"] = pci
+        Args:
+            param   (dict):             Input parameters to create a floating IP
+            created_items   (dict):     All created items belonging to new VM instance
 
 
-                        interface["vlan"] = None
+        Raises:
 
 
-                        if port.get("binding:vif_details"):
-                            interface["vlan"] = port["binding:vif_details"].get("vlan")
+            VimConnException
+        """
+        try:
+            self.logger.debug("Creating floating IP")
+            new_floating_ip = self.neutron.create_floatingip(param)
+            free_floating_ip = new_floating_ip["floatingip"]["id"]
+            created_items["floating_ip:" + str(free_floating_ip)] = True
 
 
-                        # Get vlan from network in case not present in port for those old openstacks and cases where
-                        # it is needed vlan at PT
-                        if not interface["vlan"]:
-                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
-                            network = self.neutron.show_network(port["network_id"])
+        except Exception as e:
+            raise vimconn.VimConnException(
+                type(e).__name__ + ": Cannot create new floating_ip " + str(e),
+                http_code=vimconn.HTTP_Conflict,
+            )
 
 
-                            if (
-                                network["network"].get("provider:network_type")
-                                == "vlan"
-                            ):
-                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
-                                interface["vlan"] = network["network"].get(
-                                    "provider:segmentation_id"
-                                )
+    def _create_floating_ip(
+        self, floating_network: dict, server: object, created_items: dict
+    ) -> None:
+        """Get the available Pool ID and create a new floating IP.
 
 
-                        ips = []
-                        # look for floating ip address
-                        try:
-                            floating_ip_dict = self.neutron.list_floatingips(
-                                port_id=port["id"]
-                            )
-
-                            if floating_ip_dict.get("floatingips"):
-                                ips.append(
-                                    floating_ip_dict["floatingips"][0].get(
-                                        "floating_ip_address"
-                                    )
-                                )
-                        except Exception:
-                            pass
-
-                        for subnet in port["fixed_ips"]:
-                            ips.append(subnet["ip_address"])
-
-                        interface["ip_address"] = ";".join(ips)
-                        vm["interfaces"].append(interface)
-                except Exception as e:
-                    self.logger.error(
-                        "Error getting vm interface information {}: {}".format(
-                            type(e).__name__, e
-                        ),
-                        exc_info=True,
-                    )
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting vm status: %s", str(e))
-                vm["status"] = "DELETED"
-                vm["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting vm status: %s", str(e))
-                vm["status"] = "VIM_ERROR"
-                vm["error_msg"] = str(e)
-
-            vm_dict[vm_id] = vm
-
-        return vm_dict
-
-    def action_vminstance(self, vm_id, action_dict, created_items={}):
-        """Send and action over a VM instance from VIM
-        Returns None or the console dict if the action was successfully sent to the VIM"""
-        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
+        Args:
+            floating_network    (dict):         Dict including external network ID
+            server   (object):                  Server object
+            created_items   (dict):             All created items belonging to new VM instance
 
 
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
+        """
 
 
-            if "start" in action_dict:
-                if action_dict["start"] == "rebuild":
-                    server.rebuild()
-                else:
-                    if server.status == "PAUSED":
-                        server.unpause()
-                    elif server.status == "SUSPENDED":
-                        server.resume()
-                    elif server.status == "SHUTOFF":
-                        server.start()
-            elif "pause" in action_dict:
-                server.pause()
-            elif "resume" in action_dict:
-                server.resume()
-            elif "shutoff" in action_dict or "shutdown" in action_dict:
-                server.stop()
-            elif "forceOff" in action_dict:
-                server.stop()  # TODO
-            elif "terminate" in action_dict:
-                server.delete()
-            elif "createImage" in action_dict:
-                server.create_image()
-                # "path":path_schema,
-                # "description":description_schema,
-                # "name":name_schema,
-                # "metadata":metadata_schema,
-                # "imageRef": id_schema,
-                # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
-            elif "rebuild" in action_dict:
-                server.rebuild(server.image["id"])
-            elif "reboot" in action_dict:
-                server.reboot()  # reboot_type="SOFT"
-            elif "console" in action_dict:
-                console_type = action_dict["console"]
+        # Pool_id is available
+        if (
+            isinstance(floating_network["floating_ip"], str)
+            and floating_network["floating_ip"].lower() != "true"
+        ):
+            pool_id = floating_network["floating_ip"]
 
 
-                if console_type is None or console_type == "novnc":
-                    console_dict = server.get_vnc_console("novnc")
-                elif console_type == "xvpvnc":
-                    console_dict = server.get_vnc_console(console_type)
-                elif console_type == "rdp-html5":
-                    console_dict = server.get_rdp_console(console_type)
-                elif console_type == "spice-html5":
-                    console_dict = server.get_spice_console(console_type)
-                else:
-                    raise vimconn.VimConnException(
-                        "console type '{}' not allowed".format(console_type),
-                        http_code=vimconn.HTTP_Bad_Request,
-                    )
+        # Find the Pool_id
+        else:
+            pool_id = self._find_the_external_network_for_floating_ip()
 
 
-                try:
-                    console_url = console_dict["console"]["url"]
-                    # parse console_url
-                    protocol_index = console_url.find("//")
-                    suffix_index = (
-                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
-                    )
-                    port_index = (
-                        console_url[protocol_index + 2 : suffix_index].find(":")
-                        + protocol_index
-                        + 2
-                    )
+        param = {
+            "floatingip": {
+                "floating_network_id": pool_id,
+                "tenant_id": server.tenant_id,
+            }
+        }
 
 
-                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
-                        raise vimconn.VimConnException(
-                            "Unexpected response from VIM " + str(console_dict)
-                        )
+        self._neutron_create_float_ip(param, created_items)
 
 
-                    console_dict2 = {
-                        "protocol": console_url[0:protocol_index],
-                        "server": console_url[protocol_index + 2 : port_index],
-                        "port": int(console_url[port_index + 1 : suffix_index]),
-                        "suffix": console_url[suffix_index + 1 :],
-                    }
+    def _find_floating_ip(
+        self,
+        server: object,
+        floating_ips: list,
+        floating_network: dict,
+    ) -> Optional[str]:
+        """Find the available free floating IPs if there are.
 
 
-                    return console_dict2
-                except Exception:
-                    raise vimconn.VimConnException(
-                        "Unexpected response from VIM " + str(console_dict)
-                    )
+        Args:
+            server  (object):                   Server object
+            floating_ips    (list):             List of floating IPs
+            floating_network    (dict):         Details of floating network such as ID
 
 
-            return None
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-        # TODO insert exception vimconn.HTTP_Unauthorized
+        Returns:
+            free_floating_ip    (str):          Free floating ip address
 
 
-    # ###### VIO Specific Changes #########
-    def _generate_vlanID(self):
-        """
-        Method to get unused vlanID
-            Args:
-                None
-            Returns:
-                vlanID
         """
         """
-        # Get used VLAN IDs
-        usedVlanIDs = []
-        networks = self.get_network_list()
+        for fip in floating_ips:
+            if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
+                continue
 
 
-        for net in networks:
-            if net.get("provider:segmentation_id"):
-                usedVlanIDs.append(net.get("provider:segmentation_id"))
-
-        used_vlanIDs = set(usedVlanIDs)
+            if isinstance(floating_network["floating_ip"], str):
+                if fip.get("floating_network_id") != floating_network["floating_ip"]:
+                    continue
 
 
-        # find unused VLAN ID
-        for vlanID_range in self.config.get("dataplane_net_vlan_range"):
-            try:
-                start_vlanid, end_vlanid = map(
-                    int, vlanID_range.replace(" ", "").split("-")
-                )
+            return fip["id"]
 
 
-                for vlanID in range(start_vlanid, end_vlanid + 1):
-                    if vlanID not in used_vlanIDs:
-                        return vlanID
-            except Exception as exp:
-                raise vimconn.VimConnException(
-                    "Exception {} occurred while generating VLAN ID.".format(exp)
-                )
-        else:
-            raise vimconn.VimConnConflictException(
-                "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
-                    self.config.get("dataplane_net_vlan_range")
-                )
-            )
+    def _assign_floating_ip(
+        self, free_floating_ip: str, floating_network: dict
+    ) -> Dict:
+        """Assign the free floating ip address to port.
 
 
-    def _generate_multisegment_vlanID(self):
-        """
-        Method to get unused vlanID
         Args:
         Args:
-            None
-        Returns:
-            vlanID
-        """
-        # Get used VLAN IDs
-        usedVlanIDs = []
-        networks = self.get_network_list()
-        for net in networks:
-            if net.get("provider:network_type") == "vlan" and net.get(
-                "provider:segmentation_id"
-            ):
-                usedVlanIDs.append(net.get("provider:segmentation_id"))
-            elif net.get("segments"):
-                for segment in net.get("segments"):
-                    if segment.get("provider:network_type") == "vlan" and segment.get(
-                        "provider:segmentation_id"
-                    ):
-                        usedVlanIDs.append(segment.get("provider:segmentation_id"))
-
-        used_vlanIDs = set(usedVlanIDs)
-
-        # find unused VLAN ID
-        for vlanID_range in self.config.get("multisegment_vlan_range"):
-            try:
-                start_vlanid, end_vlanid = map(
-                    int, vlanID_range.replace(" ", "").split("-")
-                )
+            free_floating_ip    (str):          Floating IP to be assigned
+            floating_network    (dict):         ID of floating network
 
 
-                for vlanID in range(start_vlanid, end_vlanid + 1):
-                    if vlanID not in used_vlanIDs:
-                        return vlanID
-            except Exception as exp:
-                raise vimconn.VimConnException(
-                    "Exception {} occurred while generating VLAN ID.".format(exp)
-                )
-        else:
-            raise vimconn.VimConnConflictException(
-                "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
-                    self.config.get("multisegment_vlan_range")
-                )
-            )
+        Returns:
+            fip (dict)          (dict):         Floating ip details
 
 
-    def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
         """
         """
-        Method to validate user given vlanID ranges
-            Args:  None
-            Returns: None
-        """
-        for vlanID_range in input_vlan_range:
-            vlan_range = vlanID_range.replace(" ", "")
-            # validate format
-            vlanID_pattern = r"(\d)*-(\d)*$"
-            match_obj = re.match(vlanID_pattern, vlan_range)
-            if not match_obj:
-                raise vimconn.VimConnConflictException(
-                    "Invalid VLAN range for {}: {}.You must provide "
-                    "'{}' in format [start_ID - end_ID].".format(
-                        text_vlan_range, vlanID_range, text_vlan_range
-                    )
-                )
+        # The vim_id key contains the neutron.port_id
+        self.neutron.update_floatingip(
+            free_floating_ip,
+            {"floatingip": {"port_id": floating_network["vim_id"]}},
+        )
+        # For race condition ensure not re-assigned to other VM after 5 seconds
+        time.sleep(5)
 
 
-            start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
-            if start_vlanid <= 0:
-                raise vimconn.VimConnConflictException(
-                    "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
-                    "networks valid IDs are 1 to 4094 ".format(
-                        text_vlan_range, vlanID_range
-                    )
-                )
+        return self.neutron.show_floatingip(free_floating_ip)
 
 
-            if end_vlanid > 4094:
-                raise vimconn.VimConnConflictException(
-                    "Invalid VLAN range for {}: {}. End VLAN ID can not be "
-                    "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
-                        text_vlan_range, vlanID_range
-                    )
-                )
-
-            if start_vlanid > end_vlanid:
-                raise vimconn.VimConnConflictException(
-                    "Invalid VLAN range for {}: {}. You must provide '{}'"
-                    " in format start_ID - end_ID and start_ID < end_ID ".format(
-                        text_vlan_range, vlanID_range, text_vlan_range
-                    )
-                )
+    def _get_free_floating_ip(
+        self, server: object, floating_network: dict
+    ) -> Optional[str]:
+        """Get the free floating IP address.
 
 
-    # NOT USED FUNCTIONS
+        Args:
+            server  (object):               Server Object
+            floating_network    (dict):     Floating network details
 
 
-    def new_external_port(self, port_data):
-        """Adds a external port to VIM
-        Returns the port identifier"""
-        # TODO openstack if needed
-        return (
-            -vimconn.HTTP_Internal_Server_Error,
-            "osconnector.new_external_port() not implemented",
-        )
+        Returns:
+            free_floating_ip    (str):      Free floating IP address
 
 
-    def connect_port_network(self, port_id, network_id, admin=False):
-        """Connects a external port to a network
-        Returns status code of the VIM response"""
-        # TODO openstack if needed
-        return (
-            -vimconn.HTTP_Internal_Server_Error,
-            "osconnector.connect_port_network() not implemented",
-        )
+        """
 
 
-    def new_user(self, user_name, user_passwd, tenant_id=None):
-        """Adds a new user to openstack VIM
-        Returns the user identifier"""
-        self.logger.debug("osconnector: Adding a new user to VIM")
+        floating_ips = self.neutron.list_floatingips().get("floatingips", ())
 
 
-        try:
-            self._reload_connection()
-            user = self.keystone.users.create(
-                user_name, password=user_passwd, default_project=tenant_id
-            )
-            # self.keystone.tenants.add_user(self.k_creds["username"], #role)
+        # Randomize
+        random.shuffle(floating_ips)
 
 
-            return user.id
-        except ksExceptions.ConnectionError as e:
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-        except ksExceptions.ClientException as e:  # TODO remove
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
+        return self._find_floating_ip(server, floating_ips, floating_network)
 
 
-        # TODO insert exception vimconn.HTTP_Unauthorized
-        # if reaching here is because an exception
-        self.logger.debug("new_user " + error_text)
+    def _prepare_external_network_for_vminstance(
+        self,
+        external_network: list,
+        server: object,
+        created_items: dict,
+        vm_start_time: float,
+    ) -> None:
+        """Assign floating IP address for VM instance.
 
 
-        return error_value, error_text
+        Args:
+            external_network    (list):         ID of External network
+            server  (object):                   Server Object
+            created_items   (dict):             All created items belonging to new VM instance
+            vm_start_time   (float):            Time as a floating point number expressed in seconds since the epoch, in UTC
 
 
-    def delete_user(self, user_id):
-        """Delete a user from openstack VIM
-        Returns the user identifier"""
-        if self.debug:
-            print("osconnector: Deleting  a  user from VIM")
+        Raises:
+            VimConnException
 
 
-        try:
-            self._reload_connection()
-            self.keystone.users.delete(user_id)
+        """
+        for floating_network in external_network:
+            try:
+                assigned = False
+                floating_ip_retries = 3
+                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
+                # several times
+                while not assigned:
+                    free_floating_ip = self._get_free_floating_ip(
+                        server, floating_network
+                    )
 
 
-            return 1, user_id
-        except ksExceptions.ConnectionError as e:
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-        except ksExceptions.NotFound as e:
-            error_value = -vimconn.HTTP_Not_Found
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-        except ksExceptions.ClientException as e:  # TODO remove
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
+                    if not free_floating_ip:
+                        self._create_floating_ip(
+                            floating_network, server, created_items
+                        )
 
 
-        # TODO insert exception vimconn.HTTP_Unauthorized
-        # if reaching here is because an exception
-        self.logger.debug("delete_tenant " + error_text)
+                    try:
+                        # For race condition ensure not already assigned
+                        fip = self.neutron.show_floatingip(free_floating_ip)
 
 
-        return error_value, error_text
+                        if fip["floatingip"].get("port_id"):
+                            continue
 
 
-    def get_hosts_info(self):
-        """Get the information of deployed hosts
-        Returns the hosts content"""
-        if self.debug:
-            print("osconnector: Getting Host info from VIM")
+                        # Assign floating ip
+                        fip = self._assign_floating_ip(
+                            free_floating_ip, floating_network
+                        )
 
 
-        try:
-            h_list = []
-            self._reload_connection()
-            hypervisors = self.nova.hypervisors.list()
+                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
+                            self.logger.warning(
+                                "floating_ip {} re-assigned to other port".format(
+                                    free_floating_ip
+                                )
+                            )
+                            continue
 
 
-            for hype in hypervisors:
-                h_list.append(hype.to_dict())
+                        self.logger.debug(
+                            "Assigned floating_ip {} to VM {}".format(
+                                free_floating_ip, server.id
+                            )
+                        )
 
 
-            return 1, {"hosts": h_list}
-        except nvExceptions.NotFound as e:
-            error_value = -vimconn.HTTP_Not_Found
-            error_text = str(e) if len(e.args) == 0 else str(e.args[0])
-        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
+                        assigned = True
 
 
-        # TODO insert exception vimconn.HTTP_Unauthorized
-        # if reaching here is because an exception
-        self.logger.debug("get_hosts_info " + error_text)
+                    except Exception as e:
+                        # Openstack need some time after VM creation to assign an IP. So retry if fails
+                        vm_status = self.nova.servers.get(server.id).status
 
 
-        return error_value, error_text
+                        if vm_status not in ("ACTIVE", "ERROR"):
+                            if time.time() - vm_start_time < server_timeout:
+                                time.sleep(5)
+                                continue
+                        elif floating_ip_retries > 0:
+                            floating_ip_retries -= 1
+                            continue
 
 
-    def get_hosts(self, vim_tenant):
-        """Get the hosts and deployed instances
-        Returns the hosts content"""
-        r, hype_dict = self.get_hosts_info()
+                        raise vimconn.VimConnException(
+                            "Cannot create floating_ip: {} {}".format(
+                                type(e).__name__, e
+                            ),
+                            http_code=vimconn.HTTP_Conflict,
+                        )
 
 
-        if r < 0:
-            return r, hype_dict
+            except Exception as e:
+                if not floating_network["exit_on_floating_ip_error"]:
+                    self.logger.error("Cannot create floating_ip. %s", str(e))
+                    continue
 
 
-        hypervisors = hype_dict["hosts"]
+                raise
 
 
-        try:
-            servers = self.nova.servers.list()
-            for hype in hypervisors:
-                for server in servers:
-                    if (
-                        server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
-                        == hype["hypervisor_hostname"]
-                    ):
-                        if "vm" in hype:
-                            hype["vm"].append(server.id)
-                        else:
-                            hype["vm"] = [server.id]
+    def _update_port_security_for_vminstance(
+        self,
+        no_secured_ports: list,
+        server: object,
+    ) -> None:
+        """Updates the port security according to no_secured_ports list.
 
 
-            return 1, hype_dict
-        except nvExceptions.NotFound as e:
-            error_value = -vimconn.HTTP_Not_Found
-            error_text = str(e) if len(e.args) == 0 else str(e.args[0])
-        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
+        Args:
+            no_secured_ports    (list):     List of ports for which port security will be disabled
+            server  (object):               Server Object
 
 
-        # TODO insert exception vimconn.HTTP_Unauthorized
-        # if reaching here is because an exception
-        self.logger.debug("get_hosts " + error_text)
+        Raises:
+            VimConnException
 
 
-        return error_value, error_text
+        """
+        # Wait until the VM is active and then disable the port-security
+        if no_secured_ports:
+            self.__wait_for_vm(server.id, "ACTIVE")
 
 
-    def new_classification(self, name, ctype, definition):
-        self.logger.debug(
-            "Adding a new (Traffic) Classification to VIM, named %s", name
-        )
+        for port in no_secured_ports:
+            port_update = {
+                "port": {"port_security_enabled": False, "security_groups": None}
+            }
 
 
-        try:
-            new_class = None
-            self._reload_connection()
+            if port[1] == "allow-address-pairs":
+                port_update = {
+                    "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
+                }
 
 
-            if ctype not in supportedClassificationTypes:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector does not support provided "
-                    "Classification Type {}, supported ones are: {}".format(
-                        ctype, supportedClassificationTypes
-                    )
-                )
+            try:
+                self.neutron.update_port(port[0], port_update)
 
 
-            if not self._validate_classification(ctype, definition):
+            except Exception:
                 raise vimconn.VimConnException(
                 raise vimconn.VimConnException(
-                    "Incorrect Classification definition for the type specified."
+                    "It was not possible to disable port security for port {}".format(
+                        port[0]
+                    )
                 )
 
                 )
 
-            classification_dict = definition
-            classification_dict["name"] = name
-            new_class = self.neutron.create_sfc_flow_classifier(
-                {"flow_classifier": classification_dict}
-            )
-
-            return new_class["flow_classifier"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self.logger.error("Creation of Classification failed.")
-            self._format_exception(e)
-
-    def get_classification(self, class_id):
-        self.logger.debug(" Getting Classification %s from VIM", class_id)
-        filter_dict = {"id": class_id}
-        class_list = self.get_classification_list(filter_dict)
-
-        if len(class_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Classification '{}' not found".format(class_id)
-            )
-        elif len(class_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Classification with this criteria"
-            )
-
-        classification = class_list[0]
-
-        return classification
-
-    def get_classification_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Classifications from VIM filter: '%s'", str(filter_dict)
-        )
-
-        try:
-            filter_dict_os = filter_dict.copy()
-            self._reload_connection()
-
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
-            classification_dict = self.neutron.list_sfc_flow_classifiers(
-                **filter_dict_os
-            )
-            classification_list = classification_dict["flow_classifiers"]
-            self.__classification_os2mano(classification_list)
-
-            return classification_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def delete_classification(self, class_id):
-        self.logger.debug("Deleting Classification '%s' from VIM", class_id)
+    def new_vminstance(
+        self,
+        name: str,
+        description: str,
+        start: bool,
+        image_id: str,
+        flavor_id: str,
+        affinity_group_list: list,
+        net_list: list,
+        cloud_config=None,
+        disk_list=None,
+        availability_zone_index=None,
+        availability_zone_list=None,
+    ) -> tuple:
+        """Adds a VM instance to VIM.
 
 
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_flow_classifier(class_id)
+        Args:
+            name    (str):          name of VM
+            description (str):      description
+            start   (bool):         indicates if VM must start or boot in pause mode. Ignored
+            image_id    (str)       image uuid
+            flavor_id   (str)       flavor uuid
+            affinity_group_list (list):     list of affinity groups, each one is a dictionary. Ignored if empty.
+            net_list    (list):         list of interfaces, each one is a dictionary with:
+                name:   name of network
+                net_id:     network uuid to connect
+                vpci:   virtual vpci to assign, ignored because OpenStack lacks support #TODO
+                model:  interface model, ignored #TODO
+                mac_address:    used for  SR-IOV ifaces #TODO for other types
+                use:    'data', 'bridge',  'mgmt'
+                type:   'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
+                vim_id:     filled/added by this function
+                floating_ip:    True/False (or it can be None)
+                port_security:  True/False
+            cloud_config    (dict): (optional) dictionary with:
+                key-pairs:      (optional) list of strings with the public key to be inserted to the default user
+                users:      (optional) list of users to be inserted, each item is a dict with:
+                    name:   (mandatory) user name,
+                    key-pairs: (optional) list of strings with the public key to be inserted to the user
+                user-data:  (optional) string is a text script to be passed directly to cloud-init
+                config-files:   (optional). List of files to be transferred. Each item is a dict with:
+                    dest:   (mandatory) string with the destination absolute path
+                    encoding:   (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    content :    (mandatory) string with the content of the file
+                    permissions:    (optional) string with file permissions, typically octal notation '0644'
+                    owner:  (optional) file owner, string with the format 'owner:group'
+                boot-data-drive:    boolean to indicate if user-data must be passed using a boot drive (hard disk)
+            disk_list:  (optional) list with additional disks to the VM. Each item is a dict with:
+                image_id:   (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                size:   (mandatory) string with the size of the disk in GB
+                vim_id:  (optional) should use this existing volume id
+            availability_zone_index:    Index of availability_zone_list to use for this VM. None if not AV required
+            availability_zone_list:     list of availability zones given by user in the VNFD descriptor.  Ignore if
+                availability_zone_index is None
+                #TODO ip, security groups
 
 
-            return class_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        Returns:
+            A tuple with the instance identifier and created_items or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
 
 
-    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
+        """
         self.logger.debug(
         self.logger.debug(
-            "Adding a new Service Function Instance to VIM, named '%s'", name
+            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
+            image_id,
+            flavor_id,
+            str(net_list),
         )
 
         try:
         )
 
         try:
-            new_sfi = None
+            server = None
+            created_items = {}
+            net_list_vim = []
+            # list of external networks to be connected to instance, later on used to create floating_ip
+            external_network = []
+            # List of ports with port-security disabled
+            no_secured_ports = []
+            block_device_mapping = {}
+            existing_vim_volumes = []
+            server_group_id = None
+            scheduller_hints = {}
+
+            # Check the Openstack Connection
             self._reload_connection()
             self._reload_connection()
-            correlation = None
 
 
-            if sfc_encap:
-                correlation = "nsh"
+            # Prepare network list
+            self._prepare_network_for_vminstance(
+                name=name,
+                net_list=net_list,
+                created_items=created_items,
+                net_list_vim=net_list_vim,
+                external_network=external_network,
+                no_secured_ports=no_secured_ports,
+            )
 
 
-            if len(ingress_ports) != 1:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector can only have 1 ingress port per SFI"
-                )
+            # Cloud config
+            config_drive, userdata = self._create_user_data(cloud_config)
+
+            # Get availability Zone
+            vm_av_zone = self._get_vm_availability_zone(
+                availability_zone_index, availability_zone_list
+            )
 
 
-            if len(egress_ports) != 1:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector can only have 1 egress port per SFI"
+            if disk_list:
+                # Prepare disks
+                self._prepare_disk_for_vminstance(
+                    name=name,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                    vm_av_zone=vm_av_zone,
+                    block_device_mapping=block_device_mapping,
+                    disk_list=disk_list,
                 )
 
                 )
 
-            sfi_dict = {
-                "name": name,
-                "ingress": ingress_ports[0],
-                "egress": egress_ports[0],
-                "service_function_parameters": {"correlation": correlation},
-            }
-            new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
+            if affinity_group_list:
+                # Only first id on the list will be used. Openstack restriction
+                server_group_id = affinity_group_list[0]["affinity_group_id"]
+                scheduller_hints["group"] = server_group_id
 
 
-            return new_sfi["port_pair"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            if new_sfi:
-                try:
-                    self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
-                except Exception:
-                    self.logger.error(
-                        "Creation of Service Function Instance failed, with "
-                        "subsequent deletion failure as well."
-                    )
+            self.logger.debug(
+                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
+                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
+                "block_device_mapping={}, server_group={})".format(
+                    name,
+                    image_id,
+                    flavor_id,
+                    net_list_vim,
+                    self.config.get("security_groups"),
+                    vm_av_zone,
+                    self.config.get("keypair"),
+                    userdata,
+                    config_drive,
+                    block_device_mapping,
+                    server_group_id,
+                )
+            )
 
 
-            self._format_exception(e)
+            # Create VM
+            server = self.nova.servers.create(
+                name=name,
+                image=image_id,
+                flavor=flavor_id,
+                nics=net_list_vim,
+                security_groups=self.config.get("security_groups"),
+                # TODO remove security_groups in future versions. Already at neutron port
+                availability_zone=vm_av_zone,
+                key_name=self.config.get("keypair"),
+                userdata=userdata,
+                config_drive=config_drive,
+                block_device_mapping=block_device_mapping,
+                scheduler_hints=scheduller_hints,
+            )
 
 
-    def get_sfi(self, sfi_id):
-        self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
-        filter_dict = {"id": sfi_id}
-        sfi_list = self.get_sfi_list(filter_dict)
+            vm_start_time = time.time()
 
 
-        if len(sfi_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Service Function Instance '{}' not found".format(sfi_id)
-            )
-        elif len(sfi_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Service Function Instance with this criteria"
-            )
+            self._update_port_security_for_vminstance(no_secured_ports, server)
 
 
-        sfi = sfi_list[0]
+            self._prepare_external_network_for_vminstance(
+                external_network=external_network,
+                server=server,
+                created_items=created_items,
+                vm_start_time=vm_start_time,
+            )
 
 
-        return sfi
+            return server.id, created_items
 
 
-    def get_sfi_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
-        )
+        except Exception as e:
+            server_id = None
+            if server:
+                server_id = server.id
 
 
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
+            try:
+                created_items = self.remove_keep_tag_from_persistent_volumes(
+                    created_items
+                )
 
 
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+                self.delete_vminstance(server_id, created_items)
 
 
-            sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
-            sfi_list = sfi_dict["port_pairs"]
-            self.__sfi_os2mano(sfi_list)
+            except Exception as e2:
+                self.logger.error("new_vminstance rollback fail {}".format(e2))
 
 
-            return sfi_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
             self._format_exception(e)
 
             self._format_exception(e)
 
-    def delete_sfi(self, sfi_id):
-        self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
+    @staticmethod
+    def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
+        """Removes the keep flag from persistent volumes. So, those volumes could be removed.
+
+        Args:
+            created_items (dict):       All created items belonging to the VM
+
+        Returns:
+            updated_created_items   (dict):     Dict which does not include keep flag for volumes.
+
+        """
+        return {
+            key.replace(":keep", ""): value for (key, value) in created_items.items()
+        }
 
 
+    def get_vminstance(self, vm_id):
+        """Returns the VM instance information from VIM"""
+        # self.logger.debug("Getting VM from VIM")
         try:
             self._reload_connection()
         try:
             self._reload_connection()
-            self.neutron.delete_sfc_port_pair(sfi_id)
+            server = self.nova.servers.find(id=vm_id)
+            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
 
 
-            return sfi_id
+            return server.to_dict()
         except (
         except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
             ksExceptions.ClientException,
             ksExceptions.ClientException,
-            neExceptions.NeutronException,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
             ConnectionError,
         ) as e:
             self._format_exception(e)
 
             ConnectionError,
         ) as e:
             self._format_exception(e)
 
-    def new_sf(self, name, sfis, sfc_encap=True):
-        self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
+    def get_vminstance_console(self, vm_id, console_type="vnc"):
+        """
+        Get a console for the virtual machine
+        Params:
+            vm_id: uuid of the VM
+            console_type, can be:
+                "novnc" (by default), "xvpvnc" for VNC types,
+                "rdp-html5" for RDP types, "spice-html5" for SPICE types
+        Returns dict with the console parameters:
+                protocol: ssh, ftp, http, https, ...
+                server:   usually ip address
+                port:     the http, ssh, ... port
+                suffix:   extra text, e.g. the http path and query string
+        """
+        self.logger.debug("Getting VM CONSOLE from VIM")
 
         try:
 
         try:
-            new_sf = None
             self._reload_connection()
             self._reload_connection()
-            # correlation = None
-            # if sfc_encap:
-            #     correlation = "nsh"
+            server = self.nova.servers.find(id=vm_id)
+
+            if console_type is None or console_type == "novnc":
+                console_dict = server.get_vnc_console("novnc")
+            elif console_type == "xvpvnc":
+                console_dict = server.get_vnc_console(console_type)
+            elif console_type == "rdp-html5":
+                console_dict = server.get_rdp_console(console_type)
+            elif console_type == "spice-html5":
+                console_dict = server.get_spice_console(console_type)
+            else:
+                raise vimconn.VimConnException(
+                    "console type '{}' not allowed".format(console_type),
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
+
+            console_dict1 = console_dict.get("console")
 
 
-            for instance in sfis:
-                sfi = self.get_sfi(instance)
+            if console_dict1:
+                console_url = console_dict1.get("url")
 
 
-                if sfi.get("sfc_encap") != sfc_encap:
-                    raise vimconn.VimConnNotSupportedException(
-                        "OpenStack VIM connector requires all SFIs of the "
-                        "same SF to share the same SFC Encapsulation"
+                if console_url:
+                    # parse console_url
+                    protocol_index = console_url.find("//")
+                    suffix_index = (
+                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+                    )
+                    port_index = (
+                        console_url[protocol_index + 2 : suffix_index].find(":")
+                        + protocol_index
+                        + 2
                     )
 
                     )
 
-            sf_dict = {"name": name, "port_pairs": sfis}
-            new_sf = self.neutron.create_sfc_port_pair_group(
-                {"port_pair_group": sf_dict}
-            )
+                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
+                        return (
+                            -vimconn.HTTP_Internal_Server_Error,
+                            "Unexpected response from VIM",
+                        )
+
+                    console_dict = {
+                        "protocol": console_url[0:protocol_index],
+                        "server": console_url[protocol_index + 2 : port_index],
+                        "port": console_url[port_index:suffix_index],
+                        "suffix": console_url[suffix_index + 1 :],
+                    }
+                    protocol_index += 2
 
 
-            return new_sf["port_pair_group"]["id"]
+                    return console_dict
+            raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
         except (
         except (
-            neExceptions.ConnectionFailed,
+            nvExceptions.NotFound,
             ksExceptions.ClientException,
             ksExceptions.ClientException,
-            neExceptions.NeutronException,
+            nvExceptions.ClientException,
+            nvExceptions.BadRequest,
             ConnectionError,
         ) as e:
             ConnectionError,
         ) as e:
-            if new_sf:
-                try:
-                    self.neutron.delete_sfc_port_pair_group(
-                        new_sf["port_pair_group"]["id"]
-                    )
-                except Exception:
-                    self.logger.error(
-                        "Creation of Service Function failed, with "
-                        "subsequent deletion failure as well."
-                    )
-
             self._format_exception(e)
 
             self._format_exception(e)
 
-    def get_sf(self, sf_id):
-        self.logger.debug("Getting Service Function %s from VIM", sf_id)
-        filter_dict = {"id": sf_id}
-        sf_list = self.get_sf_list(filter_dict)
-
-        if len(sf_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Service Function '{}' not found".format(sf_id)
-            )
-        elif len(sf_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Service Function with this criteria"
-            )
-
-        sf = sf_list[0]
+    def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
+        """Neutron delete ports by id.
+        Args:
+            k_id    (str):      Port id in the VIM
+        """
+        try:
+            port_dict = self.neutron.list_ports()
+            existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
 
 
-        return sf
+            if k_id in existing_ports:
+                self.neutron.delete_port(k_id)
 
 
-    def get_sf_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Service Function from VIM filter: '%s'", str(filter_dict)
-        )
+        except Exception as e:
+            self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
 
 
+    def _delete_volumes_by_id_wth_cinder(
+        self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
+    ) -> bool:
+        """Cinder delete volume by id.
+        Args:
+            k   (str):                      Full item name in created_items
+            k_id    (str):                  ID of the volume in the VIM
+            volumes_to_hold (list):          Volumes not to delete
+            created_items   (dict):         All created items belonging to the VM
+        """
         try:
         try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
-
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+            if k_id in volumes_to_hold:
+                return
 
 
-            sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
-            sf_list = sf_dict["port_pair_groups"]
-            self.__sf_os2mano(sf_list)
+            if self.cinder.volumes.get(k_id).status != "available":
+                return True
 
 
-            return sf_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+            else:
+                self.cinder.volumes.delete(k_id)
+                created_items[k] = None
 
 
-    def delete_sf(self, sf_id):
-        self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
+        except Exception as e:
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
+            )
 
 
+    def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
+        """Neutron delete floating ip by id.
+        Args:
+            k   (str):                      Full item name in created_items
+            k_id    (str):                  ID of floating ip in VIM
+            created_items   (dict):         All created items belonging to the VM
+        """
         try:
         try:
-            self._reload_connection()
-            self.neutron.delete_sfc_port_pair_group(sf_id)
+            self.neutron.delete_floatingip(k_id)
+            created_items[k] = None
 
 
-            return sf_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        except Exception as e:
+            self.logger.error(
+                "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
+            )
 
 
-    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
-        self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
+    @staticmethod
+    def _get_item_name_id(k: str) -> Tuple[str, str]:
+        k_item, _, k_id = k.partition(":")
+        return k_item, k_id
 
 
-        try:
-            new_sfp = None
-            self._reload_connection()
-            # In networking-sfc the MPLS encapsulation is legacy
-            # should be used when no full SFC Encapsulation is intended
-            correlation = "mpls"
-
-            if sfc_encap:
-                correlation = "nsh"
-
-            sfp_dict = {
-                "name": name,
-                "flow_classifiers": classifications,
-                "port_pair_groups": sfs,
-                "chain_parameters": {"correlation": correlation},
-            }
+    def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
+        """Delete VM ports attached to the networks before deleting virtual machine.
+        Args:
+            created_items   (dict):     All created items belongs to VM
+        """
 
 
-            if spi:
-                sfp_dict["chain_id"] = spi
+        for k, v in created_items.items():
+            if not v:  # skip already deleted
+                continue
 
 
-            new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
+            try:
+                k_item, k_id = self._get_item_name_id(k)
+                if k_item == "port":
+                    self._delete_ports_by_id_wth_neutron(k_id)
 
 
-            return new_sfp["port_chain"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            if new_sfp:
-                try:
-                    self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
-                except Exception:
-                    self.logger.error(
-                        "Creation of Service Function Path failed, with "
-                        "subsequent deletion failure as well."
-                    )
+            except Exception as e:
+                self.logger.error(
+                    "Error deleting port: {}: {}".format(type(e).__name__, e)
+                )
 
 
-            self._format_exception(e)
+    def _delete_created_items(
+        self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
+    ) -> bool:
+        """Delete Volumes and floating ip if they exist in created_items."""
+        for k, v in created_items.items():
+            if not v:  # skip already deleted
+                continue
 
 
-    def get_sfp(self, sfp_id):
-        self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
+            try:
+                k_item, k_id = self._get_item_name_id(k)
 
 
-        filter_dict = {"id": sfp_id}
-        sfp_list = self.get_sfp_list(filter_dict)
+                if k_item == "volume":
+                    unavailable_vol = self._delete_volumes_by_id_wth_cinder(
+                        k, k_id, volumes_to_hold, created_items
+                    )
 
 
-        if len(sfp_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Service Function Path '{}' not found".format(sfp_id)
-            )
-        elif len(sfp_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Service Function Path with this criteria"
-            )
+                    if unavailable_vol:
+                        keep_waiting = True
+
+                elif k_item == "floating_ip":
+                    self._delete_floating_ip_by_id(k, k_id, created_items)
 
 
-        sfp = sfp_list[0]
+            except Exception as e:
+                self.logger.error("Error deleting {}: {}".format(k, e))
 
 
-        return sfp
+        return keep_waiting
 
 
-    def get_sfp_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
-        )
+    @staticmethod
+    def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
+        """Remove the volumes which has key flag from created_items
+
+        Args:
+            created_items   (dict):         All created items belongs to VM
+
+        Returns:
+            created_items   (dict):         Persistent volumes eliminated created_items
+        """
+        return {
+            key: value
+            for (key, value) in created_items.items()
+            if len(key.split(":")) == 2
+        }
+
    def delete_vminstance(
        self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
    ) -> None:
        """Removes a VM instance from VIM, together with its ports, volumes
        and floating IPs (except those flagged to be kept).

        Args:
            vm_id   (str):              Identifier of VM instance
            created_items   (dict):     All created items belongs to VM
            volumes_to_hold (list):     Volume ids that must not be deleted

        Raises:
            Whatever self._format_exception raises for VIM/connection errors.
        """
        if created_items is None:
            created_items = {}
        if volumes_to_hold is None:
            volumes_to_hold = []

        try:
            # Drop keep-flagged entries (e.g. persistent volumes) so they
            # survive the VM deletion.
            created_items = self._extract_items_wth_keep_flag_from_created_items(
                created_items
            )

            self._reload_connection()

            # Delete VM ports attached to the networks before the virtual machine
            if created_items:
                self._delete_vm_ports_attached_to_network(created_items)

            if vm_id:
                self.nova.servers.delete(vm_id)

            # Although having detached, volumes should have in active status before deleting.
            # We ensure in this loop
            keep_waiting = True
            elapsed_time = 0

            # Poll (1 s per iteration, up to volume_timeout) until every
            # volume could be deleted or the timeout expires.
            while keep_waiting and elapsed_time < volume_timeout:
                keep_waiting = False

                # Delete volumes and floating IP.
                keep_waiting = self._delete_created_items(
                    created_items, volumes_to_hold, keep_waiting
                )

                if keep_waiting:
                    time.sleep(1)
                    elapsed_time += 1

        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
 
-    def refresh_sfps_status(self, sfp_list):
-        """Get the status of the service function path
-        Params: the list of sfp identifiers
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this Virtual Machine
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                            #  CREATING (on building process), ERROR
                            #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                            #
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
                    vim_net_id:       #network id where this interface is connected
                    vim_interface_id: #interface/port VIM id
                    ip_address:       #null, or text with IPv4, IPv6 address
                    compute_node:     #identification of compute node where PF,VF interface is allocated
                    pci:              #PCI address of the NIC that hosts the PF,VF
                    vlan:             #physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )

        # Per-VM errors are captured in the result dict; one failing VM does
        # not abort the whole refresh.
        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # Strip user_data blobs before serializing into vim_info.
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                #  the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        # NOTE(review): only the first floating IP of the port
                        # is reported; lookup failures are deliberately ignored.
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            pass

                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        # Floating and fixed addresses are joined into one field.
                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
+
    # NOTE(review): created_items uses a mutable default ({}); harmless here
    # because it is never read or mutated, but consider a None default.
    def action_vminstance(self, vm_id, action_dict, created_items={}):
        """Send and action over a VM instance from VIM
        Returns None or the console dict if the action was successfully sent to the VIM
        """
        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))

        try:
            self._reload_connection()
            server = self.nova.servers.find(id=vm_id)

            # Exactly one action key of action_dict is honored per call.
            if "start" in action_dict:
                if action_dict["start"] == "rebuild":
                    server.rebuild()
                else:
                    # "start" only makes sense from a non-running state.
                    if server.status == "PAUSED":
                        server.unpause()
                    elif server.status == "SUSPENDED":
                        server.resume()
                    elif server.status == "SHUTOFF":
                        server.start()
                    else:
                        self.logger.debug(
                            "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                        )
                        raise vimconn.VimConnException(
                            "Cannot 'start' instance while it is in active state",
                            http_code=vimconn.HTTP_Bad_Request,
                        )

            elif "pause" in action_dict:
                server.pause()
            elif "resume" in action_dict:
                server.resume()
            elif "shutoff" in action_dict or "shutdown" in action_dict:
                self.logger.debug("server status %s", server.status)
                if server.status == "ACTIVE":
                    server.stop()
                else:
                    self.logger.debug("ERROR: VM is not in Active state")
                    raise vimconn.VimConnException(
                        "VM is not in active state, stop operation is not allowed",
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            elif "forceOff" in action_dict:
                server.stop()  # TODO
            elif "terminate" in action_dict:
                server.delete()
            elif "createImage" in action_dict:
                server.create_image()
                # "path":path_schema,
                # "description":description_schema,
                # "name":name_schema,
                # "metadata":metadata_schema,
                # "imageRef": id_schema,
                # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
            elif "rebuild" in action_dict:
                server.rebuild(server.image["id"])
            elif "reboot" in action_dict:
                server.reboot()  # reboot_type="SOFT"
            elif "console" in action_dict:
                console_type = action_dict["console"]

                # Map the requested console type to the matching nova call.
                if console_type is None or console_type == "novnc":
                    console_dict = server.get_vnc_console("novnc")
                elif console_type == "xvpvnc":
                    console_dict = server.get_vnc_console(console_type)
                elif console_type == "rdp-html5":
                    console_dict = server.get_rdp_console(console_type)
                elif console_type == "spice-html5":
                    console_dict = server.get_spice_console(console_type)
                else:
                    raise vimconn.VimConnException(
                        "console type '{}' not allowed".format(console_type),
                        http_code=vimconn.HTTP_Bad_Request,
                    )

                try:
                    console_url = console_dict["console"]["url"]
                    # parse console_url into protocol, server, port and suffix
                    protocol_index = console_url.find("//")
                    suffix_index = (
                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                    )
                    port_index = (
                        console_url[protocol_index + 2 : suffix_index].find(":")
                        + protocol_index
                        + 2
                    )

                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                        raise vimconn.VimConnException(
                            "Unexpected response from VIM " + str(console_dict)
                        )

                    console_dict2 = {
                        "protocol": console_url[0:protocol_index],
                        "server": console_url[protocol_index + 2 : port_index],
                        "port": int(console_url[port_index + 1 : suffix_index]),
                        "suffix": console_url[suffix_index + 1 :],
                    }

                    return console_dict2
                except Exception:
                    # Any parsing failure is reported as a malformed VIM response.
                    raise vimconn.VimConnException(
                        "Unexpected response from VIM " + str(console_dict)
                    )

            return None
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)
        # TODO insert exception vimconn.HTTP_Unauthorized
 
 
-    def refresh_sfis_status(self, sfi_list):
-        """Get the status of the service function instances
-        Params: the list of sfi identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function instance
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+    # ###### VIO Specific Changes #########
+    def _generate_vlanID(self):
         """
         """
-        sfi_dict = {}
-        self.logger.debug(
-            "refresh_sfis status: Getting tenant sfi information from VIM"
-        )
+        Method to get unused vlanID
+            Args:
+                None
+            Returns:
+                vlanID
+        """
+        # Get used VLAN IDs
+        usedVlanIDs = []
+        networks = self.get_network_list()
+
+        for net in networks:
+            if net.get("provider:segmentation_id"):
+                usedVlanIDs.append(net.get("provider:segmentation_id"))
 
 
-        for sfi_id in sfi_list:
-            sfi = {}
+        used_vlanIDs = set(usedVlanIDs)
 
 
+        # find unused VLAN ID
+        for vlanID_range in self.config.get("dataplane_net_vlan_range"):
             try:
             try:
-                sfi_vim = self.get_sfi(sfi_id)
+                start_vlanid, end_vlanid = map(
+                    int, vlanID_range.replace(" ", "").split("-")
+                )
 
 
-                if sfi_vim:
-                    sfi["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    sfi["status"] = "OTHER"
-                    sfi["error_msg"] = "VIM status reported " + sfi["status"]
+                for vlanID in range(start_vlanid, end_vlanid + 1):
+                    if vlanID not in used_vlanIDs:
+                        return vlanID
+            except Exception as exp:
+                raise vimconn.VimConnException(
+                    "Exception {} occurred while generating VLAN ID.".format(exp)
+                )
+        else:
+            raise vimconn.VimConnConflictException(
+                "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
+                    self.config.get("dataplane_net_vlan_range")
+                )
+            )
 
 
-                sfi["vim_info"] = self.serialize(sfi_vim)
+    def _generate_multisegment_vlanID(self):
+        """
+        Method to get unused vlanID
+        Args:
+            None
+        Returns:
+            vlanID
+        """
+        # Get used VLAN IDs
+        usedVlanIDs = []
+        networks = self.get_network_list()
+        for net in networks:
+            if net.get("provider:network_type") == "vlan" and net.get(
+                "provider:segmentation_id"
+            ):
+                usedVlanIDs.append(net.get("provider:segmentation_id"))
+            elif net.get("segments"):
+                for segment in net.get("segments"):
+                    if segment.get("provider:network_type") == "vlan" and segment.get(
+                        "provider:segmentation_id"
+                    ):
+                        usedVlanIDs.append(segment.get("provider:segmentation_id"))
 
 
-                if sfi_vim.get("fault"):
-                    sfi["error_msg"] = str(sfi_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting sfi status: %s", str(e))
-                sfi["status"] = "DELETED"
-                sfi["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting sfi status: %s", str(e))
-                sfi["status"] = "VIM_ERROR"
-                sfi["error_msg"] = str(e)
+        used_vlanIDs = set(usedVlanIDs)
 
 
-            sfi_dict[sfi_id] = sfi
+        # find unused VLAN ID
+        for vlanID_range in self.config.get("multisegment_vlan_range"):
+            try:
+                start_vlanid, end_vlanid = map(
+                    int, vlanID_range.replace(" ", "").split("-")
+                )
 
 
-        return sfi_dict
+                for vlanID in range(start_vlanid, end_vlanid + 1):
+                    if vlanID not in used_vlanIDs:
+                        return vlanID
+            except Exception as exp:
+                raise vimconn.VimConnException(
+                    "Exception {} occurred while generating VLAN ID.".format(exp)
+                )
+        else:
+            raise vimconn.VimConnConflictException(
+                "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
+                    self.config.get("multisegment_vlan_range")
+                )
+            )
 
 
-    def refresh_sfs_status(self, sf_list):
-        """Get the status of the service functions
-        Params: the list of sf identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+    def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
         """
         """
-        sf_dict = {}
-        self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
+        Method to validate user given vlanID ranges
+            Args:  None
+            Returns: None
+        """
+        for vlanID_range in input_vlan_range:
+            vlan_range = vlanID_range.replace(" ", "")
+            # validate format
+            vlanID_pattern = r"(\d)*-(\d)*$"
+            match_obj = re.match(vlanID_pattern, vlan_range)
+            if not match_obj:
+                raise vimconn.VimConnConflictException(
+                    "Invalid VLAN range for {}: {}.You must provide "
+                    "'{}' in format [start_ID - end_ID].".format(
+                        text_vlan_range, vlanID_range, text_vlan_range
+                    )
+                )
 
 
-        for sf_id in sf_list:
-            sf = {}
+            start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
+            if start_vlanid <= 0:
+                raise vimconn.VimConnConflictException(
+                    "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
+                    "networks valid IDs are 1 to 4094 ".format(
+                        text_vlan_range, vlanID_range
+                    )
+                )
 
 
-            try:
-                sf_vim = self.get_sf(sf_id)
+            if end_vlanid > 4094:
+                raise vimconn.VimConnConflictException(
+                    "Invalid VLAN range for {}: {}. End VLAN ID can not be "
+                    "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
+                        text_vlan_range, vlanID_range
+                    )
+                )
 
 
-                if sf_vim:
-                    sf["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    sf["status"] = "OTHER"
-                    sf["error_msg"] = "VIM status reported " + sf_vim["status"]
+            if start_vlanid > end_vlanid:
+                raise vimconn.VimConnConflictException(
+                    "Invalid VLAN range for {}: {}. You must provide '{}'"
+                    " in format start_ID - end_ID and start_ID < end_ID ".format(
+                        text_vlan_range, vlanID_range, text_vlan_range
+                    )
+                )
 
 
-                sf["vim_info"] = self.serialize(sf_vim)
+    def get_hosts_info(self):
+        """Get the information of deployed hosts
+        Returns the hosts content"""
+        if self.debug:
+            print("osconnector: Getting Host info from VIM")
 
 
-                if sf_vim.get("fault"):
-                    sf["error_msg"] = str(sf_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting sf status: %s", str(e))
-                sf["status"] = "DELETED"
-                sf["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting sf status: %s", str(e))
-                sf["status"] = "VIM_ERROR"
-                sf["error_msg"] = str(e)
+        try:
+            h_list = []
+            self._reload_connection()
+            hypervisors = self.nova.hypervisors.list()
 
 
-            sf_dict[sf_id] = sf
+            for hype in hypervisors:
+                h_list.append(hype.to_dict())
 
 
-        return sf_dict
+            return 1, {"hosts": h_list}
+        except nvExceptions.NotFound as e:
+            error_value = -vimconn.HTTP_Not_Found
+            error_text = str(e) if len(e.args) == 0 else str(e.args[0])
+        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
+            error_value = -vimconn.HTTP_Bad_Request
+            error_text = (
+                type(e).__name__
+                + ": "
+                + (str(e) if len(e.args) == 0 else str(e.args[0]))
+            )
 
 
-    def refresh_classifications_status(self, classification_list):
-        """Get the status of the classifications
-        Params: the list of classification identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this classifier
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-        """
-        classification_dict = {}
-        self.logger.debug(
-            "refresh_classifications status: Getting tenant classification information from VIM"
-        )
+        # TODO insert exception vimconn.HTTP_Unauthorized
+        # if reaching here is because an exception
+        self.logger.debug("get_hosts_info " + error_text)
 
 
-        for classification_id in classification_list:
-            classification = {}
+        return error_value, error_text
 
 
-            try:
-                classification_vim = self.get_classification(classification_id)
+    def get_hosts(self, vim_tenant):
+        """Get the hosts and deployed instances
+        Returns the hosts content"""
+        r, hype_dict = self.get_hosts_info()
 
 
-                if classification_vim:
-                    classification["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    classification["status"] = "OTHER"
-                    classification["error_msg"] = (
-                        "VIM status reported " + classification["status"]
-                    )
+        if r < 0:
+            return r, hype_dict
 
 
-                classification["vim_info"] = self.serialize(classification_vim)
+        hypervisors = hype_dict["hosts"]
 
 
-                if classification_vim.get("fault"):
-                    classification["error_msg"] = str(classification_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting classification status: %s", str(e))
-                classification["status"] = "DELETED"
-                classification["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting classification status: %s", str(e))
-                classification["status"] = "VIM_ERROR"
-                classification["error_msg"] = str(e)
+        try:
+            servers = self.nova.servers.list()
+            for hype in hypervisors:
+                for server in servers:
+                    if (
+                        server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
+                        == hype["hypervisor_hostname"]
+                    ):
+                        if "vm" in hype:
+                            hype["vm"].append(server.id)
+                        else:
+                            hype["vm"] = [server.id]
+
+            return 1, hype_dict
+        except nvExceptions.NotFound as e:
+            error_value = -vimconn.HTTP_Not_Found
+            error_text = str(e) if len(e.args) == 0 else str(e.args[0])
+        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
+            error_value = -vimconn.HTTP_Bad_Request
+            error_text = (
+                type(e).__name__
+                + ": "
+                + (str(e) if len(e.args) == 0 else str(e.args[0]))
+            )
 
 
-            classification_dict[classification_id] = classification
+        # TODO insert exception vimconn.HTTP_Unauthorized
+        # if reaching here is because an exception
+        self.logger.debug("get_hosts " + error_text)
 
 
-        return classification_dict
+        return error_value, error_text
 
     def new_affinity_group(self, affinity_group_data):
         """Adds a server group to VIM
 
     def new_affinity_group(self, affinity_group_data):
         """Adds a server group to VIM
@@ -3718,3 +3739,62 @@ class vimconnector(vimconn.VimConnector):
             nvExceptions.NotFound,
         ) as e:
             self._format_exception(e)
             nvExceptions.NotFound,
         ) as e:
             self._format_exception(e)
+
+    def resize_instance(self, vm_id, new_flavor_id):
+        """
+        For resizing the vm based on the given
+        flavor details
+        param:
+            vm_id : ID of an instance
+            new_flavor_id : Flavor id to be resized
+        Return the status of a resized instance
+        """
+        self._reload_connection()
+        self.logger.debug("resize the flavor of an instance")
+        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
+        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
+        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
+        try:
+            if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
+                if old_flavor_disk > new_flavor_disk:
+                    raise nvExceptions.BadRequest(
+                        400,
+                        message="Server disk resize failed. Resize to lower disk flavor is not allowed",
+                    )
+                else:
+                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
+                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
+                    if vm_state:
+                        instance_resized_status = self.confirm_resize(vm_id)
+                        return instance_resized_status
+                    else:
+                        raise nvExceptions.BadRequest(
+                            409,
+                            message="Cannot 'resize' vm_state is in ERROR",
+                        )
+
+            else:
+                self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
+                raise nvExceptions.BadRequest(
+                    409,
+                    message="Cannot 'resize' instance while it is in vm_state resized",
+                )
+        except (
+            nvExceptions.BadRequest,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
+        ) as e:
+            self._format_exception(e)
+
+    def confirm_resize(self, vm_id):
+        """
+        Confirm the resize of an instance
+        param:
+            vm_id: ID of an instance
+        """
+        self._reload_connection()
+        self.nova.servers.confirm_resize(server=vm_id)
+        if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
+            self.__wait_for_vm(vm_id, "ACTIVE")
+        instance_status = self.get_vdu_state(vm_id)[0]
+        return instance_status