Merge remote-tracking branch 'origin/master' into paas

Change-Id: Ic73831a987d021baeb25559f524b8f6e4dcc4ccf
Signed-off-by: Mark Beierl <mark.beierl@canonical.com>
diff --git a/NG-RO/osm_ng_ro/__init__.py b/NG-RO/osm_ng_ro/__init__.py
index 561c0f6..cce1038 100644
--- a/NG-RO/osm_ng_ro/__init__.py
+++ b/NG-RO/osm_ng_ro/__init__.py
@@ -16,13 +16,13 @@
 ##
 import logging
 
+from pkg_resources import get_distribution
+
 version = "8.0.1.post0"
 version_date = "2020-06-29"
 
 # Obtain installed package version. Ignore if error, e.g. pkg_resources not installed
 try:
-    from pkg_resources import get_distribution
-
     version = get_distribution("osm_ng_ro").version
 except Exception as error:
-    logging.exception(f"{error} occured while getting the ro version")
+    logging.warning(f"{error} occured while getting the ro version")
diff --git a/NG-RO/osm_ng_ro/ns.py b/NG-RO/osm_ng_ro/ns.py
index 9a16241..19ff791 100644
--- a/NG-RO/osm_ng_ro/ns.py
+++ b/NG-RO/osm_ng_ro/ns.py
@@ -23,7 +23,7 @@
 from threading import Lock
 from time import time
 from traceback import format_exc as traceback_format_exc
-from typing import Any, Dict, Tuple, Type
+from typing import Any, Dict, List, Optional, Tuple, Type
 from uuid import uuid4
 
 from cryptography.hazmat.backends import default_backend as crypto_default_backend
@@ -647,44 +647,57 @@
             Tuple[Dict[str, Any], bool]: [description]
         """
         numa = {}
+        numa_list = []
         epa_vcpu_set = False
 
         if guest_epa_quota.get("numa-node-policy"):
             numa_node_policy = guest_epa_quota.get("numa-node-policy")
 
             if numa_node_policy.get("node"):
-                numa_node = numa_node_policy["node"][0]
+                for numa_node in numa_node_policy["node"]:
+                    vcpu_list = []
+                    if numa_node.get("id"):
+                        numa["id"] = int(numa_node["id"])
 
-                if numa_node.get("num-cores"):
-                    numa["cores"] = numa_node["num-cores"]
-                    epa_vcpu_set = True
+                    if numa_node.get("vcpu"):
+                        for vcpu in numa_node.get("vcpu"):
+                            vcpu_id = int(vcpu.get("id"))
+                            vcpu_list.append(vcpu_id)
+                        numa["vcpu"] = vcpu_list
 
-                paired_threads = numa_node.get("paired-threads", {})
-                if paired_threads.get("num-paired-threads"):
-                    numa["paired-threads"] = int(
-                        numa_node["paired-threads"]["num-paired-threads"]
-                    )
-                    epa_vcpu_set = True
+                    if numa_node.get("num-cores"):
+                        numa["cores"] = numa_node["num-cores"]
+                        epa_vcpu_set = True
 
-                if paired_threads.get("paired-thread-ids"):
-                    numa["paired-threads-id"] = []
-
-                    for pair in paired_threads["paired-thread-ids"]:
-                        numa["paired-threads-id"].append(
-                            (
-                                str(pair["thread-a"]),
-                                str(pair["thread-b"]),
-                            )
+                    paired_threads = numa_node.get("paired-threads", {})
+                    if paired_threads.get("num-paired-threads"):
+                        numa["paired_threads"] = int(
+                            numa_node["paired-threads"]["num-paired-threads"]
                         )
+                        epa_vcpu_set = True
 
-                if numa_node.get("num-threads"):
-                    numa["threads"] = int(numa_node["num-threads"])
-                    epa_vcpu_set = True
+                    if paired_threads.get("paired-thread-ids"):
+                        numa["paired-threads-id"] = []
 
-                if numa_node.get("memory-mb"):
-                    numa["memory"] = max(int(int(numa_node["memory-mb"]) / 1024), 1)
+                        for pair in paired_threads["paired-thread-ids"]:
+                            numa["paired-threads-id"].append(
+                                (
+                                    str(pair["thread-a"]),
+                                    str(pair["thread-b"]),
+                                )
+                            )
 
-        return numa, epa_vcpu_set
+                    if numa_node.get("num-threads"):
+                        numa["threads"] = int(numa_node["num-threads"])
+                        epa_vcpu_set = True
+
+                    if numa_node.get("memory-mb"):
+                        numa["memory"] = max(int(int(numa_node["memory-mb"]) / 1024), 1)
+
+                    numa_list.append(numa)
+                    numa = {}
+
+        return numa_list, epa_vcpu_set
 
     @staticmethod
     def _process_guest_epa_cpu_pinning_params(
@@ -732,23 +745,39 @@
         """
         extended = {}
         numa = {}
+        numa_list = []
 
         if target_flavor.get("guest-epa"):
             guest_epa = target_flavor["guest-epa"]
 
-            numa, epa_vcpu_set = Ns._process_guest_epa_numa_params(
+            numa_list, epa_vcpu_set = Ns._process_guest_epa_numa_params(
                 guest_epa_quota=guest_epa
             )
 
             if guest_epa.get("mempage-size"):
                 extended["mempage-size"] = guest_epa.get("mempage-size")
 
+            if guest_epa.get("cpu-pinning-policy"):
+                extended["cpu-pinning-policy"] = guest_epa.get("cpu-pinning-policy")
+
+            if guest_epa.get("cpu-thread-pinning-policy"):
+                extended["cpu-thread-pinning-policy"] = guest_epa.get(
+                    "cpu-thread-pinning-policy"
+                )
+
+            if guest_epa.get("numa-node-policy"):
+                if guest_epa.get("numa-node-policy").get("mem-policy"):
+                    extended["mem-policy"] = guest_epa.get("numa-node-policy").get(
+                        "mem-policy"
+                    )
+
             tmp_numa, epa_vcpu_set = Ns._process_guest_epa_cpu_pinning_params(
                 guest_epa_quota=guest_epa,
                 vcpu_count=int(target_flavor.get("vcpu-count", 1)),
                 epa_vcpu_set=epa_vcpu_set,
             )
-            numa.update(tmp_numa)
+            for numa in numa_list:
+                numa.update(tmp_numa)
 
             extended.update(
                 Ns._process_guest_epa_quota_params(
@@ -758,7 +787,7 @@
             )
 
         if numa:
-            extended["numas"] = [numa]
+            extended["numas"] = numa_list
 
         return extended
 
@@ -920,7 +949,7 @@
                     "id": vim_info.get("vim_network_id"),
                 },
             }
-        elif target_vld.get("mgmt-network"):
+        elif target_vld.get("mgmt-network") and not vim_info.get("provider_network"):
             extra_dict["find_params"] = {
                 "mgmt": True,
                 "name": target_vld["id"],
@@ -947,24 +976,25 @@
     @staticmethod
     def find_persistent_root_volumes(
         vnfd: dict,
-        target_vdu: str,
+        target_vdu: dict,
         vdu_instantiation_volumes_list: list,
         disk_list: list,
-    ) -> (list, dict):
+    ) -> Dict[str, any]:
         """Find the persistent root volumes and add them to the disk_list
-        by parsing the instantiation parameters
+        by parsing the instantiation parameters.
 
         Args:
-            vnfd:   VNFD
-            target_vdu: processed VDU
-            vdu_instantiation_volumes_list: instantiation parameters for the each VDU as a list
-            disk_list:  to be filled up
+            vnfd    (dict):                                 VNF descriptor
+            target_vdu      (dict):                         processed VDU
+            vdu_instantiation_volumes_list  (list):         instantiation parameters for each VDU as a list
+            disk_list   (list):                             to be filled up
 
         Returns:
-            disk_list:  filled VDU list which is used for VDU creation
+            persistent_root_disk    (dict):                 Details of persistent root disk
 
         """
         persistent_root_disk = {}
+        # There can be only one root disk; once it is found, the result is returned immediately
 
         for vdu, vsd in product(
             vnfd.get("vdu", ()), vnfd.get("virtual-storage-desc", ())
@@ -992,8 +1022,7 @@
 
                             disk_list.append(persistent_root_disk[vsd["id"]])
 
-                            # There can be only one root disk, when we find it, it will return the result
-                            return disk_list, persistent_root_disk
+                            return persistent_root_disk
 
                     else:
 
@@ -1001,22 +1030,22 @@
                             persistent_root_disk[vsd["id"]] = {
                                 "image_id": vdu.get("sw-image-desc"),
                                 "size": root_disk.get("size-of-storage"),
+                                "keep": Ns.is_volume_keeping_required(root_disk),
                             }
 
                             disk_list.append(persistent_root_disk[vsd["id"]])
-                            return disk_list, persistent_root_disk
 
-        return disk_list, persistent_root_disk
+                            return persistent_root_disk
 
     @staticmethod
     def find_persistent_volumes(
         persistent_root_disk: dict,
-        target_vdu: str,
+        target_vdu: dict,
         vdu_instantiation_volumes_list: list,
         disk_list: list,
-    ) -> list:
+    ) -> None:
         """Find the ordinary persistent volumes and add them to the disk_list
-        by parsing the instantiation parameters
+        by parsing the instantiation parameters.
 
         Args:
             persistent_root_disk:   persistent root disk dictionary
@@ -1024,9 +1053,6 @@
             vdu_instantiation_volumes_list: instantiation parameters for the each VDU as a list
             disk_list:  to be filled up
 
-        Returns:
-            disk_list:  filled VDU list which is used for VDU creation
-
         """
         # Find the ordinary volumes which are not added to the persistent_root_disk
         persistent_disk = {}
@@ -1048,10 +1074,424 @@
                     if disk["id"] not in persistent_disk.keys():
                         persistent_disk[disk["id"]] = {
                             "size": disk.get("size-of-storage"),
+                            "keep": Ns.is_volume_keeping_required(disk),
                         }
                         disk_list.append(persistent_disk[disk["id"]])
 
-        return disk_list
+    @staticmethod
+    def is_volume_keeping_required(virtual_storage_desc: Dict[str, Any]) -> bool:
+        """Function to decide keeping persistent volume
+        upon VDU deletion.
+
+        Args:
+            virtual_storage_desc (Dict[str, Any]): virtual storage description dictionary
+
+        Returns:
+            bool (True/False)
+        """
+
+        if not virtual_storage_desc.get("vdu-storage-requirements"):
+            return False
+        for item in virtual_storage_desc.get("vdu-storage-requirements", {}):
+            if item.get("key") == "keep-volume" and item.get("value") == "true":
+                return True
+        return False
+
+    @staticmethod
+    def _sort_vdu_interfaces(target_vdu: dict) -> None:
+        """Sort the interfaces according to position number.
+
+        Args:
+            target_vdu  (dict):     Details of VDU to be created
+
+        """
+        # If the position info is provided for all the interfaces, they will be sorted
+        # in ascending order by position number.
+        sorted_interfaces = sorted(
+            target_vdu["interfaces"],
+            key=lambda x: (x.get("position") is None, x.get("position")),
+        )
+        target_vdu["interfaces"] = sorted_interfaces
+
+    @staticmethod
+    def _partially_locate_vdu_interfaces(target_vdu: dict) -> None:
+        """Only place the interfaces which has specific position.
+
+        Args:
+            target_vdu  (dict):     Details of VDU to be created
+
+        """
+        # If the position info is provided for some interfaces but not all of them, the interfaces
+        # which have specific position numbers will be placed and the others' positions will be ignored.
+        if any(
+            i.get("position") + 1
+            for i in target_vdu["interfaces"]
+            if i.get("position") is not None
+        ):
+            n = len(target_vdu["interfaces"])
+            sorted_interfaces = [-1] * n
+            k, m = 0, 0
+
+            while k < n:
+                if target_vdu["interfaces"][k].get("position") is not None:
+                    if any(i.get("position") == 0 for i in target_vdu["interfaces"]):
+                        idx = target_vdu["interfaces"][k]["position"] + 1
+                    else:
+                        idx = target_vdu["interfaces"][k]["position"]
+                    sorted_interfaces[idx - 1] = target_vdu["interfaces"][k]
+                k += 1
+
+            while m < n:
+                if target_vdu["interfaces"][m].get("position") is None:
+                    idy = sorted_interfaces.index(-1)
+                    sorted_interfaces[idy] = target_vdu["interfaces"][m]
+                m += 1
+
+            target_vdu["interfaces"] = sorted_interfaces
+
+    @staticmethod
+    def _prepare_vdu_cloud_init(
+        target_vdu: dict, vdu2cloud_init: dict, db: object, fs: object
+    ) -> Dict:
+        """Fill cloud_config dict with cloud init details.
+
+        Args:
+            target_vdu  (dict):         Details of VDU to be created
+            vdu2cloud_init  (dict):     Cloud init dict
+            db  (object):               DB object
+            fs  (object):               FS object
+
+        Returns:
+            cloud_config (dict):        Cloud config details of VDU
+
+        """
+        # cloud config
+        cloud_config = {}
+
+        if target_vdu.get("cloud-init"):
+            if target_vdu["cloud-init"] not in vdu2cloud_init:
+                vdu2cloud_init[target_vdu["cloud-init"]] = Ns._get_cloud_init(
+                    db=db,
+                    fs=fs,
+                    location=target_vdu["cloud-init"],
+                )
+
+            cloud_content_ = vdu2cloud_init[target_vdu["cloud-init"]]
+            cloud_config["user-data"] = Ns._parse_jinja2(
+                cloud_init_content=cloud_content_,
+                params=target_vdu.get("additionalParams"),
+                context=target_vdu["cloud-init"],
+            )
+
+        if target_vdu.get("boot-data-drive"):
+            cloud_config["boot-data-drive"] = target_vdu.get("boot-data-drive")
+
+        return cloud_config
+
+    @staticmethod
+    def _check_vld_information_of_interfaces(
+        interface: dict, ns_preffix: str, vnf_preffix: str
+    ) -> Optional[str]:
+        """Prepare the net_text by the virtual link information for vnf and ns level.
+        Args:
+            interface   (dict):         Interface details
+            ns_preffix  (str):          Prefix of NS
+            vnf_preffix (str):          Prefix of VNF
+
+        Returns:
+            net_text    (str):          information of net
+
+        """
+        net_text = ""
+        if interface.get("ns-vld-id"):
+            net_text = ns_preffix + ":vld." + interface["ns-vld-id"]
+        elif interface.get("vnf-vld-id"):
+            net_text = vnf_preffix + ":vld." + interface["vnf-vld-id"]
+
+        return net_text
+
+    @staticmethod
+    def _prepare_interface_port_security(interface: dict) -> None:
+        """
+
+        Args:
+            interface   (dict):     Interface details
+
+        """
+        if "port-security-enabled" in interface:
+            interface["port_security"] = interface.pop("port-security-enabled")
+
+        if "port-security-disable-strategy" in interface:
+            interface["port_security_disable_strategy"] = interface.pop(
+                "port-security-disable-strategy"
+            )
+
+    @staticmethod
+    def _create_net_item_of_interface(interface: dict, net_text: str) -> dict:
+        """Prepare net item including name, port security, floating ip etc.
+
+        Args:
+            interface   (dict):         Interface details
+            net_text    (str):          information of net
+
+        Returns:
+            net_item    (dict):         Dict including net details
+
+        """
+
+        net_item = {
+            x: v
+            for x, v in interface.items()
+            if x
+            in (
+                "name",
+                "vpci",
+                "port_security",
+                "port_security_disable_strategy",
+                "floating_ip",
+            )
+        }
+        net_item["net_id"] = "TASK-" + net_text
+        net_item["type"] = "virtual"
+
+        return net_item
+
+    @staticmethod
+    def _prepare_type_of_interface(
+        interface: dict, tasks_by_target_record_id: dict, net_text: str, net_item: dict
+    ) -> None:
+        """Fill the net item type by interface type such as SR-IOV, OM-MGMT, bridge etc.
+
+        Args:
+            interface   (dict):                     Interface details
+            tasks_by_target_record_id   (dict):     Task details
+            net_text    (str):                      information of net
+            net_item    (dict):                     Dict including net details
+
+        """
+        # TODO mac_address: used for  SR-IOV ifaces #TODO for other types
+        # TODO floating_ip: True/False (or it can be None)
+
+        if interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
+            # Mark the net create task as type data
+            if deep_get(
+                tasks_by_target_record_id,
+                net_text,
+                "extra_dict",
+                "params",
+                "net_type",
+            ):
+                tasks_by_target_record_id[net_text]["extra_dict"]["params"][
+                    "net_type"
+                ] = "data"
+
+            net_item["use"] = "data"
+            net_item["model"] = interface["type"]
+            net_item["type"] = interface["type"]
+
+        elif (
+            interface.get("type") == "OM-MGMT"
+            or interface.get("mgmt-interface")
+            or interface.get("mgmt-vnf")
+        ):
+            net_item["use"] = "mgmt"
+
+        else:
+            # If interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
+            net_item["use"] = "bridge"
+            net_item["model"] = interface.get("type")
+
+    @staticmethod
+    def _prepare_vdu_interfaces(
+        target_vdu: dict,
+        extra_dict: dict,
+        ns_preffix: str,
+        vnf_preffix: str,
+        logger: object,
+        tasks_by_target_record_id: dict,
+        net_list: list,
+    ) -> None:
+        """Prepare the net_item and add net_list, add mgmt interface to extra_dict.
+
+        Args:
+            target_vdu  (dict):                             VDU to be created
+            extra_dict  (dict):                             Dictionary to be filled
+            ns_preffix  (str):                              NS prefix as string
+            vnf_preffix (str):                              VNF prefix as string
+            logger  (object):                               Logger Object
+            tasks_by_target_record_id  (dict):              Task details
+            net_list    (list):                             Net list of VDU
+        """
+        for iface_index, interface in enumerate(target_vdu["interfaces"]):
+
+            net_text = Ns._check_vld_information_of_interfaces(
+                interface, ns_preffix, vnf_preffix
+            )
+            if not net_text:
+                # Interface not connected to any vld
+                logger.error(
+                    "Interface {} from vdu {} not connected to any vld".format(
+                        iface_index, target_vdu["vdu-name"]
+                    )
+                )
+                continue
+
+            extra_dict["depends_on"].append(net_text)
+
+            Ns._prepare_interface_port_security(interface)
+
+            net_item = Ns._create_net_item_of_interface(interface, net_text)
+
+            Ns._prepare_type_of_interface(
+                interface, tasks_by_target_record_id, net_text, net_item
+            )
+
+            if interface.get("ip-address"):
+                net_item["ip_address"] = interface["ip-address"]
+
+            if interface.get("mac-address"):
+                net_item["mac_address"] = interface["mac-address"]
+
+            net_list.append(net_item)
+
+            if interface.get("mgmt-vnf"):
+                extra_dict["mgmt_vnf_interface"] = iface_index
+            elif interface.get("mgmt-interface"):
+                extra_dict["mgmt_vdu_interface"] = iface_index
+
+    @staticmethod
+    def _prepare_vdu_ssh_keys(
+        target_vdu: dict, ro_nsr_public_key: dict, cloud_config: dict
+    ) -> None:
+        """Add ssh keys to cloud config.
+
+        Args:
+           target_vdu  (dict):                 Details of VDU to be created
+           ro_nsr_public_key   (dict):          RO NSR public Key
+           cloud_config  (dict):               Cloud config details
+
+        """
+        ssh_keys = []
+
+        if target_vdu.get("ssh-keys"):
+            ssh_keys += target_vdu.get("ssh-keys")
+
+        if target_vdu.get("ssh-access-required"):
+            ssh_keys.append(ro_nsr_public_key)
+
+        if ssh_keys:
+            cloud_config["key-pairs"] = ssh_keys
+
+    @staticmethod
+    def _select_persistent_root_disk(vsd: dict, vdu: dict) -> dict:
+        """Selects the persistent root disk if exists.
+        Args:
+            vsd (dict):             Virtual storage descriptors in VNFD
+            vdu (dict):             VNF descriptor
+
+        Returns:
+            root_disk   (dict):     Selected persistent root disk
+        """
+        if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
+            root_disk = vsd
+            if root_disk.get(
+                "type-of-storage"
+            ) == "persistent-storage:persistent-storage" and root_disk.get(
+                "size-of-storage"
+            ):
+                return root_disk
+
+    @staticmethod
+    def _add_persistent_root_disk_to_disk_list(
+        vnfd: dict, target_vdu: dict, persistent_root_disk: dict, disk_list: list
+    ) -> None:
+        """Find the persistent root disk and add to disk list.
+
+        Args:
+            vnfd  (dict):                           VNF descriptor
+            target_vdu  (dict):                     Details of VDU to be created
+            persistent_root_disk    (dict):         Details of persistent root disk
+            disk_list   (list):                     Disks of VDU
+
+        """
+        for vdu in vnfd.get("vdu", ()):
+            if vdu["name"] == target_vdu["vdu-name"]:
+                for vsd in vnfd.get("virtual-storage-desc", ()):
+                    root_disk = Ns._select_persistent_root_disk(vsd, vdu)
+                    if not root_disk:
+                        continue
+
+                    persistent_root_disk[vsd["id"]] = {
+                        "image_id": vdu.get("sw-image-desc"),
+                        "size": root_disk["size-of-storage"],
+                        "keep": Ns.is_volume_keeping_required(root_disk),
+                    }
+
+                    disk_list.append(persistent_root_disk[vsd["id"]])
+                    break
+
+    @staticmethod
+    def _add_persistent_ordinary_disks_to_disk_list(
+        target_vdu: dict,
+        persistent_root_disk: dict,
+        persistent_ordinary_disk: dict,
+        disk_list: list,
+    ) -> None:
+        """Fill the disk list by adding persistent ordinary disks.
+
+        Args:
+            target_vdu  (dict):                     Details of VDU to be created
+            persistent_root_disk    (dict):         Details of persistent root disk
+            persistent_ordinary_disk    (dict):     Details of persistent ordinary disk
+            disk_list   (list):                     Disks of VDU
+
+        """
+        if target_vdu.get("virtual-storages"):
+            for disk in target_vdu["virtual-storages"]:
+                if (
+                    disk.get("type-of-storage")
+                    == "persistent-storage:persistent-storage"
+                    and disk["id"] not in persistent_root_disk.keys()
+                ):
+                    persistent_ordinary_disk[disk["id"]] = {
+                        "size": disk["size-of-storage"],
+                        "keep": Ns.is_volume_keeping_required(disk),
+                    }
+                    disk_list.append(persistent_ordinary_disk[disk["id"]])
+
+    @staticmethod
+    def _prepare_vdu_affinity_group_list(
+        target_vdu: dict, extra_dict: dict, ns_preffix: str
+    ) -> List[Dict[str, any]]:
+        """Process affinity group details to prepare affinity group list.
+
+        Args:
+            target_vdu  (dict):     Details of VDU to be created
+            extra_dict  (dict):     Dictionary to be filled
+            ns_preffix  (str):      Prefix as string
+
+        Returns:
+
+            affinity_group_list (list):     Affinity group details
+
+        """
+        affinity_group_list = []
+
+        if target_vdu.get("affinity-or-anti-affinity-group-id"):
+            for affinity_group_id in target_vdu["affinity-or-anti-affinity-group-id"]:
+                affinity_group = {}
+                affinity_group_text = (
+                    ns_preffix + ":affinity-or-anti-affinity-group." + affinity_group_id
+                )
+
+                if not isinstance(extra_dict.get("depends_on"), list):
+                    raise NsException("Invalid extra_dict format.")
+
+                extra_dict["depends_on"].append(affinity_group_text)
+                affinity_group["affinity_group_id"] = "TASK-" + affinity_group_text
+                affinity_group_list.append(affinity_group)
+
+        return affinity_group_list
 
     @staticmethod
     def _process_vdu_params(
@@ -1089,164 +1529,46 @@
         extra_dict = {"depends_on": [image_text, flavor_text]}
         net_list = []
 
-        # If the position info is provided for all the interfaces, it will be sorted
-        # according to position number ascendingly.
-        if all(
-            i.get("position") + 1
-            for i in target_vdu["interfaces"]
-            if i.get("position") is not None
-        ):
-            sorted_interfaces = sorted(
-                target_vdu["interfaces"],
-                key=lambda x: (x.get("position") is None, x.get("position")),
-            )
-            target_vdu["interfaces"] = sorted_interfaces
-
-        # If the position info is provided for some interfaces but not all of them, the interfaces
-        # which has specific position numbers will be placed and others' positions will not be taken care.
-        else:
-            if any(
-                i.get("position") + 1
-                for i in target_vdu["interfaces"]
-                if i.get("position") is not None
-            ):
-                n = len(target_vdu["interfaces"])
-                sorted_interfaces = [-1] * n
-                k, m = 0, 0
-                while k < n:
-                    if target_vdu["interfaces"][k].get("position"):
-                        idx = target_vdu["interfaces"][k]["position"]
-                        sorted_interfaces[idx - 1] = target_vdu["interfaces"][k]
-                    k += 1
-                while m < n:
-                    if not target_vdu["interfaces"][m].get("position"):
-                        idy = sorted_interfaces.index(-1)
-                        sorted_interfaces[idy] = target_vdu["interfaces"][m]
-                    m += 1
-
-                target_vdu["interfaces"] = sorted_interfaces
-
-        # If the position info is not provided for the interfaces, interfaces will be attached
-        # according to the order in the VNFD.
-        for iface_index, interface in enumerate(target_vdu["interfaces"]):
-            if interface.get("ns-vld-id"):
-                net_text = ns_preffix + ":vld." + interface["ns-vld-id"]
-            elif interface.get("vnf-vld-id"):
-                net_text = vnf_preffix + ":vld." + interface["vnf-vld-id"]
-            else:
-                logger.error(
-                    "Interface {} from vdu {} not connected to any vld".format(
-                        iface_index, target_vdu["vdu-name"]
-                    )
-                )
-
-                continue  # interface not connected to any vld
-
-            extra_dict["depends_on"].append(net_text)
-
-            if "port-security-enabled" in interface:
-                interface["port_security"] = interface.pop("port-security-enabled")
-
-            if "port-security-disable-strategy" in interface:
-                interface["port_security_disable_strategy"] = interface.pop(
-                    "port-security-disable-strategy"
-                )
-
-            net_item = {
-                x: v
-                for x, v in interface.items()
-                if x
-                in (
-                    "name",
-                    "vpci",
-                    "port_security",
-                    "port_security_disable_strategy",
-                    "floating_ip",
-                )
-            }
-            net_item["net_id"] = "TASK-" + net_text
-            net_item["type"] = "virtual"
-
-            # TODO mac_address: used for  SR-IOV ifaces #TODO for other types
-            # TODO floating_ip: True/False (or it can be None)
-            if interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
-                # mark the net create task as type data
-                if deep_get(
-                    tasks_by_target_record_id,
-                    net_text,
-                    "extra_dict",
-                    "params",
-                    "net_type",
-                ):
-                    tasks_by_target_record_id[net_text]["extra_dict"]["params"][
-                        "net_type"
-                    ] = "data"
-
-                net_item["use"] = "data"
-                net_item["model"] = interface["type"]
-                net_item["type"] = interface["type"]
-            elif (
-                interface.get("type") == "OM-MGMT"
-                or interface.get("mgmt-interface")
-                or interface.get("mgmt-vnf")
-            ):
-                net_item["use"] = "mgmt"
-            else:
-                # if interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
-                net_item["use"] = "bridge"
-                net_item["model"] = interface.get("type")
-
-            if interface.get("ip-address"):
-                net_item["ip_address"] = interface["ip-address"]
-
-            if interface.get("mac-address"):
-                net_item["mac_address"] = interface["mac-address"]
-
-            net_list.append(net_item)
-
-            if interface.get("mgmt-vnf"):
-                extra_dict["mgmt_vnf_interface"] = iface_index
-            elif interface.get("mgmt-interface"):
-                extra_dict["mgmt_vdu_interface"] = iface_index
-
-        # cloud config
-        cloud_config = {}
-
-        if target_vdu.get("cloud-init"):
-            if target_vdu["cloud-init"] not in vdu2cloud_init:
-                vdu2cloud_init[target_vdu["cloud-init"]] = Ns._get_cloud_init(
-                    db=db,
-                    fs=fs,
-                    location=target_vdu["cloud-init"],
-                )
-
-            cloud_content_ = vdu2cloud_init[target_vdu["cloud-init"]]
-            cloud_config["user-data"] = Ns._parse_jinja2(
-                cloud_init_content=cloud_content_,
-                params=target_vdu.get("additionalParams"),
-                context=target_vdu["cloud-init"],
-            )
-
-        if target_vdu.get("boot-data-drive"):
-            cloud_config["boot-data-drive"] = target_vdu.get("boot-data-drive")
-
-        ssh_keys = []
-
-        if target_vdu.get("ssh-keys"):
-            ssh_keys += target_vdu.get("ssh-keys")
-
-        if target_vdu.get("ssh-access-required"):
-            ssh_keys.append(ro_nsr_public_key)
-
-        if ssh_keys:
-            cloud_config["key-pairs"] = ssh_keys
-
         persistent_root_disk = {}
+        persistent_ordinary_disk = {}
         vdu_instantiation_volumes_list = []
         disk_list = []
         vnfd_id = vnfr["vnfd-id"]
         vnfd = db.get_one("vnfds", {"_id": vnfd_id})
 
+        # If the position info is provided for all the interfaces, they will be sorted
+        # in ascending order by position number.
+        if all(
+            True if i.get("position") is not None else False
+            for i in target_vdu["interfaces"]
+        ):
+
+            Ns._sort_vdu_interfaces(target_vdu)
+
+        # If the position info is provided for some interfaces but not all of them, the interfaces
+        # which have specific position numbers will be placed and the others' positions will be ignored.
+        else:
+
+            Ns._partially_locate_vdu_interfaces(target_vdu)
+
+        # If the position info is not provided for the interfaces, interfaces will be attached
+        # according to the order in the VNFD.
+        Ns._prepare_vdu_interfaces(
+            target_vdu,
+            extra_dict,
+            ns_preffix,
+            vnf_preffix,
+            logger,
+            tasks_by_target_record_id,
+            net_list,
+        )
+
+        # cloud config
+        cloud_config = Ns._prepare_vdu_cloud_init(target_vdu, vdu2cloud_init, db, fs)
+
+        # Prepare VDU ssh keys
+        Ns._prepare_vdu_ssh_keys(target_vdu, ro_nsr_public_key, cloud_config)
+
         if target_vdu.get("additionalParams"):
             vdu_instantiation_volumes_list = (
                 target_vdu.get("additionalParams").get("OSM").get("vdu_volumes")
@@ -1255,13 +1577,13 @@
         if vdu_instantiation_volumes_list:
 
             # Find the root volumes and add to the disk_list
-            (disk_list, persistent_root_disk,) = Ns.find_persistent_root_volumes(
+            persistent_root_disk = Ns.find_persistent_root_volumes(
                 vnfd, target_vdu, vdu_instantiation_volumes_list, disk_list
             )
 
             # Find the ordinary volumes which are not added to the persistent_root_disk
             # and put them to the disk list
-            disk_list = Ns.find_persistent_volumes(
+            Ns.find_persistent_volumes(
                 persistent_root_disk,
                 target_vdu,
                 vdu_instantiation_volumes_list,
@@ -1269,45 +1591,19 @@
             )
 
         else:
+            # vdu_instantiation_volumes_list is empty
+            # First, add the persistent root disks to disk_list
+            Ns._add_persistent_root_disk_to_disk_list(
+                vnfd, target_vdu, persistent_root_disk, disk_list
+            )
+            # Add the persistent non-root disks to disk_list
+            Ns._add_persistent_ordinary_disks_to_disk_list(
+                target_vdu, persistent_root_disk, persistent_ordinary_disk, disk_list
+            )
 
-            # vdu_instantiation_volumes_list is empty
-            for vdu in vnfd.get("vdu", ()):
-                if vdu["name"] == target_vdu["vdu-name"]:
-                    for vsd in vnfd.get("virtual-storage-desc", ()):
-                        if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
-                            root_disk = vsd
-                            if root_disk.get(
-                                "type-of-storage"
-                            ) == "persistent-storage:persistent-storage" and root_disk.get(
-                                "size-of-storage"
-                            ):
-                                persistent_root_disk[vsd["id"]] = {
-                                    "image_id": vdu.get("sw-image-desc"),
-                                    "size": root_disk["size-of-storage"],
-                                }
-                                disk_list.append(persistent_root_disk[vsd["id"]])
-
-            if target_vdu.get("virtual-storages"):
-                for disk in target_vdu["virtual-storages"]:
-                    if (
-                        disk.get("type-of-storage")
-                        == "persistent-storage:persistent-storage"
-                        and disk["id"] not in persistent_root_disk.keys()
-                    ):
-                        disk_list.append({"size": disk["size-of-storage"]})
-
-        affinity_group_list = []
-
-        if target_vdu.get("affinity-or-anti-affinity-group-id"):
-            affinity_group = {}
-            for affinity_group_id in target_vdu["affinity-or-anti-affinity-group-id"]:
-                affinity_group_text = (
-                    ns_preffix + ":affinity-or-anti-affinity-group." + affinity_group_id
-                )
-
-                extra_dict["depends_on"].append(affinity_group_text)
-                affinity_group["affinity_group_id"] = "TASK-" + affinity_group_text
-                affinity_group_list.append(affinity_group)
+        affinity_group_list = Ns._prepare_vdu_affinity_group_list(
+            target_vdu, extra_dict, ns_preffix
+        )
 
         extra_dict["params"] = {
             "name": "{}-{}-{}-{}".format(
diff --git a/NG-RO/osm_ng_ro/ns_thread.py b/NG-RO/osm_ng_ro/ns_thread.py
index 7194446..ed971dc 100644
--- a/NG-RO/osm_ng_ro/ns_thread.py
+++ b/NG-RO/osm_ng_ro/ns_thread.py
@@ -30,7 +30,6 @@
 from os import makedirs
 from os import path
 import queue
-from shutil import rmtree
 import threading
 import time
 import traceback
@@ -749,8 +748,10 @@
                 try:
                     flavor_data = task["find_params"]["flavor_data"]
                     vim_flavor_id = target_vim.get_flavor_id_from_data(flavor_data)
-                except vimconn.VimConnNotFoundException:
-                    self.logger.warning("VimConnNotFoundException occured.")
+                except vimconn.VimConnNotFoundException as flavor_not_found_msg:
+                    self.logger.warning(
+                        f"VimConnNotFoundException occured: {flavor_not_found_msg}"
+                    )
 
             if not vim_flavor_id and task.get("params"):
                 # CREATE
@@ -1658,10 +1659,6 @@
                 self.vim_targets.remove(target_id)
 
             self.logger.info("Unloaded {}".format(target_id))
-            rmtree("{}:{}".format(target_id, self.worker_index))
-        except FileNotFoundError:
-            # This is raised by rmtree if folder does not exist.
-            self.logger.exception("FileNotFoundError occured while unloading VIM.")
         except Exception as e:
             self.logger.error("Cannot unload {}: {}".format(target_id, e))
 
diff --git a/NG-RO/osm_ng_ro/tests/test_ns.py b/NG-RO/osm_ng_ro/tests/test_ns.py
index e69a4c5..97f072b 100644
--- a/NG-RO/osm_ng_ro/tests/test_ns.py
+++ b/NG-RO/osm_ng_ro/tests/test_ns.py
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #######################################################################################
-
+from copy import deepcopy
 import unittest
 from unittest.mock import MagicMock, Mock, patch
 
@@ -33,6 +33,289 @@
 __date__ = "$19-NOV-2021 00:00:00$"
 
 
+# Variables used in Tests
+vnfd_wth_persistent_storage = {
+    "_id": "ad6356e3-698c-43bf-9901-3aae9e9b9d18",
+    "df": [
+        {
+            "id": "default-df",
+            "vdu-profile": [{"id": "several_volumes-VM", "min-number-of-instances": 1}],
+        }
+    ],
+    "id": "several_volumes-vnf",
+    "product-name": "several_volumes-vnf",
+    "vdu": [
+        {
+            "id": "several_volumes-VM",
+            "name": "several_volumes-VM",
+            "sw-image-desc": "ubuntu20.04",
+            "alternative-sw-image-desc": [
+                "ubuntu20.04-aws",
+                "ubuntu20.04-azure",
+            ],
+            "virtual-storage-desc": [
+                "persistent-root-volume",
+                "persistent-volume2",
+                "ephemeral-volume",
+            ],
+        }
+    ],
+    "version": "1.0",
+    "virtual-storage-desc": [
+        {
+            "id": "persistent-volume2",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "size-of-storage": "10",
+        },
+        {
+            "id": "persistent-root-volume",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "size-of-storage": "10",
+            "vdu-storage-requirements": [
+                {"key": "keep-volume", "value": "true"},
+            ],
+        },
+        {
+            "id": "ephemeral-volume",
+            "type-of-storage": "etsi-nfv-descriptors:ephemeral-storage",
+            "size-of-storage": "1",
+        },
+    ],
+    "_admin": {
+        "storage": {
+            "fs": "mongo",
+            "path": "/app/storage/",
+        },
+        "type": "vnfd",
+    },
+}
+vim_volume_id = "ru937f49-3870-4169-b758-9732e1ff40f3"
+task_by_target_record_id = {
+    "nsrs:th47f48-9870-4169-b758-9732e1ff40f3": {
+        "extra_dict": {"params": {"net_type": "SR-IOV"}}
+    }
+}
+interfaces_wthout_positions = [
+    {
+        "name": "vdu-eth1",
+        "ns-vld-id": "net1",
+    },
+    {
+        "name": "vdu-eth2",
+        "ns-vld-id": "net2",
+    },
+    {
+        "name": "vdu-eth3",
+        "ns-vld-id": "mgmtnet",
+    },
+]
+interfaces_wth_all_positions = [
+    {
+        "name": "vdu-eth1",
+        "ns-vld-id": "net1",
+        "position": 2,
+    },
+    {
+        "name": "vdu-eth2",
+        "ns-vld-id": "net2",
+        "position": 0,
+    },
+    {
+        "name": "vdu-eth3",
+        "ns-vld-id": "mgmtnet",
+        "position": 1,
+    },
+]
+target_vdu_wth_persistent_storage = {
+    "_id": "09a0baa7-b7cb-4924-bd63-9f04a1c23960",
+    "ns-flavor-id": "0",
+    "ns-image-id": "0",
+    "vdu-name": "several_volumes-VM",
+    "interfaces": [
+        {
+            "name": "vdu-eth0",
+            "ns-vld-id": "mgmtnet",
+        }
+    ],
+    "virtual-storages": [
+        {
+            "id": "persistent-volume2",
+            "size-of-storage": "10",
+            "type-of-storage": "persistent-storage:persistent-storage",
+        },
+        {
+            "id": "persistent-root-volume",
+            "size-of-storage": "10",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "vdu-storage-requirements": [
+                {"key": "keep-volume", "value": "true"},
+            ],
+        },
+        {
+            "id": "ephemeral-volume",
+            "size-of-storage": "1",
+            "type-of-storage": "etsi-nfv-descriptors:ephemeral-storage",
+        },
+    ],
+}
+db = MagicMock(name="database mock")
+fs = MagicMock(name="database mock")
+ns_preffix = "nsrs:th47f48-9870-4169-b758-9732e1ff40f3"
+vnf_preffix = "vnfrs:wh47f48-y870-4169-b758-5732e1ff40f5"
+vnfr_id = "wh47f48-y870-4169-b758-5732e1ff40f5"
+nsr_id = "th47f48-9870-4169-b758-9732e1ff40f3"
+indata = {
+    "name": "sample_name",
+}
+expected_extra_dict = {
+    "depends_on": [
+        f"{ns_preffix}:image.0",
+        f"{ns_preffix}:flavor.0",
+    ],
+    "params": {
+        "affinity_group_list": [],
+        "availability_zone_index": None,
+        "availability_zone_list": None,
+        "cloud_config": None,
+        "description": "several_volumes-VM",
+        "disk_list": [],
+        "flavor_id": f"TASK-{ns_preffix}:flavor.0",
+        "image_id": f"TASK-{ns_preffix}:image.0",
+        "name": "sample_name-vnf-several-volu-several_volumes-VM-0",
+        "net_list": [],
+        "start": True,
+    },
+}
+
+expected_extra_dict2 = {
+    "depends_on": [
+        f"{ns_preffix}:image.0",
+        f"{ns_preffix}:flavor.0",
+    ],
+    "params": {
+        "affinity_group_list": [],
+        "availability_zone_index": None,
+        "availability_zone_list": None,
+        "cloud_config": None,
+        "description": "without_volumes-VM",
+        "disk_list": [],
+        "flavor_id": f"TASK-{ns_preffix}:flavor.0",
+        "image_id": f"TASK-{ns_preffix}:image.0",
+        "name": "sample_name-vnf-several-volu-without_volumes-VM-0",
+        "net_list": [],
+        "start": True,
+    },
+}
+tasks_by_target_record_id = {
+    "nsrs:th47f48-9870-4169-b758-9732e1ff40f3": {
+        "extra_dict": {
+            "params": {
+                "net_type": "SR-IOV",
+            }
+        }
+    }
+}
+kwargs = {
+    "db": MagicMock(),
+    "vdu2cloud_init": {},
+    "vnfr": {
+        "vnfd-id": "ad6356e3-698c-43bf-9901-3aae9e9b9d18",
+        "member-vnf-index-ref": "vnf-several-volumes",
+    },
+}
+vnfd_wthout_persistent_storage = {
+    "_id": "ad6356e3-698c-43bf-9901-3aae9e9b9d18",
+    "df": [
+        {
+            "id": "default-df",
+            "vdu-profile": [{"id": "without_volumes-VM", "min-number-of-instances": 1}],
+        }
+    ],
+    "id": "without_volumes-vnf",
+    "product-name": "without_volumes-vnf",
+    "vdu": [
+        {
+            "id": "without_volumes-VM",
+            "name": "without_volumes-VM",
+            "sw-image-desc": "ubuntu20.04",
+            "alternative-sw-image-desc": [
+                "ubuntu20.04-aws",
+                "ubuntu20.04-azure",
+            ],
+            "virtual-storage-desc": ["root-volume", "ephemeral-volume"],
+        }
+    ],
+    "version": "1.0",
+    "virtual-storage-desc": [
+        {"id": "root-volume", "size-of-storage": "10"},
+        {
+            "id": "ephemeral-volume",
+            "type-of-storage": "etsi-nfv-descriptors:ephemeral-storage",
+            "size-of-storage": "1",
+        },
+    ],
+    "_admin": {
+        "storage": {
+            "fs": "mongo",
+            "path": "/app/storage/",
+        },
+        "type": "vnfd",
+    },
+}
+
+target_vdu_wthout_persistent_storage = {
+    "_id": "09a0baa7-b7cb-4924-bd63-9f04a1c23960",
+    "ns-flavor-id": "0",
+    "ns-image-id": "0",
+    "vdu-name": "without_volumes-VM",
+    "interfaces": [
+        {
+            "name": "vdu-eth0",
+            "ns-vld-id": "mgmtnet",
+        }
+    ],
+    "virtual-storages": [
+        {
+            "id": "root-volume",
+            "size-of-storage": "10",
+        },
+        {
+            "id": "ephemeral-volume",
+            "size-of-storage": "1",
+            "type-of-storage": "etsi-nfv-descriptors:ephemeral-storage",
+        },
+    ],
+}
+cloud_init_content = """
+disk_setup:
+    ephemeral0:
+        table_type: {{type}}
+        layout: True
+        overwrite: {{is_override}}
+runcmd:
+     - [ ls, -l, / ]
+     - [ sh, -xc, "echo $(date) '{{command}}'" ]
+"""
+
+user_data = """
+disk_setup:
+    ephemeral0:
+        table_type: mbr
+        layout: True
+        overwrite: False
+runcmd:
+     - [ ls, -l, / ]
+     - [ sh, -xc, "echo $(date) '& rm -rf /'" ]
+"""
+
+
+class CopyingMock(MagicMock):
+    def __call__(self, *args, **kwargs):
+        args = deepcopy(args)
+        kwargs = deepcopy(kwargs)
+        return super(CopyingMock, self).__call__(*args, **kwargs)
+
+
 class TestNs(unittest.TestCase):
     def setUp(self):
         pass
@@ -831,19 +1114,18 @@
         self.assertDictEqual(expected_result, result)
 
     def test__process_guest_epa_numa_params_with_empty_numa_params(self):
-        expected_numa_result = {}
+        expected_numa_result = []
         expected_epa_vcpu_set_result = False
         guest_epa_quota = {}
 
         numa_result, epa_vcpu_set_result = Ns._process_guest_epa_numa_params(
             guest_epa_quota=guest_epa_quota,
         )
-
-        self.assertDictEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_numa_result, numa_result)
         self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
 
     def test__process_guest_epa_numa_params_with_wrong_numa_params(self):
-        expected_numa_result = {}
+        expected_numa_result = []
         expected_epa_vcpu_set_result = False
         guest_epa_quota = {"no_nume": "here"}
 
@@ -851,11 +1133,11 @@
             guest_epa_quota=guest_epa_quota,
         )
 
-        self.assertDictEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_numa_result, numa_result)
         self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
 
     def test__process_guest_epa_numa_params_with_numa_node_policy(self):
-        expected_numa_result = {}
+        expected_numa_result = []
         expected_epa_vcpu_set_result = False
         guest_epa_quota = {"numa-node-policy": {}}
 
@@ -863,11 +1145,11 @@
             guest_epa_quota=guest_epa_quota,
         )
 
-        self.assertDictEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_numa_result, numa_result)
         self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
 
     def test__process_guest_epa_numa_params_with_no_node(self):
-        expected_numa_result = {}
+        expected_numa_result = []
         expected_epa_vcpu_set_result = False
         guest_epa_quota = {
             "numa-node-policy": {
@@ -879,11 +1161,11 @@
             guest_epa_quota=guest_epa_quota,
         )
 
-        self.assertDictEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_numa_result, numa_result)
         self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
 
     def test__process_guest_epa_numa_params_with_1_node_num_cores(self):
-        expected_numa_result = {"cores": 3}
+        expected_numa_result = [{"cores": 3}]
         expected_epa_vcpu_set_result = True
         guest_epa_quota = {
             "numa-node-policy": {
@@ -899,11 +1181,11 @@
             guest_epa_quota=guest_epa_quota,
         )
 
-        self.assertDictEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_numa_result, numa_result)
         self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
 
     def test__process_guest_epa_numa_params_with_1_node_paired_threads(self):
-        expected_numa_result = {"paired-threads": 3}
+        expected_numa_result = [{"paired_threads": 3}]
         expected_epa_vcpu_set_result = True
         guest_epa_quota = {
             "numa-node-policy": {
@@ -919,13 +1201,15 @@
             guest_epa_quota=guest_epa_quota,
         )
 
-        self.assertDictEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_numa_result, numa_result)
         self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
 
     def test__process_guest_epa_numa_params_with_1_node_paired_threads_ids(self):
-        expected_numa_result = {
-            "paired-threads-id": [("0", "1"), ("4", "5")],
-        }
+        expected_numa_result = [
+            {
+                "paired-threads-id": [("0", "1"), ("4", "5")],
+            }
+        ]
         expected_epa_vcpu_set_result = False
         guest_epa_quota = {
             "numa-node-policy": {
@@ -952,11 +1236,11 @@
             guest_epa_quota=guest_epa_quota,
         )
 
-        self.assertDictEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_numa_result, numa_result)
         self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
 
     def test__process_guest_epa_numa_params_with_1_node_num_threads(self):
-        expected_numa_result = {"threads": 3}
+        expected_numa_result = [{"threads": 3}]
         expected_epa_vcpu_set_result = True
         guest_epa_quota = {
             "numa-node-policy": {
@@ -972,11 +1256,11 @@
             guest_epa_quota=guest_epa_quota,
         )
 
-        self.assertDictEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_numa_result, numa_result)
         self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
 
     def test__process_guest_epa_numa_params_with_1_node_memory_mb(self):
-        expected_numa_result = {"memory": 2}
+        expected_numa_result = [{"memory": 2}]
         expected_epa_vcpu_set_result = False
         guest_epa_quota = {
             "numa-node-policy": {
@@ -992,17 +1276,71 @@
             guest_epa_quota=guest_epa_quota,
         )
 
-        self.assertDictEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
+
+    def test__process_guest_epa_numa_params_with_1_node_vcpu(self):
+        expected_numa_result = [
+            {
+                "id": 0,
+                "vcpu": [0, 1],
+            }
+        ]
+        expected_epa_vcpu_set_result = False
+        guest_epa_quota = {
+            "numa-node-policy": {
+                "node": [{"id": "0", "vcpu": [{"id": "0"}, {"id": "1"}]}],
+            },
+        }
+
+        numa_result, epa_vcpu_set_result = Ns._process_guest_epa_numa_params(
+            guest_epa_quota=guest_epa_quota,
+        )
+
+        self.assertEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
+
+    def test__process_guest_epa_numa_params_with_2_node_vcpu(self):
+        expected_numa_result = [
+            {
+                "id": 0,
+                "vcpu": [0, 1],
+            },
+            {
+                "id": 1,
+                "vcpu": [2, 3],
+            },
+        ]
+
+        expected_epa_vcpu_set_result = False
+        guest_epa_quota = {
+            "numa-node-policy": {
+                "node": [
+                    {"id": "0", "vcpu": [{"id": "0"}, {"id": "1"}]},
+                    {"id": "1", "vcpu": [{"id": "2"}, {"id": "3"}]},
+                ],
+            },
+        }
+
+        numa_result, epa_vcpu_set_result = Ns._process_guest_epa_numa_params(
+            guest_epa_quota=guest_epa_quota,
+        )
+
+        self.assertEqual(expected_numa_result, numa_result)
         self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
 
     def test__process_guest_epa_numa_params_with_1_node(self):
-        expected_numa_result = {
-            "cores": 3,
-            "paired-threads": 3,
-            "paired-threads-id": [("0", "1"), ("4", "5")],
-            "threads": 3,
-            "memory": 2,
-        }
+        expected_numa_result = [
+            {
+                # "id": 0,
+                # "vcpu": [0, 1],
+                "cores": 3,
+                "paired_threads": 3,
+                "paired-threads-id": [("0", "1"), ("4", "5")],
+                "threads": 3,
+                "memory": 2,
+            }
+        ]
         expected_epa_vcpu_set_result = True
         guest_epa_quota = {
             "numa-node-policy": {
@@ -1033,17 +1371,26 @@
             guest_epa_quota=guest_epa_quota,
         )
 
-        self.assertDictEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_numa_result, numa_result)
         self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
 
     def test__process_guest_epa_numa_params_with_2_nodes(self):
-        expected_numa_result = {
-            "cores": 3,
-            "paired-threads": 3,
-            "paired-threads-id": [("0", "1"), ("4", "5")],
-            "threads": 3,
-            "memory": 2,
-        }
+        expected_numa_result = [
+            {
+                "cores": 3,
+                "paired_threads": 3,
+                "paired-threads-id": [("0", "1"), ("4", "5")],
+                "threads": 3,
+                "memory": 2,
+            },
+            {
+                "cores": 7,
+                "paired_threads": 7,
+                "paired-threads-id": [("2", "3"), ("5", "6")],
+                "threads": 4,
+                "memory": 4,
+            },
+        ]
         expected_epa_vcpu_set_result = True
         guest_epa_quota = {
             "numa-node-policy": {
@@ -1092,7 +1439,7 @@
             guest_epa_quota=guest_epa_quota,
         )
 
-        self.assertDictEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_numa_result, numa_result)
         self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
 
     def test__process_guest_epa_cpu_pinning_params_with_empty_params(self):
@@ -1237,10 +1584,15 @@
         guest_epa_cpu_pinning_params,
         guest_epa_quota_params,
     ):
-        expected_result = {}
+        expected_result = {
+            "mem-policy": "STRICT",
+        }
         target_flavor = {
             "guest-epa": {
                 "vcpu-count": 1,
+                "numa-node-policy": {
+                    "mem-policy": "STRICT",
+                },
             },
         }
 
@@ -1268,9 +1620,16 @@
     ):
         expected_result = {
             "mempage-size": "1G",
+            "mem-policy": "STRICT",
         }
         target_flavor = {
-            "guest-epa": {"vcpu-count": 1, "mempage-size": "1G"},
+            "guest-epa": {
+                "vcpu-count": 1,
+                "mempage-size": "1G",
+                "numa-node-policy": {
+                    "mem-policy": "STRICT",
+                },
+            },
         }
 
         guest_epa_numa_params.return_value = ({}, False)
@@ -1297,6 +1656,8 @@
     ):
         expected_result = {
             "mempage-size": "1G",
+            "cpu-pinning-policy": "DEDICATED",
+            "cpu-thread-pinning-policy": "PREFER",
             "numas": [
                 {
                     "cores": 3,
@@ -1363,13 +1724,15 @@
         }
 
         guest_epa_numa_params.return_value = (
-            {
-                "cores": 3,
-                "paired-threads": 3,
-                "paired-threads-id": [("0", "1"), ("4", "5")],
-                "threads": 3,
-                "memory": 2,
-            },
+            [
+                {
+                    "cores": 3,
+                    "paired-threads": 3,
+                    "paired-threads-id": [("0", "1"), ("4", "5")],
+                    "threads": 3,
+                    "memory": 2,
+                },
+            ],
             True,
         )
         guest_epa_cpu_pinning_params.return_value = (
@@ -1404,8 +1767,7 @@
         result = Ns._process_epa_params(
             target_flavor=target_flavor,
         )
-
-        self.assertDictEqual(expected_result, result)
+        self.assertEqual(expected_result, result)
         self.assertTrue(guest_epa_numa_params.called)
         self.assertTrue(guest_epa_cpu_pinning_params.called)
         self.assertTrue(guest_epa_quota_params.called)
@@ -1557,7 +1919,6 @@
         self,
         epa_params,
     ):
-        db = MagicMock(name="database mock")
         kwargs = {
             "db": db,
         }
@@ -1733,7 +2094,6 @@
         self,
         epa_params,
     ):
-        db = MagicMock(name="database mock")
 
         kwargs = {
             "db": db,
@@ -1910,7 +2270,6 @@
         self,
         epa_params,
     ):
-        db = MagicMock(name="database mock")
 
         kwargs = {
             "db": db,
@@ -2742,269 +3101,6 @@
             )
             self.assertEqual(result, expected_result)
 
-    def test__process_vdu_params_empty_kargs(self):
-        pass
-
-    def test__process_vdu_params_interface_ns_vld_id(self):
-        pass
-
-    def test__process_vdu_params_interface_vnf_vld_id(self):
-        pass
-
-    def test__process_vdu_params_interface_unknown(self):
-        pass
-
-    def test__process_vdu_params_interface_port_security_enabled(self):
-        pass
-
-    def test__process_vdu_params_interface_port_security_disable_strategy(self):
-        pass
-
-    def test__process_vdu_params_interface_sriov(self):
-        pass
-
-    def test__process_vdu_params_interface_pci_passthrough(self):
-        pass
-
-    def test__process_vdu_params_interface_om_mgmt(self):
-        pass
-
-    def test__process_vdu_params_interface_mgmt_interface(self):
-        pass
-
-    def test__process_vdu_params_interface_mgmt_vnf(self):
-        pass
-
-    def test__process_vdu_params_interface_bridge(self):
-        pass
-
-    def test__process_vdu_params_interface_ip_address(self):
-        pass
-
-    def test__process_vdu_params_interface_mac_address(self):
-        pass
-
-    def test__process_vdu_params_vdu_cloud_init_missing(self):
-        pass
-
-    def test__process_vdu_params_vdu_cloud_init_present(self):
-        pass
-
-    def test__process_vdu_params_vdu_boot_data_drive(self):
-        pass
-
-    def test__process_vdu_params_vdu_ssh_keys(self):
-        pass
-
-    def test__process_vdu_params_vdu_ssh_access_required(self):
-        pass
-
-    @patch("osm_ng_ro.ns.Ns._get_cloud_init")
-    @patch("osm_ng_ro.ns.Ns._parse_jinja2")
-    def test__process_vdu_params_vdu_persistent_root_volume(
-        self, get_cloud_init, parse_jinja2
-    ):
-        db = MagicMock(name="database mock")
-        kwargs = {
-            "db": db,
-            "vdu2cloud_init": {},
-            "vnfr": {
-                "vnfd-id": "ad6356e3-698c-43bf-9901-3aae9e9b9d18",
-                "member-vnf-index-ref": "vnf-several-volumes",
-            },
-        }
-        get_cloud_init.return_value = {}
-        parse_jinja2.return_value = {}
-        db.get_one.return_value = {
-            "_id": "ad6356e3-698c-43bf-9901-3aae9e9b9d18",
-            "df": [
-                {
-                    "id": "default-df",
-                    "vdu-profile": [
-                        {"id": "several_volumes-VM", "min-number-of-instances": 1}
-                    ],
-                }
-            ],
-            "id": "several_volumes-vnf",
-            "product-name": "several_volumes-vnf",
-            "vdu": [
-                {
-                    "id": "several_volumes-VM",
-                    "name": "several_volumes-VM",
-                    "sw-image-desc": "ubuntu20.04",
-                    "alternative-sw-image-desc": [
-                        "ubuntu20.04-aws",
-                        "ubuntu20.04-azure",
-                    ],
-                    "virtual-storage-desc": [
-                        "persistent-root-volume",
-                        "persistent-volume2",
-                        "ephemeral-volume",
-                    ],
-                }
-            ],
-            "version": "1.0",
-            "virtual-storage-desc": [
-                {
-                    "id": "persistent-volume2",
-                    "type-of-storage": "persistent-storage:persistent-storage",
-                    "size-of-storage": "10",
-                },
-                {
-                    "id": "persistent-root-volume",
-                    "type-of-storage": "persistent-storage:persistent-storage",
-                    "size-of-storage": "10",
-                },
-                {
-                    "id": "ephemeral-volume",
-                    "type-of-storage": "etsi-nfv-descriptors:ephemeral-storage",
-                    "size-of-storage": "1",
-                },
-            ],
-            "_admin": {
-                "storage": {
-                    "fs": "mongo",
-                    "path": "/app/storage/",
-                },
-                "type": "vnfd",
-            },
-        }
-
-        target_vdu = {
-            "_id": "09a0baa7-b7cb-4924-bd63-9f04a1c23960",
-            "ns-flavor-id": "0",
-            "ns-image-id": "0",
-            "vdu-name": "several_volumes-VM",
-            "interfaces": [
-                {
-                    "name": "vdu-eth0",
-                    "ns-vld-id": "mgmtnet",
-                }
-            ],
-            "virtual-storages": [
-                {
-                    "id": "persistent-volume2",
-                    "size-of-storage": "10",
-                    "type-of-storage": "persistent-storage:persistent-storage",
-                },
-                {
-                    "id": "persistent-root-volume",
-                    "size-of-storage": "10",
-                    "type-of-storage": "persistent-storage:persistent-storage",
-                },
-                {
-                    "id": "ephemeral-volume",
-                    "size-of-storage": "1",
-                    "type-of-storage": "etsi-nfv-descriptors:ephemeral-storage",
-                },
-            ],
-        }
-        indata = {
-            "name": "sample_name",
-        }
-        expected_result = [{"image_id": "ubuntu20.04", "size": "10"}, {"size": "10"}]
-        result = Ns._process_vdu_params(
-            target_vdu, indata, vim_info=None, target_record_id=None, **kwargs
-        )
-        self.assertEqual(
-            expected_result, result["params"]["disk_list"], "Wrong Disk List"
-        )
-
-    @patch("osm_ng_ro.ns.Ns._get_cloud_init")
-    @patch("osm_ng_ro.ns.Ns._parse_jinja2")
-    def test__process_vdu_params_vdu_without_persistent_storage(
-        self, get_cloud_init, parse_jinja2
-    ):
-        db = MagicMock(name="database mock")
-        kwargs = {
-            "db": db,
-            "vdu2cloud_init": {},
-            "vnfr": {
-                "vnfd-id": "ad6356e3-698c-43bf-9901-3aae9e9b9d18",
-                "member-vnf-index-ref": "vnf-several-volumes",
-            },
-        }
-        get_cloud_init.return_value = {}
-        parse_jinja2.return_value = {}
-        db.get_one.return_value = {
-            "_id": "ad6356e3-698c-43bf-9901-3aae9e9b9d18",
-            "df": [
-                {
-                    "id": "default-df",
-                    "vdu-profile": [
-                        {"id": "without_volumes-VM", "min-number-of-instances": 1}
-                    ],
-                }
-            ],
-            "id": "without_volumes-vnf",
-            "product-name": "without_volumes-vnf",
-            "vdu": [
-                {
-                    "id": "without_volumes-VM",
-                    "name": "without_volumes-VM",
-                    "sw-image-desc": "ubuntu20.04",
-                    "alternative-sw-image-desc": [
-                        "ubuntu20.04-aws",
-                        "ubuntu20.04-azure",
-                    ],
-                    "virtual-storage-desc": ["root-volume", "ephemeral-volume"],
-                }
-            ],
-            "version": "1.0",
-            "virtual-storage-desc": [
-                {"id": "root-volume", "size-of-storage": "10"},
-                {
-                    "id": "ephemeral-volume",
-                    "type-of-storage": "etsi-nfv-descriptors:ephemeral-storage",
-                    "size-of-storage": "1",
-                },
-            ],
-            "_admin": {
-                "storage": {
-                    "fs": "mongo",
-                    "path": "/app/storage/",
-                },
-                "type": "vnfd",
-            },
-        }
-
-        target_vdu = {
-            "_id": "09a0baa7-b7cb-4924-bd63-9f04a1c23960",
-            "ns-flavor-id": "0",
-            "ns-image-id": "0",
-            "vdu-name": "without_volumes-VM",
-            "interfaces": [
-                {
-                    "name": "vdu-eth0",
-                    "ns-vld-id": "mgmtnet",
-                }
-            ],
-            "virtual-storages": [
-                {
-                    "id": "root-volume",
-                    "size-of-storage": "10",
-                },
-                {
-                    "id": "ephemeral-volume",
-                    "size-of-storage": "1",
-                    "type-of-storage": "etsi-nfv-descriptors:ephemeral-storage",
-                },
-            ],
-        }
-        indata = {
-            "name": "sample_name",
-        }
-        expected_result = []
-        result = Ns._process_vdu_params(
-            target_vdu, indata, vim_info=None, target_record_id=None, **kwargs
-        )
-        self.assertEqual(
-            expected_result, result["params"]["disk_list"], "Wrong Disk List"
-        )
-
-    def test__process_vdu_params(self):
-        pass
-
     @patch("osm_ng_ro.ns.Ns._assign_vim")
     def test__rebuild_start_stop_task(self, assign_vim):
         self.ns = Ns()
@@ -3142,3 +3238,2281 @@
         )
 
         self.assertDictEqual(task, expected_result)
+
+
+class TestProcessVduParams(unittest.TestCase):
+    def setUp(self):
+        self.ns = Ns()
+        self.logger = CopyingMock(autospec=True)
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_find_persistent_root_volumes_empty_instantiation_vol_list(
+        self, mock_volume_keeping_required
+    ):
+        """Find persistent root volume, instantiation_vol_list is empty."""
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        target_vdu = target_vdu_wth_persistent_storage
+        vdu_instantiation_volumes_list = []
+        disk_list = []
+        mock_volume_keeping_required.return_value = True
+        expected_root_disk = {
+            "id": "persistent-root-volume",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "size-of-storage": "10",
+            "vdu-storage-requirements": [{"key": "keep-volume", "value": "true"}],
+        }
+        expected_persist_root_disk = {
+            "persistent-root-volume": {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": True,
+            }
+        }
+        expected_disk_list = [
+            {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": True,
+            },
+        ]
+        persist_root_disk = self.ns.find_persistent_root_volumes(
+            vnfd, target_vdu, vdu_instantiation_volumes_list, disk_list
+        )
+        self.assertEqual(persist_root_disk, expected_persist_root_disk)
+        mock_volume_keeping_required.assert_called_once_with(expected_root_disk)
+        self.assertEqual(disk_list, expected_disk_list)
+        self.assertEqual(len(disk_list), 1)
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_find_persistent_root_volumes_always_selects_first_vsd_as_root(
+        self, mock_volume_keeping_required
+    ):
+        """Find persistent root volume, always selects the first vsd as root volume."""
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        vnfd["vdu"][0]["virtual-storage-desc"] = [
+            "persistent-volume2",
+            "persistent-root-volume",
+            "ephemeral-volume",
+        ]
+        target_vdu = target_vdu_wth_persistent_storage
+        vdu_instantiation_volumes_list = []
+        disk_list = []
+        mock_volume_keeping_required.return_value = True
+        expected_root_disk = {
+            "id": "persistent-volume2",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "size-of-storage": "10",
+        }
+        expected_persist_root_disk = {
+            "persistent-volume2": {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": True,
+            }
+        }
+        expected_disk_list = [
+            {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": True,
+            },
+        ]
+        persist_root_disk = self.ns.find_persistent_root_volumes(
+            vnfd, target_vdu, vdu_instantiation_volumes_list, disk_list
+        )
+        self.assertEqual(persist_root_disk, expected_persist_root_disk)
+        mock_volume_keeping_required.assert_called_once_with(expected_root_disk)
+        self.assertEqual(disk_list, expected_disk_list)
+        self.assertEqual(len(disk_list), 1)
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_find_persistent_root_volumes_empty_size_of_storage(
+        self, mock_volume_keeping_required
+    ):
+        """Find persistent root volume, size of storage is empty."""
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        vnfd["virtual-storage-desc"][0]["size-of-storage"] = ""
+        vnfd["vdu"][0]["virtual-storage-desc"] = [
+            "persistent-volume2",
+            "persistent-root-volume",
+            "ephemeral-volume",
+        ]
+        target_vdu = target_vdu_wth_persistent_storage
+        vdu_instantiation_volumes_list = []
+        disk_list = []
+        persist_root_disk = self.ns.find_persistent_root_volumes(
+            vnfd, target_vdu, vdu_instantiation_volumes_list, disk_list
+        )
+        self.assertEqual(persist_root_disk, None)
+        mock_volume_keeping_required.assert_not_called()
+        self.assertEqual(disk_list, [])
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_find_persistent_root_volumes_keeping_is_not_required(
+        self, mock_volume_keeping_required
+    ):
+        """Find persistent root volume, volume keeping is not required."""
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        vnfd["virtual-storage-desc"][1]["vdu-storage-requirements"] = [
+            {"key": "keep-volume", "value": "false"},
+        ]
+        target_vdu = target_vdu_wth_persistent_storage
+        vdu_instantiation_volumes_list = []
+        disk_list = []
+        mock_volume_keeping_required.return_value = False
+        expected_root_disk = {
+            "id": "persistent-root-volume",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "size-of-storage": "10",
+            "vdu-storage-requirements": [{"key": "keep-volume", "value": "false"}],
+        }
+        expected_persist_root_disk = {
+            "persistent-root-volume": {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            }
+        }
+        expected_disk_list = [
+            {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            },
+        ]
+        persist_root_disk = self.ns.find_persistent_root_volumes(
+            vnfd, target_vdu, vdu_instantiation_volumes_list, disk_list
+        )
+        self.assertEqual(persist_root_disk, expected_persist_root_disk)
+        mock_volume_keeping_required.assert_called_once_with(expected_root_disk)
+        self.assertEqual(disk_list, expected_disk_list)
+        self.assertEqual(len(disk_list), 1)
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_find_persistent_root_volumes_target_vdu_mismatch(
+        self, mock_volume_keeping_required
+    ):
+        """Find persistent root volume, target vdu name does not match."""
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        vnfd["vdu"][0]["name"] = "Several_Volumes-VM"
+        target_vdu = target_vdu_wth_persistent_storage
+        vdu_instantiation_volumes_list = []
+        disk_list = []
+        result = self.ns.find_persistent_root_volumes(
+            vnfd, target_vdu, vdu_instantiation_volumes_list, disk_list
+        )
+        self.assertEqual(result, None)
+        mock_volume_keeping_required.assert_not_called()
+        self.assertEqual(disk_list, [])
+        self.assertEqual(len(disk_list), 0)
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_find_persistent_root_volumes_with_instantiation_vol_list(
+        self, mock_volume_keeping_required
+    ):
+        """Find persistent root volume, existing volume needs to be used."""
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        target_vdu = target_vdu_wth_persistent_storage
+        vdu_instantiation_volumes_list = [
+            {
+                "vim-volume-id": vim_volume_id,
+                "name": "persistent-root-volume",
+            }
+        ]
+        disk_list = []
+        expected_persist_root_disk = {
+            "persistent-root-volume": {
+                "vim_volume_id": vim_volume_id,
+                "image_id": "ubuntu20.04",
+            },
+        }
+        expected_disk_list = [
+            {
+                "vim_volume_id": vim_volume_id,
+                "image_id": "ubuntu20.04",
+            },
+        ]
+        persist_root_disk = self.ns.find_persistent_root_volumes(
+            vnfd, target_vdu, vdu_instantiation_volumes_list, disk_list
+        )
+        self.assertEqual(persist_root_disk, expected_persist_root_disk)
+        mock_volume_keeping_required.assert_not_called()
+        self.assertEqual(disk_list, expected_disk_list)
+        self.assertEqual(len(disk_list), 1)
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_find_persistent_root_volumes_invalid_instantiation_params(
+        self, mock_volume_keeping_required
+    ):
+        """Find persistent root volume, existing volume id keyword is invalid."""
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        target_vdu = target_vdu_wth_persistent_storage
+        vdu_instantiation_volumes_list = [
+            {
+                "volume-id": vim_volume_id,
+                "name": "persistent-root-volume",
+            }
+        ]
+        disk_list = []
+        with self.assertRaises(KeyError):
+            self.ns.find_persistent_root_volumes(
+                vnfd, target_vdu, vdu_instantiation_volumes_list, disk_list
+            )
+        mock_volume_keeping_required.assert_not_called()
+        self.assertEqual(disk_list, [])
+        self.assertEqual(len(disk_list), 0)
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_find_persistent_volumes_vdu_wth_persistent_root_disk_wthout_inst_vol_list(
+        self, mock_volume_keeping_required
+    ):
+        """Find persistent ordinary volume, there is a persistent root disk and the instantiation volume list is empty."""
+        persistent_root_disk = {
+            "persistent-root-volume": {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            }
+        }
+        mock_volume_keeping_required.return_value = False
+        target_vdu = target_vdu_wth_persistent_storage
+        vdu_instantiation_volumes_list = []
+        disk_list = [
+            {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            },
+        ]
+        expected_disk = {
+            "id": "persistent-volume2",
+            "size-of-storage": "10",
+            "type-of-storage": "persistent-storage:persistent-storage",
+        }
+        expected_disk_list = [
+            {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            },
+            {
+                "size": "10",
+                "keep": False,
+            },
+        ]
+        self.ns.find_persistent_volumes(
+            persistent_root_disk, target_vdu, vdu_instantiation_volumes_list, disk_list
+        )
+        self.assertEqual(disk_list, expected_disk_list)
+        mock_volume_keeping_required.assert_called_once_with(expected_disk)
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_find_persistent_volumes_vdu_wth_inst_vol_list(
+        self, mock_volume_keeping_required
+    ):
+        """Find persistent ordinary volume, vim-volume-id is given as instantiation parameter."""
+        persistent_root_disk = {
+            "persistent-root-volume": {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            }
+        }
+        vdu_instantiation_volumes_list = [
+            {
+                "vim-volume-id": vim_volume_id,
+                "name": "persistent-volume2",
+            }
+        ]
+        target_vdu = target_vdu_wth_persistent_storage
+        disk_list = [
+            {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            },
+        ]
+        expected_disk_list = [
+            {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            },
+            {
+                "vim_volume_id": vim_volume_id,
+            },
+        ]
+        self.ns.find_persistent_volumes(
+            persistent_root_disk, target_vdu, vdu_instantiation_volumes_list, disk_list
+        )
+        self.assertEqual(disk_list, expected_disk_list)
+        mock_volume_keeping_required.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_find_persistent_volumes_vdu_wthout_persistent_storage(
+        self, mock_volume_keeping_required
+    ):
+        """Find persistent ordinary volume, there is no persistent disk."""
+        persistent_root_disk = {}
+        vdu_instantiation_volumes_list = []
+        mock_volume_keeping_required.return_value = False
+        target_vdu = target_vdu_wthout_persistent_storage
+        disk_list = []
+        self.ns.find_persistent_volumes(
+            persistent_root_disk, target_vdu, vdu_instantiation_volumes_list, disk_list
+        )
+        self.assertEqual(disk_list, disk_list)
+        mock_volume_keeping_required.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_find_persistent_volumes_vdu_wth_persistent_root_disk_wthout_ordinary_disk(
+        self, mock_volume_keeping_required
+    ):
+        """There is persistent root disk, but there is not ordinary persistent disk."""
+        persistent_root_disk = {
+            "persistent-root-volume": {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            }
+        }
+        vdu_instantiation_volumes_list = []
+        mock_volume_keeping_required.return_value = False
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["virtual-storages"] = [
+            {
+                "id": "persistent-root-volume",
+                "size-of-storage": "10",
+                "type-of-storage": "persistent-storage:persistent-storage",
+                "vdu-storage-requirements": [
+                    {"key": "keep-volume", "value": "true"},
+                ],
+            },
+            {
+                "id": "ephemeral-volume",
+                "size-of-storage": "1",
+                "type-of-storage": "etsi-nfv-descriptors:ephemeral-storage",
+            },
+        ]
+        disk_list = [
+            {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            },
+        ]
+        self.ns.find_persistent_volumes(
+            persistent_root_disk, target_vdu, vdu_instantiation_volumes_list, disk_list
+        )
+        self.assertEqual(disk_list, disk_list)
+        mock_volume_keeping_required.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_find_persistent_volumes_wth_inst_vol_list_disk_id_mismatch(
+        self, mock_volume_keeping_required
+    ):
+        """Find persistent ordinary volume, volume id is not in the persistent_root_disk dict;
+        vim-volume-id is given as an instantiation parameter but the disk id does not match."""
+        mock_volume_keeping_required.return_value = True
+        vdu_instantiation_volumes_list = [
+            {
+                "vim-volume-id": vim_volume_id,
+                "name": "persistent-volume3",
+            }
+        ]
+        persistent_root_disk = {
+            "persistent-root-volume": {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            }
+        }
+        disk_list = [
+            {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            },
+        ]
+        expected_disk_list = [
+            {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            },
+            {
+                "size": "10",
+                "keep": True,
+            },
+        ]
+        expected_disk = {
+            "id": "persistent-volume2",
+            "size-of-storage": "10",
+            "type-of-storage": "persistent-storage:persistent-storage",
+        }
+        target_vdu = target_vdu_wth_persistent_storage
+        self.ns.find_persistent_volumes(
+            persistent_root_disk, target_vdu, vdu_instantiation_volumes_list, disk_list
+        )
+        self.assertEqual(disk_list, expected_disk_list)
+        mock_volume_keeping_required.assert_called_once_with(expected_disk)
+
+    def test_is_volume_keeping_required_true(self):
+        """Volume keeping is required."""
+        virtual_storage_descriptor = {
+            "id": "persistent-root-volume",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "size-of-storage": "10",
+            "vdu-storage-requirements": [
+                {"key": "keep-volume", "value": "true"},
+            ],
+        }
+        result = self.ns.is_volume_keeping_required(virtual_storage_descriptor)
+        self.assertEqual(result, True)
+
+    def test_is_volume_keeping_required_false(self):
+        """Volume keeping is not required."""
+        virtual_storage_descriptor = {
+            "id": "persistent-root-volume",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "size-of-storage": "10",
+            "vdu-storage-requirements": [
+                {"key": "keep-volume", "value": "false"},
+            ],
+        }
+        result = self.ns.is_volume_keeping_required(virtual_storage_descriptor)
+        self.assertEqual(result, False)
+
+    def test_is_volume_keeping_required_wthout_vdu_storage_reqirement(self):
+        """Volume keeping is not required, vdu-storage-requirements key does not exist."""
+        virtual_storage_descriptor = {
+            "id": "persistent-root-volume",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "size-of-storage": "10",
+        }
+        result = self.ns.is_volume_keeping_required(virtual_storage_descriptor)
+        self.assertEqual(result, False)
+
+    def test_is_volume_keeping_required_wrong_keyword(self):
+        """The vdu-storage-requirements key used to indicate volume keeping is wrong."""
+        virtual_storage_descriptor = {
+            "id": "persistent-root-volume",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "size-of-storage": "10",
+            "vdu-storage-requirements": [
+                {"key": "hold-volume", "value": "true"},
+            ],
+        }
+        result = self.ns.is_volume_keeping_required(virtual_storage_descriptor)
+        self.assertEqual(result, False)
+
+    def test_sort_vdu_interfaces_position_all_wth_positions(self):
+        """Interfaces are sorted according to position, all have positions."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["interfaces"] = [
+            {
+                "name": "vdu-eth1",
+                "ns-vld-id": "datanet",
+                "position": 2,
+            },
+            {
+                "name": "vdu-eth0",
+                "ns-vld-id": "mgmtnet",
+                "position": 1,
+            },
+        ]
+        sorted_interfaces = [
+            {
+                "name": "vdu-eth0",
+                "ns-vld-id": "mgmtnet",
+                "position": 1,
+            },
+            {
+                "name": "vdu-eth1",
+                "ns-vld-id": "datanet",
+                "position": 2,
+            },
+        ]
+        self.ns._sort_vdu_interfaces(target_vdu)
+        self.assertEqual(target_vdu["interfaces"], sorted_interfaces)
+
+    def test_sort_vdu_interfaces_position_some_wth_position(self):
+        """Interfaces are sorted according to position, some of them have positions."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["interfaces"] = [
+            {
+                "name": "vdu-eth0",
+                "ns-vld-id": "mgmtnet",
+            },
+            {
+                "name": "vdu-eth1",
+                "ns-vld-id": "datanet",
+                "position": 1,
+            },
+        ]
+        sorted_interfaces = [
+            {
+                "name": "vdu-eth1",
+                "ns-vld-id": "datanet",
+                "position": 1,
+            },
+            {
+                "name": "vdu-eth0",
+                "ns-vld-id": "mgmtnet",
+            },
+        ]
+        self.ns._sort_vdu_interfaces(target_vdu)
+        self.assertEqual(target_vdu["interfaces"], sorted_interfaces)
+
+    def test_sort_vdu_interfaces_position_empty_interface_list(self):
+        """Interface list is empty."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["interfaces"] = []
+        sorted_interfaces = []
+        self.ns._sort_vdu_interfaces(target_vdu)
+        self.assertEqual(target_vdu["interfaces"], sorted_interfaces)
+
+    def test_partially_locate_vdu_interfaces(self):
+        """Some interfaces have positions."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["interfaces"] = [
+            {
+                "name": "vdu-eth1",
+                "ns-vld-id": "net1",
+            },
+            {"name": "vdu-eth2", "ns-vld-id": "net2", "position": 3},
+            {
+                "name": "vdu-eth3",
+                "ns-vld-id": "mgmtnet",
+            },
+            {
+                "name": "vdu-eth1",
+                "ns-vld-id": "datanet",
+                "position": 1,
+            },
+        ]
+        self.ns._partially_locate_vdu_interfaces(target_vdu)
+        self.assertDictEqual(
+            target_vdu["interfaces"][0],
+            {
+                "name": "vdu-eth1",
+                "ns-vld-id": "datanet",
+                "position": 1,
+            },
+        )
+        self.assertDictEqual(
+            target_vdu["interfaces"][2],
+            {"name": "vdu-eth2", "ns-vld-id": "net2", "position": 3},
+        )
+
+    def test_partially_locate_vdu_interfaces_position_start_from_0(self):
+        """Some interfaces have positions, position start from 0."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["interfaces"] = [
+            {
+                "name": "vdu-eth1",
+                "ns-vld-id": "net1",
+            },
+            {"name": "vdu-eth2", "ns-vld-id": "net2", "position": 3},
+            {
+                "name": "vdu-eth3",
+                "ns-vld-id": "mgmtnet",
+            },
+            {
+                "name": "vdu-eth1",
+                "ns-vld-id": "datanet",
+                "position": 0,
+            },
+        ]
+        self.ns._partially_locate_vdu_interfaces(target_vdu)
+        self.assertDictEqual(
+            target_vdu["interfaces"][0],
+            {
+                "name": "vdu-eth1",
+                "ns-vld-id": "datanet",
+                "position": 0,
+            },
+        )
+        self.assertDictEqual(
+            target_vdu["interfaces"][3],
+            {"name": "vdu-eth2", "ns-vld-id": "net2", "position": 3},
+        )
+
+    def test_partially_locate_vdu_interfaces_wthout_position(self):
+        """Interfaces do not have positions."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["interfaces"] = interfaces_wthout_positions
+        expected_result = deepcopy(target_vdu["interfaces"])
+        self.ns._partially_locate_vdu_interfaces(target_vdu)
+        self.assertEqual(target_vdu["interfaces"], expected_result)
+
+    def test_partially_locate_vdu_interfaces_all_has_position(self):
+        """All interfaces have position."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["interfaces"] = interfaces_wth_all_positions
+        expected_interfaces = [
+            {
+                "name": "vdu-eth2",
+                "ns-vld-id": "net2",
+                "position": 0,
+            },
+            {
+                "name": "vdu-eth3",
+                "ns-vld-id": "mgmtnet",
+                "position": 1,
+            },
+            {
+                "name": "vdu-eth1",
+                "ns-vld-id": "net1",
+                "position": 2,
+            },
+        ]
+        self.ns._partially_locate_vdu_interfaces(target_vdu)
+        self.assertEqual(target_vdu["interfaces"], expected_interfaces)
+
+    @patch("osm_ng_ro.ns.Ns._get_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._parse_jinja2")
+    def test_prepare_vdu_cloud_init(self, mock_parse_jinja2, mock_get_cloud_init):
+        """Target_vdu has cloud-init and boot-data-drive."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["cloud-init"] = "sample-cloud-init-path"
+        target_vdu["boot-data-drive"] = "vda"
+        vdu2cloud_init = {}
+        mock_get_cloud_init.return_value = cloud_init_content
+        mock_parse_jinja2.return_value = user_data
+        expected_result = {
+            "user-data": user_data,
+            "boot-data-drive": "vda",
+        }
+        result = self.ns._prepare_vdu_cloud_init(target_vdu, vdu2cloud_init, db, fs)
+        self.assertDictEqual(result, expected_result)
+        mock_get_cloud_init.assert_called_once_with(
+            db=db, fs=fs, location="sample-cloud-init-path"
+        )
+        mock_parse_jinja2.assert_called_once_with(
+            cloud_init_content=cloud_init_content,
+            params=None,
+            context="sample-cloud-init-path",
+        )
+
+    @patch("osm_ng_ro.ns.Ns._get_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._parse_jinja2")
+    def test_prepare_vdu_cloud_init_get_cloud_init_raise_exception(
+        self, mock_parse_jinja2, mock_get_cloud_init
+    ):
+        """Target_vdu has cloud-init and boot-data-drive, get_cloud_init method raises exception."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["cloud-init"] = "sample-cloud-init-path"
+        target_vdu["boot-data-drive"] = "vda"
+        vdu2cloud_init = {}
+        mock_get_cloud_init.side_effect = NsException(
+            "Mismatch descriptor for cloud init."
+        )
+
+        with self.assertRaises(NsException) as err:
+            self.ns._prepare_vdu_cloud_init(target_vdu, vdu2cloud_init, db, fs)
+            self.assertEqual(str(err.exception), "Mismatch descriptor for cloud init.")
+
+        mock_get_cloud_init.assert_called_once_with(
+            db=db, fs=fs, location="sample-cloud-init-path"
+        )
+        mock_parse_jinja2.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns._get_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._parse_jinja2")
+    def test_prepare_vdu_cloud_init_parse_jinja2_raise_exception(
+        self, mock_parse_jinja2, mock_get_cloud_init
+    ):
+        """Target_vdu has cloud-init and boot-data-drive, parse_jinja2 method raises exception."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["cloud-init"] = "sample-cloud-init-path"
+        target_vdu["boot-data-drive"] = "vda"
+        vdu2cloud_init = {}
+        mock_get_cloud_init.return_value = cloud_init_content
+        mock_parse_jinja2.side_effect = NsException("Error parsing cloud-init content.")
+
+        with self.assertRaises(NsException) as err:
+            self.ns._prepare_vdu_cloud_init(target_vdu, vdu2cloud_init, db, fs)
+            self.assertEqual(str(err.exception), "Error parsing cloud-init content.")
+        mock_get_cloud_init.assert_called_once_with(
+            db=db, fs=fs, location="sample-cloud-init-path"
+        )
+        mock_parse_jinja2.assert_called_once_with(
+            cloud_init_content=cloud_init_content,
+            params=None,
+            context="sample-cloud-init-path",
+        )
+
+    @patch("osm_ng_ro.ns.Ns._get_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._parse_jinja2")
+    def test_prepare_vdu_cloud_init_vdu_wthout_boot_data_drive(
+        self, mock_parse_jinja2, mock_get_cloud_init
+    ):
+        """Target_vdu has cloud-init but does not have boot-data-drive."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["cloud-init"] = "sample-cloud-init-path"
+        vdu2cloud_init = {}
+        mock_get_cloud_init.return_value = cloud_init_content
+        mock_parse_jinja2.return_value = user_data
+        expected_result = {
+            "user-data": user_data,
+        }
+        result = self.ns._prepare_vdu_cloud_init(target_vdu, vdu2cloud_init, db, fs)
+        self.assertDictEqual(result, expected_result)
+        mock_get_cloud_init.assert_called_once_with(
+            db=db, fs=fs, location="sample-cloud-init-path"
+        )
+        mock_parse_jinja2.assert_called_once_with(
+            cloud_init_content=cloud_init_content,
+            params=None,
+            context="sample-cloud-init-path",
+        )
+
+    @patch("osm_ng_ro.ns.Ns._get_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._parse_jinja2")
+    def test_prepare_vdu_cloud_init_exists_in_vdu2cloud_init(
+        self, mock_parse_jinja2, mock_get_cloud_init
+    ):
+        """Target_vdu has cloud-init, vdu2cloud_init dict has cloud-init content."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["cloud-init"] = "sample-cloud-init-path"
+        target_vdu["boot-data-drive"] = "vda"
+        vdu2cloud_init = {"sample-cloud-init-path": cloud_init_content}
+        mock_parse_jinja2.return_value = user_data
+        expected_result = {
+            "user-data": user_data,
+            "boot-data-drive": "vda",
+        }
+        result = self.ns._prepare_vdu_cloud_init(target_vdu, vdu2cloud_init, db, fs)
+        self.assertDictEqual(result, expected_result)
+        mock_get_cloud_init.assert_not_called()
+        mock_parse_jinja2.assert_called_once_with(
+            cloud_init_content=cloud_init_content,
+            params=None,
+            context="sample-cloud-init-path",
+        )
+
+    @patch("osm_ng_ro.ns.Ns._get_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._parse_jinja2")
+    def test_prepare_vdu_cloud_init_no_cloud_init(
+        self, mock_parse_jinja2, mock_get_cloud_init
+    ):
+        """Target_vdu does not have cloud-init."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["boot-data-drive"] = "vda"
+        vdu2cloud_init = {}
+        expected_result = {
+            "boot-data-drive": "vda",
+        }
+        result = self.ns._prepare_vdu_cloud_init(target_vdu, vdu2cloud_init, db, fs)
+        self.assertDictEqual(result, expected_result)
+        mock_get_cloud_init.assert_not_called()
+        mock_parse_jinja2.assert_not_called()
+
+    def test_check_vld_information_of_interfaces_ns_vld_vnf_vld_both_exist(self):
+        """ns_vld and vnf_vld both exist."""
+        interface = {
+            "name": "vdu-eth0",
+            "ns-vld-id": "mgmtnet",
+            "vnf-vld-id": "mgmt_cp_int",
+        }
+        expected_result = f"{ns_preffix}:vld.mgmtnet"
+        result = self.ns._check_vld_information_of_interfaces(
+            interface, ns_preffix, vnf_preffix
+        )
+        self.assertEqual(result, expected_result)
+
+    def test_check_vld_information_of_interfaces_empty_interfaces(self):
+        """Interface dict is empty."""
+        interface = {}
+        result = self.ns._check_vld_information_of_interfaces(
+            interface, ns_preffix, vnf_preffix
+        )
+        self.assertEqual(result, "")
+
+    def test_check_vld_information_of_interfaces_has_only_vnf_vld(self):
+        """Interface dict has only vnf_vld."""
+        interface = {
+            "name": "vdu-eth0",
+            "vnf-vld-id": "mgmt_cp_int",
+        }
+        expected_result = f"{vnf_preffix}:vld.mgmt_cp_int"
+        result = self.ns._check_vld_information_of_interfaces(
+            interface, ns_preffix, vnf_preffix
+        )
+        self.assertEqual(result, expected_result)
+
+    def test_check_vld_information_of_interfaces_has_vnf_vld_wthout_vnf_prefix(
+        self,
+    ):
+        """Interface dict has only vnf_vld but vnf_preffix does not exist."""
+        interface = {
+            "name": "vdu-eth0",
+            "vnf-vld-id": "mgmt_cp_int",
+        }
+        vnf_preffix = None
+        with self.assertRaises(Exception) as err:
+            self.ns._check_vld_information_of_interfaces(
+                interface, ns_preffix, vnf_preffix
+            )
+            self.assertEqual(type(err), TypeError)
+
+    def test_prepare_interface_port_security_has_security_details(self):
+        """Interface dict has port security details."""
+        interface = {
+            "name": "vdu-eth0",
+            "ns-vld-id": "mgmtnet",
+            "vnf-vld-id": "mgmt_cp_int",
+            "port-security-enabled": True,
+            "port-security-disable-strategy": "allow-address-pairs",
+        }
+        expected_interface = {
+            "name": "vdu-eth0",
+            "ns-vld-id": "mgmtnet",
+            "vnf-vld-id": "mgmt_cp_int",
+            "port_security": True,
+            "port_security_disable_strategy": "allow-address-pairs",
+        }
+        self.ns._prepare_interface_port_security(interface)
+        self.assertDictEqual(interface, expected_interface)
+
+    def test_prepare_interface_port_security_empty_interfaces(self):
+        """Interface dict is empty."""
+        interface = {}
+        expected_interface = {}
+        self.ns._prepare_interface_port_security(interface)
+        self.assertDictEqual(interface, expected_interface)
+
+    def test_prepare_interface_port_security_wthout_port_security(self):
+        """Interface dict does not have port security details."""
+        interface = {
+            "name": "vdu-eth0",
+            "ns-vld-id": "mgmtnet",
+            "vnf-vld-id": "mgmt_cp_int",
+        }
+        expected_interface = {
+            "name": "vdu-eth0",
+            "ns-vld-id": "mgmtnet",
+            "vnf-vld-id": "mgmt_cp_int",
+        }
+        self.ns._prepare_interface_port_security(interface)
+        self.assertDictEqual(interface, expected_interface)
+
+    def test_create_net_item_of_interface_floating_ip_port_security(self):
+        """Interface dict has floating ip, port-security details."""
+        interface = {
+            "name": "vdu-eth0",
+            "vcpi": "sample_vcpi",
+            "port_security": True,
+            "port_security_disable_strategy": "allow-address-pairs",
+            "floating_ip": "10.1.1.12",
+            "ns-vld-id": "mgmtnet",
+            "vnf-vld-id": "mgmt_cp_int",
+        }
+        net_text = f"{ns_preffix}"
+        expected_net_item = {
+            "name": "vdu-eth0",
+            "port_security": True,
+            "port_security_disable_strategy": "allow-address-pairs",
+            "floating_ip": "10.1.1.12",
+            "net_id": f"TASK-{ns_preffix}",
+            "type": "virtual",
+        }
+        result = self.ns._create_net_item_of_interface(interface, net_text)
+        self.assertDictEqual(result, expected_net_item)
+
+    def test_create_net_item_of_interface_invalid_net_text(self):
+        """net-text is invalid."""
+        interface = {
+            "name": "vdu-eth0",
+            "vcpi": "sample_vcpi",
+            "port_security": True,
+            "port_security_disable_strategy": "allow-address-pairs",
+            "floating_ip": "10.1.1.12",
+            "ns-vld-id": "mgmtnet",
+            "vnf-vld-id": "mgmt_cp_int",
+        }
+        net_text = None
+        with self.assertRaises(TypeError):
+            self.ns._create_net_item_of_interface(interface, net_text)
+
+    def test_create_net_item_of_interface_empty_interface(self):
+        """Interface dict is empty."""
+        interface = {}
+        net_text = ns_preffix
+        expected_net_item = {
+            "net_id": f"TASK-{ns_preffix}",
+            "type": "virtual",
+        }
+        result = self.ns._create_net_item_of_interface(interface, net_text)
+        self.assertDictEqual(result, expected_net_item)
+
+    @patch("osm_ng_ro.ns.deep_get")
+    def test_prepare_type_of_interface_type_sriov(self, mock_deep_get):
+        """Interface type is SR-IOV."""
+        interface = {
+            "name": "vdu-eth0",
+            "vcpi": "sample_vcpi",
+            "port_security": True,
+            "port_security_disable_strategy": "allow-address-pairs",
+            "floating_ip": "10.1.1.12",
+            "ns-vld-id": "mgmtnet",
+            "vnf-vld-id": "mgmt_cp_int",
+            "type": "SR-IOV",
+        }
+        mock_deep_get.return_value = "SR-IOV"
+        net_text = ns_preffix
+        net_item = {}
+        expected_net_item = {
+            "use": "data",
+            "model": "SR-IOV",
+            "type": "SR-IOV",
+        }
+        self.ns._prepare_type_of_interface(
+            interface, tasks_by_target_record_id, net_text, net_item
+        )
+        self.assertDictEqual(net_item, expected_net_item)
+        self.assertEqual(
+            "data",
+            tasks_by_target_record_id[net_text]["extra_dict"]["params"]["net_type"],
+        )
+        mock_deep_get.assert_called_once_with(
+            tasks_by_target_record_id, net_text, "extra_dict", "params", "net_type"
+        )
+
+    @patch("osm_ng_ro.ns.deep_get")
+    def test_prepare_type_of_interface_type_pci_passthrough_deep_get_return_empty_dict(
+        self, mock_deep_get
+    ):
+        """Interface type is PCI-PASSTHROUGH, deep_get method return empty dict."""
+        interface = {
+            "name": "vdu-eth0",
+            "vcpi": "sample_vcpi",
+            "port_security": True,
+            "port_security_disable_strategy": "allow-address-pairs",
+            "floating_ip": "10.1.1.12",
+            "ns-vld-id": "mgmtnet",
+            "vnf-vld-id": "mgmt_cp_int",
+            "type": "PCI-PASSTHROUGH",
+        }
+        mock_deep_get.return_value = {}
+        tasks_by_target_record_id = {}
+        net_text = ns_preffix
+        net_item = {}
+        expected_net_item = {
+            "use": "data",
+            "model": "PCI-PASSTHROUGH",
+            "type": "PCI-PASSTHROUGH",
+        }
+        self.ns._prepare_type_of_interface(
+            interface, tasks_by_target_record_id, net_text, net_item
+        )
+        self.assertDictEqual(net_item, expected_net_item)
+        mock_deep_get.assert_called_once_with(
+            tasks_by_target_record_id, net_text, "extra_dict", "params", "net_type"
+        )
+
+    @patch("osm_ng_ro.ns.deep_get")
+    def test_prepare_type_of_interface_type_mgmt(self, mock_deep_get):
+        """Interface type is mgmt."""
+        interface = {
+            "name": "vdu-eth0",
+            "vcpi": "sample_vcpi",
+            "port_security": True,
+            "port_security_disable_strategy": "allow-address-pairs",
+            "floating_ip": "10.1.1.12",
+            "ns-vld-id": "mgmtnet",
+            "vnf-vld-id": "mgmt_cp_int",
+            "type": "OM-MGMT",
+        }
+        tasks_by_target_record_id = {}
+        net_text = ns_preffix
+        net_item = {}
+        expected_net_item = {
+            "use": "mgmt",
+        }
+        self.ns._prepare_type_of_interface(
+            interface, tasks_by_target_record_id, net_text, net_item
+        )
+        self.assertDictEqual(net_item, expected_net_item)
+        mock_deep_get.assert_not_called()
+
+    @patch("osm_ng_ro.ns.deep_get")
+    def test_prepare_type_of_interface_type_bridge(self, mock_deep_get):
+        """Interface type is bridge."""
+        interface = {
+            "name": "vdu-eth0",
+            "vcpi": "sample_vcpi",
+            "port_security": True,
+            "port_security_disable_strategy": "allow-address-pairs",
+            "floating_ip": "10.1.1.12",
+            "ns-vld-id": "mgmtnet",
+            "vnf-vld-id": "mgmt_cp_int",
+        }
+        tasks_by_target_record_id = {}
+        net_text = ns_preffix
+        net_item = {}
+        expected_net_item = {
+            "use": "bridge",
+            "model": None,
+        }
+        self.ns._prepare_type_of_interface(
+            interface, tasks_by_target_record_id, net_text, net_item
+        )
+        self.assertDictEqual(net_item, expected_net_item)
+        mock_deep_get.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns._check_vld_information_of_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_interface_port_security")
+    @patch("osm_ng_ro.ns.Ns._create_net_item_of_interface")
+    @patch("osm_ng_ro.ns.Ns._prepare_type_of_interface")
+    def test_prepare_vdu_interfaces(
+        self,
+        mock_type_of_interface,
+        mock_item_of_interface,
+        mock_port_security,
+        mock_vld_information_of_interface,
+    ):
+        """Prepare vdu interfaces successfully."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        interface_1 = {
+            "name": "vdu-eth1",
+            "ns-vld-id": "net1",
+            "ip-address": "13.2.12.31",
+            "mgmt-interface": True,
+        }
+        interface_2 = {
+            "name": "vdu-eth2",
+            "vnf-vld-id": "net2",
+            "mac-address": "d0:94:66:ed:fc:e2",
+        }
+        interface_3 = {
+            "name": "vdu-eth3",
+            "ns-vld-id": "mgmtnet",
+        }
+        target_vdu["interfaces"] = [interface_1, interface_2, interface_3]
+        extra_dict = {
+            "params": "test_params",
+            "find_params": "test_find_params",
+            "depends_on": [],
+        }
+
+        net_text_1 = f"{ns_preffix}:net1"
+        net_text_2 = f"{vnf_preffix}:net2"
+        net_text_3 = f"{ns_preffix}:mgmtnet"
+        net_item_1 = {
+            "name": "vdu-eth1",
+            "net_id": f"TASK-{ns_preffix}",
+            "type": "virtual",
+        }
+        net_item_2 = {
+            "name": "vdu-eth2",
+            "net_id": f"TASK-{ns_preffix}",
+            "type": "virtual",
+        }
+        net_item_3 = {
+            "name": "vdu-eth3",
+            "net_id": f"TASK-{ns_preffix}",
+            "type": "virtual",
+        }
+        mock_item_of_interface.side_effect = [net_item_1, net_item_2, net_item_3]
+        mock_vld_information_of_interface.side_effect = [
+            net_text_1,
+            net_text_2,
+            net_text_3,
+        ]
+        net_list = []
+        expected_extra_dict = {
+            "params": "test_params",
+            "find_params": "test_find_params",
+            "depends_on": [net_text_1, net_text_2, net_text_3],
+            "mgmt_vdu_interface": 0,
+        }
+        updated_net_item1 = deepcopy(net_item_1)
+        updated_net_item1.update({"ip_address": "13.2.12.31"})
+        updated_net_item2 = deepcopy(net_item_2)
+        updated_net_item2.update({"mac_address": "d0:94:66:ed:fc:e2"})
+        expected_net_list = [updated_net_item1, updated_net_item2, net_item_3]
+        self.ns._prepare_vdu_interfaces(
+            target_vdu,
+            extra_dict,
+            ns_preffix,
+            vnf_preffix,
+            self.logger,
+            tasks_by_target_record_id,
+            net_list,
+        )
+        _call_mock_vld_information_of_interface = (
+            mock_vld_information_of_interface.call_args_list
+        )
+        self.assertEqual(
+            _call_mock_vld_information_of_interface[0][0],
+            (interface_1, ns_preffix, vnf_preffix),
+        )
+        self.assertEqual(
+            _call_mock_vld_information_of_interface[1][0],
+            (interface_2, ns_preffix, vnf_preffix),
+        )
+        self.assertEqual(
+            _call_mock_vld_information_of_interface[2][0],
+            (interface_3, ns_preffix, vnf_preffix),
+        )
+
+        _call_mock_port_security = mock_port_security.call_args_list
+        self.assertEqual(_call_mock_port_security[0].args[0], interface_1)
+        self.assertEqual(_call_mock_port_security[1].args[0], interface_2)
+        self.assertEqual(_call_mock_port_security[2].args[0], interface_3)
+
+        _call_mock_item_of_interface = mock_item_of_interface.call_args_list
+        self.assertEqual(_call_mock_item_of_interface[0][0], (interface_1, net_text_1))
+        self.assertEqual(_call_mock_item_of_interface[1][0], (interface_2, net_text_2))
+        self.assertEqual(_call_mock_item_of_interface[2][0], (interface_3, net_text_3))
+
+        _call_mock_type_of_interface = mock_type_of_interface.call_args_list
+        self.assertEqual(
+            _call_mock_type_of_interface[0][0],
+            (interface_1, tasks_by_target_record_id, net_text_1, net_item_1),
+        )
+        self.assertEqual(
+            _call_mock_type_of_interface[1][0],
+            (interface_2, tasks_by_target_record_id, net_text_2, net_item_2),
+        )
+        self.assertEqual(
+            _call_mock_type_of_interface[2][0],
+            (interface_3, tasks_by_target_record_id, net_text_3, net_item_3),
+        )
+        self.assertEqual(net_list, expected_net_list)
+        self.assertEqual(extra_dict, expected_extra_dict)
+        self.logger.error.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns._check_vld_information_of_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_interface_port_security")
+    @patch("osm_ng_ro.ns.Ns._create_net_item_of_interface")
+    @patch("osm_ng_ro.ns.Ns._prepare_type_of_interface")
+    def test_prepare_vdu_interfaces_create_net_item_raise_exception(
+        self,
+        mock_type_of_interface,
+        mock_item_of_interface,
+        mock_port_security,
+        mock_vld_information_of_interface,
+    ):
+        """Prepare vdu interfaces, create_net_item_of_interface method raise exception."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        interface_1 = {
+            "name": "vdu-eth1",
+            "ns-vld-id": "net1",
+            "ip-address": "13.2.12.31",
+            "mgmt-interface": True,
+        }
+        interface_2 = {
+            "name": "vdu-eth2",
+            "vnf-vld-id": "net2",
+            "mac-address": "d0:94:66:ed:fc:e2",
+        }
+        interface_3 = {
+            "name": "vdu-eth3",
+            "ns-vld-id": "mgmtnet",
+        }
+        target_vdu["interfaces"] = [interface_1, interface_2, interface_3]
+        extra_dict = {
+            "params": "test_params",
+            "find_params": "test_find_params",
+            "depends_on": [],
+        }
+        net_text_1 = f"{ns_preffix}:net1"
+        mock_item_of_interface.side_effect = [TypeError, TypeError, TypeError]
+
+        mock_vld_information_of_interface.side_effect = [net_text_1]
+        net_list = []
+        expected_extra_dict = {
+            "params": "test_params",
+            "find_params": "test_find_params",
+            "depends_on": [net_text_1],
+        }
+        with self.assertRaises(TypeError):
+            self.ns._prepare_vdu_interfaces(
+                target_vdu,
+                extra_dict,
+                ns_preffix,
+                vnf_preffix,
+                self.logger,
+                tasks_by_target_record_id,
+                net_list,
+            )
+
+        _call_mock_vld_information_of_interface = (
+            mock_vld_information_of_interface.call_args_list
+        )
+        self.assertEqual(
+            _call_mock_vld_information_of_interface[0][0],
+            (interface_1, ns_preffix, vnf_preffix),
+        )
+
+        _call_mock_port_security = mock_port_security.call_args_list
+        self.assertEqual(_call_mock_port_security[0].args[0], interface_1)
+
+        _call_mock_item_of_interface = mock_item_of_interface.call_args_list
+        self.assertEqual(_call_mock_item_of_interface[0][0], (interface_1, net_text_1))
+
+        mock_type_of_interface.assert_not_called()
+        self.logger.error.assert_not_called()
+        self.assertEqual(net_list, [])
+        self.assertEqual(extra_dict, expected_extra_dict)
+
+    @patch("osm_ng_ro.ns.Ns._check_vld_information_of_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_interface_port_security")
+    @patch("osm_ng_ro.ns.Ns._create_net_item_of_interface")
+    @patch("osm_ng_ro.ns.Ns._prepare_type_of_interface")
+    def test_prepare_vdu_interfaces_vld_information_is_empty(
+        self,
+        mock_type_of_interface,
+        mock_item_of_interface,
+        mock_port_security,
+        mock_vld_information_of_interface,
+    ):
+        """Prepare vdu interfaces, check_vld_information_of_interface method returns empty result."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        interface_1 = {
+            "name": "vdu-eth1",
+            "ns-vld-id": "net1",
+            "ip-address": "13.2.12.31",
+            "mgmt-interface": True,
+        }
+        interface_2 = {
+            "name": "vdu-eth2",
+            "vnf-vld-id": "net2",
+            "mac-address": "d0:94:66:ed:fc:e2",
+        }
+        interface_3 = {
+            "name": "vdu-eth3",
+            "ns-vld-id": "mgmtnet",
+        }
+        target_vdu["interfaces"] = [interface_1, interface_2, interface_3]
+        extra_dict = {
+            "params": "test_params",
+            "find_params": "test_find_params",
+            "depends_on": [],
+        }
+        mock_vld_information_of_interface.side_effect = ["", "", ""]
+        net_list = []
+        self.ns._prepare_vdu_interfaces(
+            target_vdu,
+            extra_dict,
+            ns_preffix,
+            vnf_preffix,
+            self.logger,
+            tasks_by_target_record_id,
+            net_list,
+        )
+
+        _call_mock_vld_information_of_interface = (
+            mock_vld_information_of_interface.call_args_list
+        )
+        self.assertEqual(
+            _call_mock_vld_information_of_interface[0][0],
+            (interface_1, ns_preffix, vnf_preffix),
+        )
+        self.assertEqual(
+            _call_mock_vld_information_of_interface[1][0],
+            (interface_2, ns_preffix, vnf_preffix),
+        )
+        self.assertEqual(
+            _call_mock_vld_information_of_interface[2][0],
+            (interface_3, ns_preffix, vnf_preffix),
+        )
+
+        _call_logger = self.logger.error.call_args_list
+        self.assertEqual(
+            _call_logger[0][0],
+            ("Interface 0 from vdu several_volumes-VM not connected to any vld",),
+        )
+        self.assertEqual(
+            _call_logger[1][0],
+            ("Interface 1 from vdu several_volumes-VM not connected to any vld",),
+        )
+        self.assertEqual(
+            _call_logger[2][0],
+            ("Interface 2 from vdu several_volumes-VM not connected to any vld",),
+        )
+        self.assertEqual(net_list, [])
+        self.assertEqual(
+            extra_dict,
+            {
+                "params": "test_params",
+                "find_params": "test_find_params",
+                "depends_on": [],
+            },
+        )
+
+        mock_item_of_interface.assert_not_called()
+        mock_port_security.assert_not_called()
+        mock_type_of_interface.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns._check_vld_information_of_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_interface_port_security")
+    @patch("osm_ng_ro.ns.Ns._create_net_item_of_interface")
+    @patch("osm_ng_ro.ns.Ns._prepare_type_of_interface")
+    def test_prepare_vdu_interfaces_empty_interface_list(
+        self,
+        mock_type_of_interface,
+        mock_item_of_interface,
+        mock_port_security,
+        mock_vld_information_of_interface,
+    ):
+        """Prepare vdu interfaces, interface list is empty."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["interfaces"] = []
+        extra_dict = {}
+        net_list = []
+        self.ns._prepare_vdu_interfaces(
+            target_vdu,
+            extra_dict,
+            ns_preffix,
+            vnf_preffix,
+            self.logger,
+            tasks_by_target_record_id,
+            net_list,
+        )
+        mock_type_of_interface.assert_not_called()
+        mock_vld_information_of_interface.assert_not_called()
+        mock_item_of_interface.assert_not_called()
+        mock_port_security.assert_not_called()
+
+    def test_prepare_vdu_ssh_keys(self):
+        """Target_vdu has ssh-keys and ro_nsr_public_key exists."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["ssh-keys"] = ["sample-ssh-key"]
+        ro_nsr_public_key = {"public_key": "path_of_public_key"}
+        target_vdu["ssh-access-required"] = True
+        cloud_config = {}
+        expected_cloud_config = {
+            "key-pairs": ["sample-ssh-key", {"public_key": "path_of_public_key"}]
+        }
+        self.ns._prepare_vdu_ssh_keys(target_vdu, ro_nsr_public_key, cloud_config)
+        self.assertDictEqual(cloud_config, expected_cloud_config)
+
+    def test_prepare_vdu_ssh_keys_target_vdu_wthout_ssh_keys(self):
+        """Target_vdu does not have ssh-keys."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        ro_nsr_public_key = {"public_key": "path_of_public_key"}
+        target_vdu["ssh-access-required"] = True
+        cloud_config = {}
+        expected_cloud_config = {"key-pairs": [{"public_key": "path_of_public_key"}]}
+        self.ns._prepare_vdu_ssh_keys(target_vdu, ro_nsr_public_key, cloud_config)
+        self.assertDictEqual(cloud_config, expected_cloud_config)
+
+    def test_prepare_vdu_ssh_keys_ssh_access_is_not_required(self):
+        """Target_vdu has ssh-keys, ssh-access is not required."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["ssh-keys"] = ["sample-ssh-key"]
+        ro_nsr_public_key = {"public_key": "path_of_public_key"}
+        target_vdu["ssh-access-required"] = False
+        cloud_config = {}
+        expected_cloud_config = {"key-pairs": ["sample-ssh-key"]}
+        self.ns._prepare_vdu_ssh_keys(target_vdu, ro_nsr_public_key, cloud_config)
+        self.assertDictEqual(cloud_config, expected_cloud_config)
+
+    @patch("osm_ng_ro.ns.Ns._select_persistent_root_disk")
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_add_persistent_root_disk_to_disk_list_keep_false(
+        self, mock_volume_keeping_required, mock_select_persistent_root_disk
+    ):
+        """Add persistent root disk to disk_list, keep volume set to False."""
+        root_disk = {
+            "id": "persistent-root-volume",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "size-of-storage": "10",
+        }
+        mock_select_persistent_root_disk.return_value = root_disk
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        vnfd["virtual-storage-desc"][1] = root_disk
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        persistent_root_disk = {}
+        disk_list = []
+        mock_volume_keeping_required.return_value = False
+        expected_disk_list = [
+            {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": False,
+            }
+        ]
+        self.ns._add_persistent_root_disk_to_disk_list(
+            vnfd, target_vdu, persistent_root_disk, disk_list
+        )
+        self.assertEqual(disk_list, expected_disk_list)
+        mock_select_persistent_root_disk.assert_called_once()
+        mock_volume_keeping_required.assert_called_once()
+
+    @patch("osm_ng_ro.ns.Ns._select_persistent_root_disk")
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_add_persistent_root_disk_to_disk_list_select_persistent_root_disk_raises(
+        self, mock_volume_keeping_required, mock_select_persistent_root_disk
+    ):
+        """Add persistent root disk to disk_list"""
+        root_disk = {
+            "id": "persistent-root-volume",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "size-of-storage": "10",
+        }
+        mock_select_persistent_root_disk.side_effect = AttributeError
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        vnfd["virtual-storage-desc"][1] = root_disk
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        persistent_root_disk = {}
+        disk_list = []
+        with self.assertRaises(AttributeError):
+            self.ns._add_persistent_root_disk_to_disk_list(
+                vnfd, target_vdu, persistent_root_disk, disk_list
+            )
+        self.assertEqual(disk_list, [])
+        mock_select_persistent_root_disk.assert_called_once()
+        mock_volume_keeping_required.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns._select_persistent_root_disk")
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_add_persistent_root_disk_to_disk_list_keep_true(
+        self, mock_volume_keeping_required, mock_select_persistent_root_disk
+    ):
+        """Add persistent root disk, keep volume set to True."""
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        mock_volume_keeping_required.return_value = True
+        root_disk = {
+            "id": "persistent-root-volume",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "size-of-storage": "10",
+            "vdu-storage-requirements": [
+                {"key": "keep-volume", "value": "true"},
+            ],
+        }
+        mock_select_persistent_root_disk.return_value = root_disk
+        persistent_root_disk = {}
+        disk_list = []
+        expected_disk_list = [
+            {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": True,
+            }
+        ]
+        self.ns._add_persistent_root_disk_to_disk_list(
+            vnfd, target_vdu, persistent_root_disk, disk_list
+        )
+        self.assertEqual(disk_list, expected_disk_list)
+        mock_volume_keeping_required.assert_called_once_with(root_disk)
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_add_persistent_ordinary_disk_to_disk_list(
+        self, mock_volume_keeping_required
+    ):
+        """Add persistent ordinary disk, keep volume set to False."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        mock_volume_keeping_required.return_value = False
+        persistent_root_disk = {
+            "persistent-root-volume": {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": True,
+            }
+        }
+        ordinary_disk = {
+            "id": "persistent-volume2",
+            "type-of-storage": "persistent-storage:persistent-storage",
+            "size-of-storage": "10",
+        }
+        persistent_ordinary_disk = {}
+        disk_list = []
+        expected_disk_list = [
+            {
+                "size": "10",
+                "keep": False,
+            }
+        ]
+        self.ns._add_persistent_ordinary_disks_to_disk_list(
+            target_vdu, persistent_root_disk, persistent_ordinary_disk, disk_list
+        )
+        self.assertEqual(disk_list, expected_disk_list)
+        mock_volume_keeping_required.assert_called_once_with(ordinary_disk)
+
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_add_persistent_ordinary_disk_to_disk_list_vsd_id_in_root_disk_dict(
+        self, mock_volume_keeping_required
+    ):
+        """Add persistent ordinary disk, vsd id is in root_disk dict."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        mock_volume_keeping_required.return_value = False
+        persistent_root_disk = {
+            "persistent-root-volume": {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": True,
+            },
+            "persistent-volume2": {
+                "size": "10",
+            },
+        }
+        persistent_ordinary_disk = {}
+        disk_list = []
+
+        self.ns._add_persistent_ordinary_disks_to_disk_list(
+            target_vdu, persistent_root_disk, persistent_ordinary_disk, disk_list
+        )
+        self.assertEqual(disk_list, [])
+        mock_volume_keeping_required.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns._select_persistent_root_disk")
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_add_persistent_root_disk_to_disk_list_vnfd_wthout_persistent_storage(
+        self, mock_volume_keeping_required, mock_select_persistent_root_disk
+    ):
+        """VNFD does not have persistent storage."""
+        vnfd = deepcopy(vnfd_wthout_persistent_storage)
+        target_vdu = deepcopy(target_vdu_wthout_persistent_storage)
+        mock_select_persistent_root_disk.return_value = None
+        persistent_root_disk = {}
+        disk_list = []
+        self.ns._add_persistent_root_disk_to_disk_list(
+            vnfd, target_vdu, persistent_root_disk, disk_list
+        )
+        self.assertEqual(disk_list, [])
+        self.assertEqual(mock_select_persistent_root_disk.call_count, 2)
+        mock_volume_keeping_required.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns._select_persistent_root_disk")
+    @patch("osm_ng_ro.ns.Ns.is_volume_keeping_required")
+    def test_add_persistent_root_disk_to_disk_list_wthout_persistent_root_disk(
+        self, mock_volume_keeping_required, mock_select_persistent_root_disk
+    ):
+        """Persistent_root_disk dict is empty."""
+        vnfd = deepcopy(vnfd_wthout_persistent_storage)
+        target_vdu = deepcopy(target_vdu_wthout_persistent_storage)
+        mock_select_persistent_root_disk.return_value = None
+        persistent_root_disk = {}
+        disk_list = []
+        self.ns._add_persistent_root_disk_to_disk_list(
+            vnfd, target_vdu, persistent_root_disk, disk_list
+        )
+        self.assertEqual(disk_list, [])
+        self.assertEqual(mock_select_persistent_root_disk.call_count, 2)
+        mock_volume_keeping_required.assert_not_called()
+
+    def test_prepare_vdu_affinity_group_list_invalid_extra_dict(self):
+        """Invalid extra dict."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["affinity-or-anti-affinity-group-id"] = "sample_affinity-group-id"
+        extra_dict = {}
+        ns_preffix = "nsrs:th47f48-9870-4169-b758-9732e1ff40f3"
+        with self.assertRaises(NsException) as err:
+            self.ns._prepare_vdu_affinity_group_list(target_vdu, extra_dict, ns_preffix)
+            self.assertEqual(str(err.exception), "Invalid extra_dict format.")
+
+    def test_prepare_vdu_affinity_group_list_one_affinity_group(self):
+        """There is one affinity-group."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["affinity-or-anti-affinity-group-id"] = ["sample_affinity-group-id"]
+        extra_dict = {"depends_on": []}
+        ns_preffix = "nsrs:th47f48-9870-4169-b758-9732e1ff40f3"
+        affinity_group_txt = "nsrs:th47f48-9870-4169-b758-9732e1ff40f3:affinity-or-anti-affinity-group.sample_affinity-group-id"
+        expected_result = [{"affinity_group_id": "TASK-" + affinity_group_txt}]
+        expected_extra_dict = {"depends_on": [affinity_group_txt]}
+        result = self.ns._prepare_vdu_affinity_group_list(
+            target_vdu, extra_dict, ns_preffix
+        )
+        self.assertDictEqual(extra_dict, expected_extra_dict)
+        self.assertEqual(result, expected_result)
+
+    def test_prepare_vdu_affinity_group_list_several_affinity_groups(self):
+        """There are two affinity-groups."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        target_vdu["affinity-or-anti-affinity-group-id"] = [
+            "affinity-group-id1",
+            "affinity-group-id2",
+        ]
+        extra_dict = {"depends_on": []}
+        ns_preffix = "nsrs:th47f48-9870-4169-b758-9732e1ff40f3"
+        affinity_group_txt1 = "nsrs:th47f48-9870-4169-b758-9732e1ff40f3:affinity-or-anti-affinity-group.affinity-group-id1"
+        affinity_group_txt2 = "nsrs:th47f48-9870-4169-b758-9732e1ff40f3:affinity-or-anti-affinity-group.affinity-group-id2"
+        expected_result = [
+            {"affinity_group_id": "TASK-" + affinity_group_txt1},
+            {"affinity_group_id": "TASK-" + affinity_group_txt2},
+        ]
+        expected_extra_dict = {"depends_on": [affinity_group_txt1, affinity_group_txt2]}
+        result = self.ns._prepare_vdu_affinity_group_list(
+            target_vdu, extra_dict, ns_preffix
+        )
+        self.assertDictEqual(extra_dict, expected_extra_dict)
+        self.assertEqual(result, expected_result)
+
+    def test_prepare_vdu_affinity_group_list_no_affinity_group(self):
+        """There is not any affinity-group."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+        extra_dict = {"depends_on": []}
+        ns_preffix = "nsrs:th47f48-9870-4169-b758-9732e1ff40f3"
+        result = self.ns._prepare_vdu_affinity_group_list(
+            target_vdu, extra_dict, ns_preffix
+        )
+        self.assertDictEqual(extra_dict, {"depends_on": []})
+        self.assertEqual(result, [])
+
+    @patch("osm_ng_ro.ns.Ns._sort_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._partially_locate_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_ssh_keys")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_root_volumes")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_volumes")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_root_disk_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_ordinary_disks_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_affinity_group_list")
+    def test_process_vdu_params_with_inst_vol_list(
+        self,
+        mock_prepare_vdu_affinity_group_list,
+        mock_add_persistent_ordinary_disks_to_disk_list,
+        mock_add_persistent_root_disk_to_disk_list,
+        mock_find_persistent_volumes,
+        mock_find_persistent_root_volumes,
+        mock_prepare_vdu_ssh_keys,
+        mock_prepare_vdu_cloud_init,
+        mock_prepare_vdu_interfaces,
+        mock_locate_vdu_interfaces,
+        mock_sort_vdu_interfaces,
+    ):
+        """Instantiation volume list is provided in additionalParams."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+
+        target_vdu["interfaces"] = interfaces_wth_all_positions
+
+        vdu_instantiation_vol_list = [
+            {
+                "vim-volume-id": vim_volume_id,
+                "name": "persistent-volume2",
+            }
+        ]
+        target_vdu["additionalParams"] = {
+            "OSM": {"vdu_volumes": vdu_instantiation_vol_list}
+        }
+        mock_prepare_vdu_cloud_init.return_value = {}
+        mock_prepare_vdu_affinity_group_list.return_value = []
+        persistent_root_disk = {
+            "persistent-root-volume": {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+            }
+        }
+        mock_find_persistent_root_volumes.return_value = persistent_root_disk
+
+        new_kwargs = deepcopy(kwargs)
+        new_kwargs.update(
+            {
+                "vnfr_id": vnfr_id,
+                "nsr_id": nsr_id,
+                "tasks_by_target_record_id": {},
+                "logger": "logger",
+            }
+        )
+        expected_extra_dict_copy = deepcopy(expected_extra_dict)
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        db.get_one.return_value = vnfd
+        result = Ns._process_vdu_params(
+            target_vdu, indata, vim_info=None, target_record_id=None, **new_kwargs
+        )
+        mock_sort_vdu_interfaces.assert_called_once_with(target_vdu)
+        mock_locate_vdu_interfaces.assert_not_called()
+        mock_prepare_vdu_cloud_init.assert_called_once()
+        mock_add_persistent_root_disk_to_disk_list.assert_not_called()
+        mock_add_persistent_ordinary_disks_to_disk_list.assert_not_called()
+        mock_prepare_vdu_interfaces.assert_called_once_with(
+            target_vdu,
+            expected_extra_dict_copy,
+            ns_preffix,
+            vnf_preffix,
+            "logger",
+            {},
+            [],
+        )
+        self.assertDictEqual(result, expected_extra_dict_copy)
+        mock_prepare_vdu_ssh_keys.assert_called_once_with(target_vdu, None, {})
+        mock_prepare_vdu_affinity_group_list.assert_called_once()
+        mock_find_persistent_volumes.assert_called_once_with(
+            persistent_root_disk, target_vdu, vdu_instantiation_vol_list, []
+        )
+
+    @patch("osm_ng_ro.ns.Ns._sort_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._partially_locate_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_ssh_keys")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_root_volumes")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_volumes")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_root_disk_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_ordinary_disks_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_affinity_group_list")
+    def test_process_vdu_params_wth_affinity_groups(
+        self,
+        mock_prepare_vdu_affinity_group_list,
+        mock_add_persistent_ordinary_disks_to_disk_list,
+        mock_add_persistent_root_disk_to_disk_list,
+        mock_find_persistent_volumes,
+        mock_find_persistent_root_volumes,
+        mock_prepare_vdu_ssh_keys,
+        mock_prepare_vdu_cloud_init,
+        mock_prepare_vdu_interfaces,
+        mock_locate_vdu_interfaces,
+        mock_sort_vdu_interfaces,
+    ):
+        """There are affinity groups."""
+        target_vdu = deepcopy(target_vdu_wthout_persistent_storage)
+
+        self.maxDiff = None
+        target_vdu["interfaces"] = interfaces_wth_all_positions
+        mock_prepare_vdu_cloud_init.return_value = {}
+        mock_prepare_vdu_affinity_group_list.return_value = [
+            "affinity_group_1",
+            "affinity_group_2",
+        ]
+
+        new_kwargs = deepcopy(kwargs)
+        new_kwargs.update(
+            {
+                "vnfr_id": vnfr_id,
+                "nsr_id": nsr_id,
+                "tasks_by_target_record_id": {},
+                "logger": "logger",
+            }
+        )
+        expected_extra_dict3 = deepcopy(expected_extra_dict2)
+        expected_extra_dict3["params"]["affinity_group_list"] = [
+            "affinity_group_1",
+            "affinity_group_2",
+        ]
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        db.get_one.return_value = vnfd
+        result = Ns._process_vdu_params(
+            target_vdu, indata, vim_info=None, target_record_id=None, **new_kwargs
+        )
+        self.assertDictEqual(result, expected_extra_dict3)
+        mock_sort_vdu_interfaces.assert_called_once_with(target_vdu)
+        mock_locate_vdu_interfaces.assert_not_called()
+        mock_prepare_vdu_cloud_init.assert_called_once()
+        mock_add_persistent_root_disk_to_disk_list.assert_called_once()
+        mock_add_persistent_ordinary_disks_to_disk_list.assert_called_once()
+        mock_prepare_vdu_interfaces.assert_called_once_with(
+            target_vdu,
+            expected_extra_dict3,
+            ns_preffix,
+            vnf_preffix,
+            "logger",
+            {},
+            [],
+        )
+
+        mock_prepare_vdu_ssh_keys.assert_called_once_with(target_vdu, None, {})
+        mock_prepare_vdu_affinity_group_list.assert_called_once()
+        mock_find_persistent_volumes.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns._sort_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._partially_locate_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_ssh_keys")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_root_volumes")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_volumes")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_root_disk_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_ordinary_disks_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_affinity_group_list")
+    def test_process_vdu_params_wth_cloud_config(
+        self,
+        mock_prepare_vdu_affinity_group_list,
+        mock_add_persistent_ordinary_disks_to_disk_list,
+        mock_add_persistent_root_disk_to_disk_list,
+        mock_find_persistent_volumes,
+        mock_find_persistent_root_volumes,
+        mock_prepare_vdu_ssh_keys,
+        mock_prepare_vdu_cloud_init,
+        mock_prepare_vdu_interfaces,
+        mock_locate_vdu_interfaces,
+        mock_sort_vdu_interfaces,
+    ):
+        """There is cloud-config."""
+        target_vdu = deepcopy(target_vdu_wthout_persistent_storage)
+
+        self.maxDiff = None
+        target_vdu["interfaces"] = interfaces_wth_all_positions
+        mock_prepare_vdu_cloud_init.return_value = {
+            "user-data": user_data,
+            "boot-data-drive": "vda",
+        }
+        mock_prepare_vdu_affinity_group_list.return_value = []
+
+        new_kwargs = deepcopy(kwargs)
+        new_kwargs.update(
+            {
+                "vnfr_id": vnfr_id,
+                "nsr_id": nsr_id,
+                "tasks_by_target_record_id": {},
+                "logger": "logger",
+            }
+        )
+        expected_extra_dict3 = deepcopy(expected_extra_dict2)
+        expected_extra_dict3["params"]["cloud_config"] = {
+            "user-data": user_data,
+            "boot-data-drive": "vda",
+        }
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        db.get_one.return_value = vnfd
+        result = Ns._process_vdu_params(
+            target_vdu, indata, vim_info=None, target_record_id=None, **new_kwargs
+        )
+        mock_sort_vdu_interfaces.assert_called_once_with(target_vdu)
+        mock_locate_vdu_interfaces.assert_not_called()
+        mock_prepare_vdu_cloud_init.assert_called_once()
+        mock_add_persistent_root_disk_to_disk_list.assert_called_once()
+        mock_add_persistent_ordinary_disks_to_disk_list.assert_called_once()
+        mock_prepare_vdu_interfaces.assert_called_once_with(
+            target_vdu,
+            expected_extra_dict3,
+            ns_preffix,
+            vnf_preffix,
+            "logger",
+            {},
+            [],
+        )
+        self.assertDictEqual(result, expected_extra_dict3)
+        mock_prepare_vdu_ssh_keys.assert_called_once_with(
+            target_vdu, None, {"user-data": user_data, "boot-data-drive": "vda"}
+        )
+        mock_prepare_vdu_affinity_group_list.assert_called_once()
+        mock_find_persistent_volumes.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns._sort_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._partially_locate_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_ssh_keys")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_root_volumes")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_volumes")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_root_disk_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_ordinary_disks_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_affinity_group_list")
+    def test_process_vdu_params_wthout_persistent_storage(
+        self,
+        mock_prepare_vdu_affinity_group_list,
+        mock_add_persistent_ordinary_disks_to_disk_list,
+        mock_add_persistent_root_disk_to_disk_list,
+        mock_find_persistent_volumes,
+        mock_find_persistent_root_volumes,
+        mock_prepare_vdu_ssh_keys,
+        mock_prepare_vdu_cloud_init,
+        mock_prepare_vdu_interfaces,
+        mock_locate_vdu_interfaces,
+        mock_sort_vdu_interfaces,
+    ):
+        """There is not any persistent storage."""
+        target_vdu = deepcopy(target_vdu_wthout_persistent_storage)
+
+        self.maxDiff = None
+        target_vdu["interfaces"] = interfaces_wth_all_positions
+        mock_prepare_vdu_cloud_init.return_value = {}
+        mock_prepare_vdu_affinity_group_list.return_value = []
+
+        new_kwargs = deepcopy(kwargs)
+        new_kwargs.update(
+            {
+                "vnfr_id": vnfr_id,
+                "nsr_id": nsr_id,
+                "tasks_by_target_record_id": {},
+                "logger": "logger",
+            }
+        )
+        expected_extra_dict_copy = deepcopy(expected_extra_dict2)
+        vnfd = deepcopy(vnfd_wthout_persistent_storage)
+        db.get_one.return_value = vnfd
+        result = Ns._process_vdu_params(
+            target_vdu, indata, vim_info=None, target_record_id=None, **new_kwargs
+        )
+        mock_sort_vdu_interfaces.assert_called_once_with(target_vdu)
+        mock_locate_vdu_interfaces.assert_not_called()
+        mock_prepare_vdu_cloud_init.assert_called_once()
+        mock_add_persistent_root_disk_to_disk_list.assert_called_once()
+        mock_add_persistent_ordinary_disks_to_disk_list.assert_called_once()
+        mock_prepare_vdu_interfaces.assert_called_once_with(
+            target_vdu,
+            expected_extra_dict_copy,
+            ns_preffix,
+            vnf_preffix,
+            "logger",
+            {},
+            [],
+        )
+        self.assertDictEqual(result, expected_extra_dict_copy)
+        mock_prepare_vdu_ssh_keys.assert_called_once_with(target_vdu, None, {})
+        mock_prepare_vdu_affinity_group_list.assert_called_once()
+        mock_find_persistent_volumes.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns._sort_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._partially_locate_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_ssh_keys")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_root_volumes")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_volumes")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_root_disk_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_ordinary_disks_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_affinity_group_list")
+    def test_process_vdu_params_interfaces_partially_located(
+        self,
+        mock_prepare_vdu_affinity_group_list,
+        mock_add_persistent_ordinary_disks_to_disk_list,
+        mock_add_persistent_root_disk_to_disk_list,
+        mock_find_persistent_volumes,
+        mock_find_persistent_root_volumes,
+        mock_prepare_vdu_ssh_keys,
+        mock_prepare_vdu_cloud_init,
+        mock_prepare_vdu_interfaces,
+        mock_locate_vdu_interfaces,
+        mock_sort_vdu_interfaces,
+    ):
+        """Some interfaces have position."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+
+        self.maxDiff = None
+        target_vdu["interfaces"] = [
+            {
+                "name": "vdu-eth1",
+                "ns-vld-id": "net1",
+            },
+            {"name": "vdu-eth2", "ns-vld-id": "net2", "position": 2},
+            {
+                "name": "vdu-eth3",
+                "ns-vld-id": "mgmtnet",
+            },
+        ]
+        mock_prepare_vdu_cloud_init.return_value = {}
+        mock_prepare_vdu_affinity_group_list.return_value = []
+        persistent_root_disk = {
+            "persistent-root-volume": {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": True,
+            }
+        }
+        mock_find_persistent_root_volumes.return_value = persistent_root_disk
+
+        new_kwargs = deepcopy(kwargs)
+        new_kwargs.update(
+            {
+                "vnfr_id": vnfr_id,
+                "nsr_id": nsr_id,
+                "tasks_by_target_record_id": {},
+                "logger": "logger",
+            }
+        )
+
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        db.get_one.return_value = vnfd
+        result = Ns._process_vdu_params(
+            target_vdu, indata, vim_info=None, target_record_id=None, **new_kwargs
+        )
+        expected_extra_dict_copy = deepcopy(expected_extra_dict)
+        mock_sort_vdu_interfaces.assert_not_called()
+        mock_locate_vdu_interfaces.assert_called_once_with(target_vdu)
+        mock_prepare_vdu_cloud_init.assert_called_once()
+        mock_add_persistent_root_disk_to_disk_list.assert_called_once()
+        mock_add_persistent_ordinary_disks_to_disk_list.assert_called_once()
+        mock_prepare_vdu_interfaces.assert_called_once_with(
+            target_vdu,
+            expected_extra_dict_copy,
+            ns_preffix,
+            vnf_preffix,
+            "logger",
+            {},
+            [],
+        )
+        self.assertDictEqual(result, expected_extra_dict_copy)
+        mock_prepare_vdu_ssh_keys.assert_called_once_with(target_vdu, None, {})
+        mock_prepare_vdu_affinity_group_list.assert_called_once()
+        mock_find_persistent_volumes.assert_not_called()
+        mock_find_persistent_root_volumes.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns._sort_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._partially_locate_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_ssh_keys")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_root_volumes")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_volumes")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_root_disk_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_ordinary_disks_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_affinity_group_list")
+    def test_process_vdu_params_no_interface_position(
+        self,
+        mock_prepare_vdu_affinity_group_list,
+        mock_add_persistent_ordinary_disks_to_disk_list,
+        mock_add_persistent_root_disk_to_disk_list,
+        mock_find_persistent_volumes,
+        mock_find_persistent_root_volumes,
+        mock_prepare_vdu_ssh_keys,
+        mock_prepare_vdu_cloud_init,
+        mock_prepare_vdu_interfaces,
+        mock_locate_vdu_interfaces,
+        mock_sort_vdu_interfaces,
+    ):
+        """Interfaces do not have position."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+
+        self.maxDiff = None
+        target_vdu["interfaces"] = interfaces_wthout_positions
+        mock_prepare_vdu_cloud_init.return_value = {}
+        mock_prepare_vdu_affinity_group_list.return_value = []
+        persistent_root_disk = {
+            "persistent-root-volume": {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": True,
+            }
+        }
+        mock_find_persistent_root_volumes.return_value = persistent_root_disk
+        new_kwargs = deepcopy(kwargs)
+        new_kwargs.update(
+            {
+                "vnfr_id": vnfr_id,
+                "nsr_id": nsr_id,
+                "tasks_by_target_record_id": {},
+                "logger": "logger",
+            }
+        )
+
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        db.get_one.return_value = vnfd
+        result = Ns._process_vdu_params(
+            target_vdu, indata, vim_info=None, target_record_id=None, **new_kwargs
+        )
+        expected_extra_dict_copy = deepcopy(expected_extra_dict)
+        mock_sort_vdu_interfaces.assert_not_called()
+        mock_locate_vdu_interfaces.assert_called_once_with(target_vdu)
+        mock_prepare_vdu_cloud_init.assert_called_once()
+        mock_add_persistent_root_disk_to_disk_list.assert_called_once()
+        mock_add_persistent_ordinary_disks_to_disk_list.assert_called_once()
+        mock_prepare_vdu_interfaces.assert_called_once_with(
+            target_vdu,
+            expected_extra_dict_copy,
+            ns_preffix,
+            vnf_preffix,
+            "logger",
+            {},
+            [],
+        )
+        self.assertDictEqual(result, expected_extra_dict_copy)
+        mock_prepare_vdu_ssh_keys.assert_called_once_with(target_vdu, None, {})
+        mock_prepare_vdu_affinity_group_list.assert_called_once()
+        mock_find_persistent_volumes.assert_not_called()
+        mock_find_persistent_root_volumes.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns._sort_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._partially_locate_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_ssh_keys")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_root_volumes")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_volumes")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_root_disk_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_ordinary_disks_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_affinity_group_list")
+    def test_process_vdu_params_prepare_vdu_interfaces_raises_exception(
+        self,
+        mock_prepare_vdu_affinity_group_list,
+        mock_add_persistent_ordinary_disks_to_disk_list,
+        mock_add_persistent_root_disk_to_disk_list,
+        mock_find_persistent_volumes,
+        mock_find_persistent_root_volumes,
+        mock_prepare_vdu_ssh_keys,
+        mock_prepare_vdu_cloud_init,
+        mock_prepare_vdu_interfaces,
+        mock_locate_vdu_interfaces,
+        mock_sort_vdu_interfaces,
+    ):
+        """Prepare vdu interfaces method raises exception."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+
+        self.maxDiff = None
+        target_vdu["interfaces"] = interfaces_wthout_positions
+        mock_prepare_vdu_cloud_init.return_value = {}
+        mock_prepare_vdu_affinity_group_list.return_value = []
+        persistent_root_disk = {
+            "persistent-root-volume": {
+                "image_id": "ubuntu20.04",
+                "size": "10",
+                "keep": True,
+            }
+        }
+        mock_find_persistent_root_volumes.return_value = persistent_root_disk
+        new_kwargs = deepcopy(kwargs)
+        new_kwargs.update(
+            {
+                "vnfr_id": vnfr_id,
+                "nsr_id": nsr_id,
+                "tasks_by_target_record_id": {},
+                "logger": "logger",
+            }
+        )
+        mock_prepare_vdu_interfaces.side_effect = TypeError
+
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        db.get_one.return_value = vnfd
+        with self.assertRaises(Exception) as err:
+            Ns._process_vdu_params(
+                target_vdu, indata, vim_info=None, target_record_id=None, **new_kwargs
+            )
+        self.assertEqual(type(err.exception), TypeError)
+        mock_sort_vdu_interfaces.assert_not_called()
+        mock_locate_vdu_interfaces.assert_called_once_with(target_vdu)
+        mock_prepare_vdu_cloud_init.assert_not_called()
+        mock_add_persistent_root_disk_to_disk_list.assert_not_called()
+        mock_add_persistent_ordinary_disks_to_disk_list.assert_not_called()
+        mock_prepare_vdu_interfaces.assert_called_once()
+        mock_prepare_vdu_ssh_keys.assert_not_called()
+        mock_prepare_vdu_affinity_group_list.assert_not_called()
+        mock_find_persistent_volumes.assert_not_called()
+        mock_find_persistent_root_volumes.assert_not_called()
+
+    @patch("osm_ng_ro.ns.Ns._sort_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._partially_locate_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_interfaces")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_cloud_init")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_ssh_keys")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_root_volumes")
+    @patch("osm_ng_ro.ns.Ns.find_persistent_volumes")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_root_disk_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._add_persistent_ordinary_disks_to_disk_list")
+    @patch("osm_ng_ro.ns.Ns._prepare_vdu_affinity_group_list")
+    def test_process_vdu_params_add_persistent_root_disk_raises_exception(
+        self,
+        mock_prepare_vdu_affinity_group_list,
+        mock_add_persistent_ordinary_disks_to_disk_list,
+        mock_add_persistent_root_disk_to_disk_list,
+        mock_find_persistent_volumes,
+        mock_find_persistent_root_volumes,
+        mock_prepare_vdu_ssh_keys,
+        mock_prepare_vdu_cloud_init,
+        mock_prepare_vdu_interfaces,
+        mock_locate_vdu_interfaces,
+        mock_sort_vdu_interfaces,
+    ):
+        """Add persistent root disk method raises exception."""
+        target_vdu = deepcopy(target_vdu_wth_persistent_storage)
+
+        self.maxDiff = None
+        target_vdu["interfaces"] = interfaces_wthout_positions
+        mock_prepare_vdu_cloud_init.return_value = {}
+        mock_prepare_vdu_affinity_group_list.return_value = []
+        mock_add_persistent_root_disk_to_disk_list.side_effect = KeyError
+        new_kwargs = deepcopy(kwargs)
+        new_kwargs.update(
+            {
+                "vnfr_id": vnfr_id,
+                "nsr_id": nsr_id,
+                "tasks_by_target_record_id": {},
+                "logger": "logger",
+            }
+        )
+
+        vnfd = deepcopy(vnfd_wth_persistent_storage)
+        db.get_one.return_value = vnfd
+        with self.assertRaises(Exception) as err:
+            Ns._process_vdu_params(
+                target_vdu, indata, vim_info=None, target_record_id=None, **new_kwargs
+            )
+        self.assertEqual(type(err.exception), KeyError)
+        mock_sort_vdu_interfaces.assert_not_called()
+        mock_locate_vdu_interfaces.assert_called_once_with(target_vdu)
+        mock_prepare_vdu_cloud_init.assert_called_once()
+        mock_add_persistent_root_disk_to_disk_list.assert_called_once()
+        mock_add_persistent_ordinary_disks_to_disk_list.assert_not_called()
+        mock_prepare_vdu_interfaces.assert_called_once_with(
+            target_vdu,
+            {
+                "depends_on": [
+                    f"{ns_preffix}:image.0",
+                    f"{ns_preffix}:flavor.0",
+                ]
+            },
+            ns_preffix,
+            vnf_preffix,
+            "logger",
+            {},
+            [],
+        )
+
+        mock_prepare_vdu_ssh_keys.assert_called_once_with(target_vdu, None, {})
+        mock_prepare_vdu_affinity_group_list.assert_not_called()
+        mock_find_persistent_volumes.assert_not_called()
+        mock_find_persistent_root_volumes.assert_not_called()
+
+    def test_select_persistent_root_disk(self):
+        vdu = deepcopy(target_vdu_wth_persistent_storage)
+        vdu["virtual-storage-desc"] = [
+            "persistent-root-volume",
+            "persistent-volume2",
+            "ephemeral-volume",
+        ]
+        vsd = deepcopy(vnfd_wth_persistent_storage)["virtual-storage-desc"][1]
+        expected_result = vsd
+        result = Ns._select_persistent_root_disk(vsd, vdu)
+        self.assertEqual(result, expected_result)
+
+    def test_select_persistent_root_disk_first_vsd_is_different(self):
+        """VDU first virtual-storage-desc is different than vsd id."""
+        vdu = deepcopy(target_vdu_wth_persistent_storage)
+        vdu["virtual-storage-desc"] = [
+            "persistent-volume2",
+            "persistent-root-volume",
+            "ephemeral-volume",
+        ]
+        vsd = deepcopy(vnfd_wth_persistent_storage)["virtual-storage-desc"][1]
+        expected_result = None
+        result = Ns._select_persistent_root_disk(vsd, vdu)
+        self.assertEqual(result, expected_result)
+
+    def test_select_persistent_root_disk_vsd_is_not_persistent(self):
+        """vsd type is not persistent."""
+        vdu = deepcopy(target_vdu_wth_persistent_storage)
+        vdu["virtual-storage-desc"] = [
+            "persistent-volume2",
+            "persistent-root-volume",
+            "ephemeral-volume",
+        ]
+        vsd = deepcopy(vnfd_wth_persistent_storage)["virtual-storage-desc"][1]
+        vsd["type-of-storage"] = "etsi-nfv-descriptors:ephemeral-storage"
+        expected_result = None
+        result = Ns._select_persistent_root_disk(vsd, vdu)
+        self.assertEqual(result, expected_result)
+
+    def test_select_persistent_root_disk_vsd_does_not_have_size(self):
+        """vsd size is None."""
+        vdu = deepcopy(target_vdu_wth_persistent_storage)
+        vdu["virtual-storage-desc"] = [
+            "persistent-volume2",
+            "persistent-root-volume",
+            "ephemeral-volume",
+        ]
+        vsd = deepcopy(vnfd_wth_persistent_storage)["virtual-storage-desc"][1]
+        vsd["size-of-storage"] = None
+        expected_result = None
+        result = Ns._select_persistent_root_disk(vsd, vdu)
+        self.assertEqual(result, expected_result)
+
+    def test_select_persistent_root_disk_vdu_wthout_vsd(self):
+        """VDU does not have virtual-storage-desc."""
+        vdu = deepcopy(target_vdu_wth_persistent_storage)
+        vsd = deepcopy(vnfd_wth_persistent_storage)["virtual-storage-desc"][1]
+        expected_result = None
+        result = Ns._select_persistent_root_disk(vsd, vdu)
+        self.assertEqual(result, expected_result)
+
+    def test_select_persistent_root_disk_invalid_vsd_type(self):
+        """vsd is list, expected to be a dict."""
+        vdu = deepcopy(target_vdu_wth_persistent_storage)
+        vsd = deepcopy(vnfd_wth_persistent_storage)["virtual-storage-desc"]
+        with self.assertRaises(AttributeError):
+            Ns._select_persistent_root_disk(vsd, vdu)
diff --git a/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py
index 0999124..f4382ad 100644
--- a/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py
+++ b/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py
@@ -23,19 +23,61 @@
 This module contains unit tests for the OpenStack VIM connector
 Run this directly with python2 or python3.
 """
-
 import copy
+from copy import deepcopy
 import logging
 import unittest
 
 import mock
+from mock import MagicMock, patch
 from neutronclient.v2_0.client import Client
+from novaclient import exceptions as nvExceptions
 from osm_ro_plugin import vimconn
+from osm_ro_plugin.vimconn import (
+    VimConnConnectionException,
+    VimConnException,
+    VimConnNotFoundException,
+)
 from osm_rovim_openstack.vimconn_openstack import vimconnector
 
 __author__ = "Igor D.C."
 __date__ = "$23-aug-2017 23:59:59$"
 
+# Variables Used in TestNewVmInstance Class
+name = "basicvm"
+description = "my firewall"
+start = True
+image_id = "408b73-e9cc-5a6a-t270-82cc4811bd4a"
+flavor_id = "208b73-e9cc-5a6a-t270-82cc4811bd4a"
+affinity_group_list = []
+net_list = []
+cloud_config = {}
+disk_list = []
+disk_list2 = [
+    {"size": 10, "image_id": image_id},
+    {"size": 20},
+]
+availability_zone_index = 0
+availability_zone_list = ["nova"]
+floating_network_vim_id = "108b73-e9cc-5a6a-t270-82cc4811bd4a"
+net_id = "83372685-f67f-49fd-8722-eabb7692fc22"
+net2_id = "46472685-f67f-49fd-8722-eabb7692fc22"
+mac_address = "00:00:5e:00:53:af"
+port_id = "03372685-f67f-49fd-8722-eabb7692fc22"
+time_return_value = 156570000
+port2_id = "17472685-f67f-49fd-8722-eabb7692fc22"
+root_vol_id = "tc408b73-r9cc-5a6a-a270-82cc4811bd4a"
+ip_addr1 = "20.3.4.5"
+volume_id = "ac408b73-b9cc-4a6a-a270-82cc4811bd4a"
+volume_id2 = "o4e0e83-b9uu-4akk-a234-89cc4811bd4a"
+volume_id3 = "44e0e83-t9uu-4akk-a234-p9cc4811bd4a"
+virtual_mac_id = "64e0e83-t9uu-4akk-a234-p9cc4811bd4a"
+created_items_all_true = {
+    f"floating_ip:{floating_network_vim_id}": True,
+    f"volume:{volume_id}": True,
+    f"port:{port_id}": True,
+}
+
 
 class TestSfcOperations(unittest.TestCase):
     @mock.patch("logging.getLogger", autospec=True)
@@ -1057,5 +1099,4795 @@
         self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464")
 
 
+class Status:
+    def __init__(self, s):
+        self.status = s
+
+    def __str__(self):
+        return self.status
+
+
+class CopyingMock(MagicMock):
+    def __call__(self, *args, **kwargs):
+        args = deepcopy(args)
+        kwargs = deepcopy(kwargs)
+        return super(CopyingMock, self).__call__(*args, **kwargs)
+
+
+class TestNewVmInstance(unittest.TestCase):
+    @patch("logging.getLogger", autospec=True)
+    def setUp(self, mock_logger):
+        # Instantiate a dummy VIM connector so we can test it.
+        # It throws an exception because of the dummy parameters,
+        # so we disable exception logging to keep the console output clean.
+        mock_logger = logging.getLogger()
+        mock_logger.disabled = True
+        self.vimconn = vimconnector(
+            "123",
+            "openstackvim",
+            "456",
+            "789",
+            "http://dummy.url",
+            None,
+            "user",
+            "pass",
+        )
+        self.vimconn.neutron = CopyingMock()
+        self.vimconn.nova = CopyingMock()
+        self.vimconn.cinder = CopyingMock()
+        self.server = MagicMock(object, autospec=True)
+        self.server.tenant_id = "408b73-r9cc-5a6a-a270-82cc4811bd4a"
+        self.server.id = "908b73-e9cc-5a6a-t270-82cc4811bd4a"
+        self.vimconn.config["security_groups"] = "default"
+        self.vimconn.config["keypair"] = "my_keypair"
+        self.vimconn.security_groups_id = "12345"
+        self.vimconn.nova.api_version.get_string.return_value = "2.32"
+        self.vimconn.logger = CopyingMock()
+
+    @patch.object(vimconnector, "_get_ids_from_name")
+    def test_prepare_port_dict_security_security_groups_exists_in_config(
+        self, mock_get_ids
+    ):
+        """In VIM config security_groups exists, net port_security is True
+        no_port_security_extension does not exist.
+        """
+        self.vimconn.config = {"security_groups": "example_security_group"}
+        net = {"port_security": True}
+        port_dict = {}
+        result_dict = {"security_groups": "12345"}
+
+        self.vimconn._prepare_port_dict_security_groups(net, port_dict)
+        self.assertDictEqual(result_dict, port_dict)
+        mock_get_ids.assert_not_called()
+
+    @patch.object(vimconnector, "_get_ids_from_name")
+    def test_prepare_port_dict_security_security_groups_exists_in_config_no_security_groups_id(
+        self, mock_get_ids
+    ):
+        """In VIM config security_groups exists, net port_security is True, vim security_groups_id does not exist,
+        no_port_security_extension does not exist.
+        """
+        self.vimconn.config = {"security_groups": "example_security_group"}
+        self.vimconn.security_groups_id = None
+        net = {"port_security": True}
+        port_dict = {}
+        result_dict = {"security_groups": None}
+
+        self.vimconn._prepare_port_dict_security_groups(net, port_dict)
+        self.assertDictEqual(result_dict, port_dict)
+        mock_get_ids.assert_called()
+
+    @patch.object(vimconnector, "_get_ids_from_name")
+    def test_prepare_port_dict_security_security_groups_exists_security_extension_true_in_config(
+        self, mock_get_ids
+    ):
+        """In VIM config security_groups exists, net port_security is True, in VIM security_groups_id exists,
+        no_port_security_extension set to True.
+        """
+        self.vimconn.config = {
+            "security_groups": "example_security_group",
+            "no_port_security_extension": True,
+        }
+        net = {"port_security": True}
+        port_dict = {}
+        result_dict = {}
+
+        self.vimconn._prepare_port_dict_security_groups(net, port_dict)
+        self.assertDictEqual(result_dict, port_dict)
+        mock_get_ids.assert_not_called()
+
+    @patch.object(vimconnector, "_get_ids_from_name")
+    def test_prepare_port_dict_security_no_security_groups_in_config(
+        self, mock_get_ids
+    ):
+        """In VIM config security_groups does not exist, net port_security True, in VIM security_groups_id exists,
+        no_port_security_extension does not exist."""
+        self.vimconn.config = {}
+        net = {"port_security": True}
+        port_dict = {}
+        result_dict = {}
+
+        self.vimconn._prepare_port_dict_security_groups(net, port_dict)
+        self.assertDictEqual(result_dict, port_dict)
+        mock_get_ids.assert_not_called()
+
+    @patch.object(vimconnector, "_get_ids_from_name")
+    def test_prepare_port_dict_security_no_security_groups_security_extension_true_in_config(
+        self, mock_get_ids
+    ):
+        """In VIM config security_groups does not exist, net port_security is True, in VIM security_groups_id exists,
+        no_port_security_extension set to True."""
+        self.vimconn.config = {"no_port_security_extension": True}
+        net = {"port_security": True}
+        port_dict = {}
+        result_dict = {}
+
+        self.vimconn._prepare_port_dict_security_groups(net, port_dict)
+        self.assertDictEqual(result_dict, port_dict)
+        mock_get_ids.assert_not_called()
+
+    @patch.object(vimconnector, "_get_ids_from_name")
+    def test_prepare_port_dict_security_security_groups_exists_net_port_security_false(
+        self, mock_get_ids
+    ):
+        """In VIM config security_groups exists, net port_security False, security_groups_id exists,
+        no_port_security_extension does not exist."""
+        self.vimconn.config = {"security_groups": "example_security_group"}
+        net = {"port_security": False}
+        port_dict = {}
+        result_dict = {}
+
+        self.vimconn._prepare_port_dict_security_groups(net, port_dict)
+        self.assertDictEqual(result_dict, port_dict)
+        mock_get_ids.assert_not_called()
+
+    @patch.object(vimconnector, "_get_ids_from_name")
+    def test_prepare_port_dict_security_net_port_security_false_port_security_extension_true(
+        self, mock_get_ids
+    ):
+        """In VIM config security_groups exists, net port_security False, security_groups_id exists,
+        no_port_security_extension set to True."""
+        self.vimconn.config = {
+            "security_groups": "example_security_group",
+            "no_port_security_extension": True,
+        }
+        net = {"port_security": False}
+        port_dict = {}
+        result_dict = {}
+
+        self.vimconn._prepare_port_dict_security_groups(net, port_dict)
+        self.assertDictEqual(result_dict, port_dict)
+        mock_get_ids.assert_not_called()
+
+    def test_prepare_port_dict_binding_net_type_virtual(self):
+        """net type is virtual."""
+        net = {"type": "virtual"}
+        port_dict = {}
+        result_dict = {}
+        self.vimconn._prepare_port_dict_binding(net, port_dict)
+        self.assertDictEqual(result_dict, port_dict)
+
+    def test_prepare_port_dict_binding_net_type_vf(self):
+        """net type is VF, vim_type is not VIO."""
+        net = {"type": "VF"}
+        self.vimconn.vim_type = None
+        port_dict = {}
+        result_dict = {"binding:vnic_type": "direct"}
+        self.vimconn._prepare_port_dict_binding(net, port_dict)
+        self.assertDictEqual(port_dict, result_dict)
+
+    def test_prepare_port_dict_binding_net_type_sriov_vim_type_vio(self):
+        """net type is SR-IOV, vim_type is VIO."""
+        net = {"type": "SR-IOV"}
+        self.vimconn.vim_type = "VIO"
+        port_dict = {}
+        result_dict = {
+            "binding:vnic_type": "direct",
+            "port_security_enabled": False,
+            "provider_security_groups": [],
+            "security_groups": [],
+        }
+        self.vimconn._prepare_port_dict_binding(net, port_dict)
+        self.assertDictEqual(port_dict, result_dict)
+
+    def test_prepare_port_dict_binding_net_type_passthrough(self):
+        """net type is pci-passthrough."""
+        net = {"type": "PCI-PASSTHROUGH"}
+        port_dict = {}
+        result_dict = {
+            "binding:vnic_type": "direct-physical",
+        }
+        self.vimconn._prepare_port_dict_binding(net, port_dict)
+        self.assertDictEqual(port_dict, result_dict)
+
+    def test_prepare_port_dict_binding_no_net_type(self):
+        """net type is missing."""
+        net = {}
+        port_dict = {}
+        with self.assertRaises(VimConnException) as err:
+            self.vimconn._prepare_port_dict_binding(net, port_dict)
+        self.assertEqual(str(err.exception), "Type is missing in the network details.")
+
+    def test_set_fixed_ip(self):
+        """new_port has fixed ip."""
+        net = {}
+        new_port = {
+            "port": {
+                "fixed_ips": [{"ip_address": "10.1.2.3"}, {"ip_address": "20.1.2.3"}]
+            }
+        }
+        result = {"ip": "10.1.2.3"}
+        self.vimconn._set_fixed_ip(new_port, net)
+        self.assertDictEqual(net, result)
+
+    def test_set_fixed_ip_no_fixed_ip(self):
+        """new_port does not have fixed ip."""
+        net = {}
+        new_port = {"port": {}}
+        result = {"ip": None}
+        self.vimconn._set_fixed_ip(new_port, net)
+        self.assertDictEqual(net, result)
+
+    def test_set_fixed_ip_raise_exception(self):
+        """new_port does not have port details."""
+        net = {}
+        new_port = {}
+        with self.assertRaises(Exception) as err:
+            self.vimconn._set_fixed_ip(new_port, net)
+        self.assertEqual(type(err.exception), KeyError)
+
+    def test_prepare_port_dict_mac_ip_addr(self):
+        """mac address and ip address exist."""
+        net = {
+            "mac_address": mac_address,
+            "ip_address": "10.0.1.5",
+        }
+        port_dict = {}
+        result_dict = {
+            "mac_address": mac_address,
+            "fixed_ips": [{"ip_address": "10.0.1.5"}],
+        }
+        self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
+        self.assertDictEqual(port_dict, result_dict)
+
+    def test_prepare_port_dict_mac_ip_addr_no_mac_and_ip(self):
+        """mac address and ip address do not exist."""
+        net = {}
+        port_dict = {}
+        result_dict = {}
+        self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
+        self.assertDictEqual(port_dict, result_dict)
+
+    def test_create_new_port(self):
+        """new port has id and mac address."""
+        new_port = {
+            "port": {
+                "id": port_id,
+                "mac_address": mac_address,
+            },
+        }
+        self.vimconn.neutron.create_port.return_value = new_port
+        net, port_dict, created_items = {}, {}, {}
+        expected_result = new_port
+        expected_net = {
+            "mac_adress": mac_address,
+            "vim_id": port_id,
+        }
+        expected_created_items = {f"port:{port_id}": True}
+        result = self.vimconn._create_new_port(port_dict, created_items, net)
+        self.assertDictEqual(result, expected_result)
+        self.assertEqual(net, expected_net)
+        self.assertEqual(created_items, expected_created_items)
+        self.vimconn.neutron.create_port.assert_called_once_with({"port": port_dict})
+
+    def test_create_new_port_without_mac_or_id(self):
+        """new port does not have mac address or ID."""
+        new_port = {}
+        self.vimconn.neutron.create_port.return_value = new_port
+        net, port_dict, created_items = {}, {}, {}
+        with self.assertRaises(KeyError):
+            self.vimconn._create_new_port(port_dict, created_items, net)
+        self.vimconn.neutron.create_port.assert_called_once_with({"port": port_dict})
+
+    def test_create_new_port_neutron_create_port_raises_exception(self):
+        """Neutron create port raises exception."""
+        self.vimconn.neutron.create_port.side_effect = VimConnException(
+            "New port is not created."
+        )
+        net, port_dict, created_items = {}, {}, {}
+        with self.assertRaises(VimConnException):
+            self.vimconn._create_new_port(port_dict, created_items, net)
+        self.vimconn.neutron.create_port.assert_called_once_with({"port": port_dict})
+
+    @patch.object(vimconnector, "_prepare_port_dict_security_groups")
+    @patch.object(vimconnector, "_prepare_port_dict_binding")
+    @patch.object(vimconnector, "_prepare_port_dict_mac_ip_addr")
+    @patch.object(vimconnector, "_create_new_port")
+    @patch.object(vimconnector, "_set_fixed_ip")
+    def test_create_port(
+        self,
+        mock_set_fixed_ip,
+        mock_create_new_port,
+        mock_prepare_port_dict_mac_ip_addr,
+        mock_prepare_port_dict_binding,
+        mock_prepare_port_dict_security_groups,
+    ):
+        """Net has name, type, net-id."""
+
+        net = {
+            "net_id": net_id,
+            "name": "management",
+            "type": "virtual",
+        }
+        created_items = {}
+        new_port = {
+            "port": {
+                "id": net_id,
+                "mac_address": mac_address,
+                "name": "management",
+                "fixed_ips": [{"ip_address": ip_addr1}],
+            },
+        }
+        mock_create_new_port.return_value = new_port
+        expected_port = {
+            "port-id": net_id,
+            "tag": "management",
+        }
+        port_dict = {
+            "network_id": net_id,
+            "name": "management",
+            "admin_state_up": True,
+        }
+
+        new_port_result, port_result = self.vimconn._create_port(
+            net, name, created_items
+        )
+
+        self.assertDictEqual(new_port_result, new_port)
+        self.assertDictEqual(port_result, expected_port)
+
+        mock_prepare_port_dict_security_groups.assert_called_once_with(net, port_dict)
+        mock_prepare_port_dict_binding.assert_called_once_with(net, port_dict)
+        mock_prepare_port_dict_mac_ip_addr.assert_called_once_with(net, port_dict)
+        mock_create_new_port.assert_called_once_with(port_dict, created_items, net)
+        mock_set_fixed_ip.assert_called_once_with(new_port, net)
+
+    @patch.object(vimconnector, "_prepare_port_dict_security_groups")
+    @patch.object(vimconnector, "_prepare_port_dict_binding")
+    @patch.object(vimconnector, "_prepare_port_dict_mac_ip_addr")
+    @patch.object(vimconnector, "_create_new_port")
+    @patch.object(vimconnector, "_set_fixed_ip")
+    def test_create_port_no_port_name(
+        self,
+        mock_set_fixed_ip,
+        mock_create_new_port,
+        mock_prepare_port_dict_mac_ip_addr,
+        mock_prepare_port_dict_binding,
+        mock_prepare_port_dict_security_groups,
+    ):
+        """Net has no name."""
+        net = {
+            "net_id": net_id,
+            "type": "virtual",
+        }
+        created_items = {}
+        new_port = {
+            "port": {
+                "id": net_id,
+                "mac_address": mac_address,
+                "name": name,
+                "fixed_ips": [{"ip_address": ip_addr1}],
+            },
+        }
+        mock_create_new_port.return_value = new_port
+        expected_port = {
+            "port-id": net_id,
+            "tag": name,
+        }
+        port_dict = {
+            "network_id": net_id,
+            "admin_state_up": True,
+            "name": name,
+        }
+
+        new_port_result, port_result = self.vimconn._create_port(
+            net, name, created_items
+        )
+
+        self.assertDictEqual(new_port_result, new_port)
+        self.assertDictEqual(port_result, expected_port)
+
+        mock_prepare_port_dict_security_groups.assert_called_once_with(net, port_dict)
+        mock_prepare_port_dict_binding.assert_called_once_with(net, port_dict)
+        mock_prepare_port_dict_mac_ip_addr.assert_called_once_with(net, port_dict)
+        mock_create_new_port.assert_called_once_with(port_dict, created_items, net)
+        mock_set_fixed_ip.assert_called_once_with(new_port, net)
+
+    @patch.object(vimconnector, "_prepare_port_dict_security_groups")
+    @patch.object(vimconnector, "_prepare_port_dict_binding")
+    @patch.object(vimconnector, "_prepare_port_dict_mac_ip_addr")
+    @patch.object(vimconnector, "_create_new_port")
+    @patch.object(vimconnector, "_set_fixed_ip")
+    def test_create_port_nova_api_version_smaller_than_232(
+        self,
+        mock_set_fixed_ip,
+        mock_create_new_port,
+        mock_prepare_port_dict_mac_ip_addr,
+        mock_prepare_port_dict_binding,
+        mock_prepare_port_dict_security_groups,
+    ):
+        """Nova api version is smaller than 2.32."""
+        self.vimconn.nova.api_version.get_string.return_value = "2.30"
+        net = {
+            "net_id": net_id,
+            "type": "virtual",
+        }
+        created_items = {}
+        new_port = {
+            "port": {
+                "id": net_id,
+                "mac_address": mac_address,
+                "name": name,
+                "fixed_ips": [{"ip_address": ip_addr1}],
+            },
+        }
+        mock_create_new_port.return_value = new_port
+        expected_port = {
+            "port-id": net_id,
+        }
+        port_dict = {
+            "network_id": net_id,
+            "admin_state_up": True,
+            "name": name,
+        }
+
+        new_port_result, port_result = self.vimconn._create_port(
+            net, name, created_items
+        )
+
+        self.assertDictEqual(new_port_result, new_port)
+        self.assertDictEqual(port_result, expected_port)
+
+        mock_prepare_port_dict_security_groups.assert_called_once_with(net, port_dict)
+        mock_prepare_port_dict_binding.assert_called_once_with(net, port_dict)
+        mock_prepare_port_dict_mac_ip_addr.assert_called_once_with(net, port_dict)
+        mock_create_new_port.assert_called_once_with(port_dict, created_items, net)
+        mock_set_fixed_ip.assert_called_once_with(new_port, net)
+
+    @patch.object(vimconnector, "_prepare_port_dict_security_groups")
+    @patch.object(vimconnector, "_prepare_port_dict_binding")
+    @patch.object(vimconnector, "_prepare_port_dict_mac_ip_addr")
+    @patch.object(vimconnector, "_create_new_port")
+    @patch.object(vimconnector, "_set_fixed_ip")
+    def test_create_port_create_new_port_raise_exception(
+        self,
+        mock_set_fixed_ip,
+        mock_create_new_port,
+        mock_prepare_port_dict_mac_ip_addr,
+        mock_prepare_port_dict_binding,
+        mock_prepare_port_dict_security_groups,
+    ):
+        """_create_new_port method raises exception."""
+        net = {
+            "net_id": net_id,
+            "type": "virtual",
+        }
+        created_items = {}
+        mock_create_new_port.side_effect = Exception
+        port_dict = {
+            "network_id": net_id,
+            "admin_state_up": True,
+            "name": name,
+        }
+
+        with self.assertRaises(Exception):
+            self.vimconn._create_port(net, name, created_items)
+
+        mock_prepare_port_dict_security_groups.assert_called_once_with(net, port_dict)
+        mock_prepare_port_dict_binding.assert_called_once_with(net, port_dict)
+        mock_prepare_port_dict_mac_ip_addr.assert_called_once_with(net, port_dict)
+        mock_create_new_port.assert_called_once_with(port_dict, created_items, net)
+        mock_set_fixed_ip.assert_not_called()
+
+    @patch.object(vimconnector, "_prepare_port_dict_security_groups")
+    @patch.object(vimconnector, "_prepare_port_dict_binding")
+    @patch.object(vimconnector, "_prepare_port_dict_mac_ip_addr")
+    @patch.object(vimconnector, "_create_new_port")
+    @patch.object(vimconnector, "_set_fixed_ip")
+    def test_create_port_create_sec_groups_raises_exception(
+        self,
+        mock_set_fixed_ip,
+        mock_create_new_port,
+        mock_prepare_port_dict_mac_ip_addr,
+        mock_prepare_port_dict_binding,
+        mock_prepare_port_dict_security_groups,
+    ):
+        """_prepare_port_dict_security_groups method raises exception."""
+        net = {
+            "net_id": net_id,
+            "type": "virtual",
+        }
+        created_items = {}
+        mock_prepare_port_dict_security_groups.side_effect = Exception
+        port_dict = {
+            "network_id": net_id,
+            "admin_state_up": True,
+            "name": name,
+        }
+
+        with self.assertRaises(Exception):
+            self.vimconn._create_port(net, name, created_items)
+
+        mock_prepare_port_dict_security_groups.assert_called_once_with(net, port_dict)
+
+        mock_prepare_port_dict_binding.assert_not_called()
+        mock_prepare_port_dict_mac_ip_addr.assert_not_called()
+        mock_create_new_port.assert_not_called()
+        mock_set_fixed_ip.assert_not_called()
+
+    @patch.object(vimconnector, "_prepare_port_dict_security_groups")
+    @patch.object(vimconnector, "_prepare_port_dict_binding")
+    @patch.object(vimconnector, "_prepare_port_dict_mac_ip_addr")
+    @patch.object(vimconnector, "_create_new_port")
+    @patch.object(vimconnector, "_set_fixed_ip")
+    def test_create_port_create_port_dict_binding_raise_exception(
+        self,
+        mock_set_fixed_ip,
+        mock_create_new_port,
+        mock_prepare_port_dict_mac_ip_addr,
+        mock_prepare_port_dict_binding,
+        mock_prepare_port_dict_security_groups,
+    ):
+        """_prepare_port_dict_binding method raises exception."""
+
+        net = {
+            "net_id": net_id,
+            "type": "virtual",
+        }
+        created_items = {}
+        mock_prepare_port_dict_binding.side_effect = Exception
+        port_dict = {
+            "network_id": net_id,
+            "admin_state_up": True,
+            "name": name,
+        }
+
+        with self.assertRaises(Exception):
+            self.vimconn._create_port(net, name, created_items)
+
+        mock_prepare_port_dict_security_groups.assert_called_once_with(net, port_dict)
+
+        mock_prepare_port_dict_binding.assert_called_once_with(net, port_dict)
+
+        mock_prepare_port_dict_mac_ip_addr.assert_not_called()
+        mock_create_new_port.assert_not_called()
+        mock_set_fixed_ip.assert_not_called()
+
+    @patch.object(vimconnector, "_prepare_port_dict_security_groups")
+    @patch.object(vimconnector, "_prepare_port_dict_binding")
+    @patch.object(vimconnector, "_prepare_port_dict_mac_ip_addr")
+    @patch.object(vimconnector, "_create_new_port")
+    @patch.object(vimconnector, "_set_fixed_ip")
+    def test_create_port_create_port_mac_ip_addr_raise_exception(
+        self,
+        mock_set_fixed_ip,
+        mock_create_new_port,
+        mock_prepare_port_dict_mac_ip_addr,
+        mock_prepare_port_dict_binding,
+        mock_prepare_port_dict_security_groups,
+    ):
+        """_prepare_port_dict_mac_ip_addr method raises exception."""
+        net = {
+            "net_id": net_id,
+            "type": "virtual",
+        }
+        created_items = {}
+        mock_prepare_port_dict_mac_ip_addr.side_effect = Exception
+        port_dict = {
+            "network_id": net_id,
+            "admin_state_up": True,
+            "name": name,
+        }
+
+        with self.assertRaises(Exception):
+            self.vimconn._create_port(net, name, created_items)
+
+        mock_prepare_port_dict_security_groups.assert_called_once_with(net, port_dict)
+        mock_prepare_port_dict_binding.assert_called_once_with(net, port_dict)
+        mock_prepare_port_dict_mac_ip_addr.assert_called_once_with(net, port_dict)
+
+        mock_create_new_port.assert_not_called()
+        mock_set_fixed_ip.assert_not_called()
+
+    @patch.object(vimconnector, "_prepare_port_dict_security_groups")
+    @patch.object(vimconnector, "_prepare_port_dict_binding")
+    @patch.object(vimconnector, "_prepare_port_dict_mac_ip_addr")
+    @patch.object(vimconnector, "_create_new_port")
+    @patch.object(vimconnector, "_set_fixed_ip")
+    def test_create_port_create_port_set_fixed_ip_raise_exception(
+        self,
+        mock_set_fixed_ip,
+        mock_create_new_port,
+        mock_prepare_port_dict_mac_ip_addr,
+        mock_prepare_port_dict_binding,
+        mock_prepare_port_dict_security_groups,
+    ):
+        """_set_fixed_ip method raises exception."""
+        net = {
+            "net_id": net_id,
+            "type": "virtual",
+        }
+        created_items = {}
+        mock_set_fixed_ip.side_effect = VimConnException(
+            "Port detail is missing in new_port."
+        )
+        port_dict = {
+            "network_id": net_id,
+            "admin_state_up": True,
+            "name": name,
+        }
+        new_port = {
+            "port": {
+                "id": net_id,
+                "mac_address": mac_address,
+                "name": name,
+                "fixed_ips": [{"ip_address": ip_addr1}],
+            },
+        }
+        mock_create_new_port.return_value = new_port
+
+        with self.assertRaises(VimConnException):
+            self.vimconn._create_port(net, name, created_items)
+
+        mock_prepare_port_dict_security_groups.assert_called_once_with(net, port_dict)
+        mock_prepare_port_dict_binding.assert_called_once_with(net, port_dict)
+        mock_prepare_port_dict_mac_ip_addr.assert_called_once_with(net, port_dict)
+        mock_create_new_port.assert_called_once_with(port_dict, created_items, net)
+        mock_set_fixed_ip.assert_called_once_with(new_port, net)
+
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_create_port")
+    def test_prepare_network_for_vm_instance_no_net_id(
+        self, mock_create_port, mock_reload_connection
+    ):
+        """Nets do not have net_id"""
+        mock_reload_connection.side_effect = None
+        created_items = {}
+        net_list = [
+            {
+                "use": "mgmt",
+                "port_security": False,
+                "exit_on_floating_ip_error": False,
+                "port_security_disable_strategy": "full",
+            },
+            {
+                "port_security": True,
+                "exit_on_floating_ip_error": False,
+                "floating_ip": True,
+            },
+        ]
+        net_list_vim = []
+        external_network, no_secured_ports = [], []
+        expected_external_network, expected_no_secured_ports = [], []
+        expected_net_list_vim = []
+
+        self.vimconn._prepare_network_for_vminstance(
+            name,
+            net_list,
+            created_items,
+            net_list_vim,
+            external_network,
+            no_secured_ports,
+        )
+        self.assertEqual(expected_net_list_vim, net_list_vim)
+        self.assertEqual(external_network, expected_external_network)
+        self.assertEqual(expected_no_secured_ports, no_secured_ports)
+
+        mock_create_port.assert_not_called()
+
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_create_port")
+    def test_prepare_network_for_vm_instance_empty_net_list(
+        self, mock_create_port, mock_reload_connection
+    ):
+        """Net list is empty."""
+        mock_reload_connection.side_effect = None
+        created_items = {}
+        net_list_vim = []
+        external_network, no_secured_ports = [], []
+        expected_external_network, expected_no_secured_ports = [], []
+        expected_net_list_vim = []
+
+        self.vimconn._prepare_network_for_vminstance(
+            name,
+            net_list,
+            created_items,
+            net_list_vim,
+            external_network,
+            no_secured_ports,
+        )
+        self.assertEqual(expected_net_list_vim, net_list_vim)
+        self.assertEqual(external_network, expected_external_network)
+        self.assertEqual(expected_no_secured_ports, no_secured_ports)
+
+        mock_create_port.assert_not_called()
+
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_create_port")
+    def test_prepare_network_for_vm_instance_use_floating_ip_false_mgmt_net(
+        self, mock_create_port, mock_reload_connection
+    ):
+        """Nets have net-id, floating_ip False, mgmt network."""
+        mock_reload_connection.side_effect = None
+        created_items = {}
+        net_list = [
+            {
+                "net_id": net2_id,
+                "floating_ip": False,
+                "use": "mgmt",
+            }
+        ]
+        net_list_vim = []
+        mock_create_port.side_effect = [
+            (
+                {
+                    "port": {
+                        "id": port2_id,
+                        "mac_address": mac_address,
+                        "name": name,
+                    },
+                },
+                {"port-dict": port2_id},
+            ),
+        ]
+        external_network, no_secured_ports = [], []
+        expected_external_network, expected_no_secured_ports = [], []
+        expected_net_list_vim = [{"port-dict": port2_id}]
+        self.vimconn._prepare_network_for_vminstance(
+            name,
+            net_list,
+            created_items,
+            net_list_vim,
+            external_network,
+            no_secured_ports,
+        )
+        self.assertEqual(expected_net_list_vim, net_list_vim)
+        self.assertEqual(external_network, expected_external_network)
+        self.assertEqual(expected_no_secured_ports, no_secured_ports)
+
+        mock_create_port.assert_called_once_with(
+            {
+                "net_id": net2_id,
+                "floating_ip": False,
+                "use": "mgmt",
+            },
+            name,
+            created_items,
+        )
+
+    @patch.object(vimconnector, "_reload_connection")
+    def test_prepare_network_for_vm_instance_mgmt_net_net_port_security_and_floating_ip_true(
+        self, mock_reload_connection
+    ):
+        """Nets have net-id, use_floating_ip False in VIM config, mgmt network, net floating_ip is True."""
+        self.vimconn.config["use_floating_ip"] = False
+        mock_create_port = CopyingMock()
+        mock_reload_connection.side_effect = None
+        created_items = {}
+        net_list = [
+            {
+                "net_id": net2_id,
+                "floating_ip": True,
+                "use": "mgmt",
+            }
+        ]
+        net_list_vim = []
+        mock_create_port.side_effect = [
+            (
+                {
+                    "port": {
+                        "id": port2_id,
+                        "mac_address": mac_address,
+                        "name": name,
+                    },
+                },
+                {"port-dict": port2_id},
+            ),
+        ]
+        external_network, no_secured_ports = [], []
+        expected_external_network = [
+            {
+                "net_id": net2_id,
+                "floating_ip": True,
+                "use": "mgmt",
+                "exit_on_floating_ip_error": True,
+            },
+        ]
+        expected_no_secured_ports = []
+        expected_net_list_vim = [{"port-dict": port2_id}]
+        with patch.object(vimconnector, "_create_port", mock_create_port):
+            self.vimconn._prepare_network_for_vminstance(
+                name,
+                net_list,
+                created_items,
+                net_list_vim,
+                external_network,
+                no_secured_ports,
+            )
+        self.assertEqual(expected_net_list_vim, net_list_vim)
+        self.assertEqual(external_network, expected_external_network)
+        self.assertEqual(expected_no_secured_ports, no_secured_ports)
+
+        mock_create_port.assert_called_once_with(
+            {
+                "net_id": net2_id,
+                "floating_ip": True,
+                "use": "mgmt",
+            },
+            name,
+            created_items,
+        )
+
+    @patch.object(vimconnector, "_reload_connection")
+    def test_prepare_network_for_vm_instance_use_floating_ip_true_mgmt_net_port_security_false(
+        self, mock_reload_connection
+    ):
+        """Nets have net-id, use_floating_ip is True in VIM config, mgmt network, net port security is False."""
+        mock_create_port = CopyingMock()
+        self.vimconn.config["use_floating_ip"] = True
+        self.vimconn.config["no_port_security_extension"] = False
+        mock_reload_connection.side_effect = None
+        created_items = {}
+
+        # Management net with port security disabled via the "full" strategy.
+        net_list = [
+            {
+                "net_id": net2_id,
+                "use": "mgmt",
+                "port_security": False,
+                "exit_on_floating_ip_error": False,
+                "port_security_disable_strategy": "full",
+            }
+        ]
+        net_list_vim = []
+        mock_create_port.side_effect = [
+            (
+                {
+                    "port": {
+                        "id": port2_id,
+                        "mac_address": mac_address,
+                        "name": name,
+                    },
+                },
+                {"port-dict": port2_id},
+            ),
+        ]
+        external_network, no_secured_ports = [], []
+        # Because the VIM config enables floating IPs and the net is mgmt,
+        # the method is expected to tag the net with floating_ip=True.
+        expected_external_network = [
+            {
+                "net_id": net2_id,
+                "use": "mgmt",
+                "port_security": False,
+                "exit_on_floating_ip_error": False,
+                "port_security_disable_strategy": "full",
+                "floating_ip": True,
+            },
+        ]
+        # Port security disabled -> port is recorded as unsecured with its strategy.
+        expected_no_secured_ports = [(port2_id, "full")]
+        expected_net_list_vim = [{"port-dict": port2_id}]
+        with patch.object(vimconnector, "_create_port", mock_create_port):
+            self.vimconn._prepare_network_for_vminstance(
+                name,
+                net_list,
+                created_items,
+                net_list_vim,
+                external_network,
+                no_secured_ports,
+            )
+
+        mock_create_port.assert_called_once_with(
+            {
+                "net_id": net2_id,
+                "use": "mgmt",
+                "port_security": False,
+                "exit_on_floating_ip_error": False,
+                "port_security_disable_strategy": "full",
+            },
+            name,
+            created_items,
+        )
+        self.assertEqual(expected_net_list_vim, net_list_vim)
+        self.assertEqual(external_network, expected_external_network)
+        self.assertEqual(expected_no_secured_ports, no_secured_ports)
+
+    @patch.object(vimconnector, "_reload_connection")
+    def test_prepare_network_for_vm_instance_use_fip_true_non_mgmt_net_port_security_false(
+        self, mock_reload_connection
+    ):
+        """Nets have net-id, use_floating_ip True in VIM config, non-mgmt network, port security is False."""
+        mock_create_port = CopyingMock()
+        self.vimconn.config["use_floating_ip"] = True
+        self.vimconn.config["no_port_security_extension"] = False
+        mock_reload_connection.side_effect = None
+        created_items = {}
+
+        # Non-management net ("other"), so no floating IP should be requested
+        # even though the VIM config enables floating IPs.
+        net_list = [
+            {
+                "net_id": net2_id,
+                "use": "other",
+                "port_security": False,
+                "port_security_disable_strategy": "full",
+            }
+        ]
+        net_list_vim = []
+        mock_create_port.side_effect = [
+            (
+                {
+                    "port": {
+                        "id": port2_id,
+                        "mac_address": mac_address,
+                        "name": name,
+                    },
+                },
+                {"port-dict": port2_id},
+            ),
+        ]
+        external_network, no_secured_ports = [], []
+        expected_external_network = []
+        # Port security disabled -> port still ends up in the unsecured list.
+        expected_no_secured_ports = [(port2_id, "full")]
+        expected_net_list_vim = [{"port-dict": port2_id}]
+        with patch.object(vimconnector, "_create_port", mock_create_port):
+            self.vimconn._prepare_network_for_vminstance(
+                name,
+                net_list,
+                created_items,
+                net_list_vim,
+                external_network,
+                no_secured_ports,
+            )
+
+        mock_create_port.assert_called_once_with(
+            {
+                "net_id": net2_id,
+                "use": "other",
+                "port_security": False,
+                "port_security_disable_strategy": "full",
+            },
+            name,
+            created_items,
+        )
+        self.assertEqual(expected_net_list_vim, net_list_vim)
+        self.assertEqual(external_network, expected_external_network)
+        self.assertEqual(expected_no_secured_ports, no_secured_ports)
+
+    @patch.object(vimconnector, "_reload_connection")
+    def test_prepare_network_for_vm_instance_use_fip_true_non_mgmt_net_port_security_true(
+        self, mock_reload_connection
+    ):
+        """Nets have net-id, use_floating_ip is True in VIM config, non-mgmt network, net port security is True."""
+        mock_create_port = CopyingMock()
+        self.vimconn.config["use_floating_ip"] = True
+        self.vimconn.config["no_port_security_extension"] = True
+        mock_reload_connection.side_effect = None
+        created_items = {}
+
+        # Non-mgmt net with port security enabled: expect no floating IP and
+        # no entry in the unsecured-ports list.
+        net_list = [
+            {
+                "net_id": net2_id,
+                "use": "other",
+                "port_security": True,
+                "port_security_disable_strategy": "full",
+            }
+        ]
+        net_list_vim = []
+        mock_create_port.side_effect = [
+            (
+                {
+                    "port": {
+                        "id": port2_id,
+                        "mac_address": mac_address,
+                        "name": name,
+                    },
+                },
+                {"port-dict": port2_id},
+            ),
+        ]
+        external_network, no_secured_ports = [], []
+        expected_external_network = []
+        expected_no_secured_ports = []
+        expected_net_list_vim = [{"port-dict": port2_id}]
+        with patch.object(vimconnector, "_create_port", mock_create_port):
+            self.vimconn._prepare_network_for_vminstance(
+                name,
+                net_list,
+                created_items,
+                net_list_vim,
+                external_network,
+                no_secured_ports,
+            )
+
+        mock_create_port.assert_called_once_with(
+            {
+                "net_id": net2_id,
+                "use": "other",
+                "port_security": True,
+                "port_security_disable_strategy": "full",
+            },
+            name,
+            created_items,
+        )
+        self.assertEqual(expected_net_list_vim, net_list_vim)
+        self.assertEqual(external_network, expected_external_network)
+        self.assertEqual(expected_no_secured_ports, no_secured_ports)
+
+    @patch.object(vimconnector, "_reload_connection")
+    def test_prepare_network_for_vm_instance_create_port_raise_exception(
+        self, mock_reload_connection
+    ):
+        """_create_port method raise exception."""
+        mock_create_port = CopyingMock()
+        self.vimconn.config["use_floating_ip"] = True
+        self.vimconn.config["no_port_security_extension"] = True
+        mock_reload_connection.side_effect = None
+        created_items = {}
+
+        net_list = [
+            {
+                "net_id": net2_id,
+                "use": "other",
+                "port_security": True,
+                "port_security_disable_strategy": "full",
+            }
+        ]
+        net_list_vim = []
+        # Make port creation fail; the exception must propagate to the caller.
+        mock_create_port.side_effect = KeyError
+        external_network, no_secured_ports = [], []
+        expected_external_network = []
+        expected_no_secured_ports = []
+        expected_net_list_vim = []
+        with patch.object(vimconnector, "_create_port", mock_create_port):
+            with self.assertRaises(Exception) as err:
+                self.vimconn._prepare_network_for_vminstance(
+                    name,
+                    net_list,
+                    created_items,
+                    net_list_vim,
+                    external_network,
+                    no_secured_ports,
+                )
+
+        self.assertEqual(type(err.exception), KeyError)
+
+        mock_create_port.assert_called_once_with(
+            {
+                "net_id": net2_id,
+                "use": "other",
+                "port_security": True,
+                "port_security_disable_strategy": "full",
+            },
+            name,
+            created_items,
+        )
+        # The failure must leave all output containers untouched.
+        self.assertEqual(expected_net_list_vim, net_list_vim)
+        self.assertEqual(external_network, expected_external_network)
+        self.assertEqual(expected_no_secured_ports, no_secured_ports)
+
+    @patch.object(vimconnector, "_reload_connection")
+    def test_prepare_network_for_vm_instance_reload_connection_raise_exception(
+        self, mock_reload_connection
+    ):
+        """_reload_connection method raises exception."""
+        mock_create_port = CopyingMock()
+        # Connection refresh fails before any port work can start.
+        mock_reload_connection.side_effect = VimConnConnectionException(
+            "Connection failed."
+        )
+        self.vimconn.config["use_floating_ip"] = True
+        self.vimconn.config["no_port_security_extension"] = True
+        created_items = {}
+
+        net_list = [
+            {
+                "net_id": net2_id,
+                "use": "other",
+                "port_security": True,
+                "port_security_disable_strategy": "full",
+            }
+        ]
+        net_list_vim = []
+        mock_create_port.side_effect = None
+        external_network, no_secured_ports = [], []
+        expected_external_network = []
+        expected_no_secured_ports = []
+        expected_net_list_vim = []
+        with patch.object(vimconnector, "_create_port", mock_create_port):
+            with self.assertRaises(Exception) as err:
+                self.vimconn._prepare_network_for_vminstance(
+                    name,
+                    net_list,
+                    created_items,
+                    net_list_vim,
+                    external_network,
+                    no_secured_ports,
+                )
+
+        self.assertEqual(type(err.exception), VimConnConnectionException)
+        self.assertEqual(str(err.exception), "Connection failed.")
+        mock_reload_connection.assert_called_once()
+        # Port creation must never be attempted when the connection fails.
+        mock_create_port.assert_not_called()
+        self.assertEqual(expected_net_list_vim, net_list_vim)
+        self.assertEqual(external_network, expected_external_network)
+        self.assertEqual(expected_no_secured_ports, no_secured_ports)
+
+    def test_prepare_persistent_root_volumes_vim_using_volume_id(self):
+        """Existing persistent root volume with vim_volume_id."""
+        vm_av_zone = ["nova"]
+        base_disk_index = ord("a")  # "a" -> device name "vda"
+        disk = {"vim_volume_id": volume_id}
+        block_device_mapping = {}
+        existing_vim_volumes = []
+        created_items = {}
+        expected_boot_vol_id = None
+        expected_block_device_mapping = {"vda": volume_id}
+        expected_existing_vim_volumes = [{"id": volume_id}]
+        boot_volume_id = self.vimconn._prepare_persistent_root_volumes(
+            name,
+            vm_av_zone,
+            disk,
+            base_disk_index,
+            block_device_mapping,
+            existing_vim_volumes,
+            created_items,
+        )
+        self.assertEqual(boot_volume_id, expected_boot_vol_id)
+        self.assertDictEqual(block_device_mapping, expected_block_device_mapping)
+        self.assertEqual(existing_vim_volumes, expected_existing_vim_volumes)
+        # The pre-existing VIM volume is reused; no new cinder volume is created.
+        self.vimconn.cinder.volumes.create.assert_not_called()
+
+    @patch.object(vimconnector, "update_block_device_mapping")
+    def test_prepare_persistent_non_root_volumes_vim_using_volume_id(
+        self, mock_update_block_device_mapping
+    ):
+        """Existing persistent non root volume with vim_volume_id."""
+        vm_av_zone = ["nova"]
+        base_disk_index = ord("b")  # "b" -> device name "vdb"
+        disk = {"vim_volume_id": volume_id}
+        block_device_mapping = {}
+        existing_vim_volumes = []
+        created_items = {}
+        expected_block_device_mapping = {"vdb": volume_id}
+        expected_existing_vim_volumes = [{"id": volume_id}]
+        self.vimconn._prepare_non_root_persistent_volumes(
+            name,
+            disk,
+            vm_av_zone,
+            block_device_mapping,
+            base_disk_index,
+            existing_vim_volumes,
+            created_items,
+        )
+        self.assertDictEqual(block_device_mapping, expected_block_device_mapping)
+        self.assertEqual(existing_vim_volumes, expected_existing_vim_volumes)
+        # Volume already exists in the VIM: no creation, no mapping update call.
+        self.vimconn.cinder.volumes.create.assert_not_called()
+        mock_update_block_device_mapping.assert_not_called()
+
+    @patch.object(vimconnector, "update_block_device_mapping")
+    def test_prepare_persistent_root_volumes_using_vim_id(
+        self, mock_update_block_device_mapping
+    ):
+        """Existing persistent root volume with vim_id."""
+        vm_av_zone = ["nova"]
+        base_disk_index = ord("a")  # "a" -> device name "vda"
+        # "vim_id" is the alternative key to reference an existing volume.
+        disk = {"vim_id": volume_id}
+        block_device_mapping = {}
+        existing_vim_volumes = []
+        created_items = {}
+        expected_boot_vol_id = None
+        expected_block_device_mapping = {"vda": volume_id}
+        expected_existing_vim_volumes = [{"id": volume_id}]
+        boot_volume_id = self.vimconn._prepare_persistent_root_volumes(
+            name,
+            vm_av_zone,
+            disk,
+            base_disk_index,
+            block_device_mapping,
+            existing_vim_volumes,
+            created_items,
+        )
+        self.assertEqual(boot_volume_id, expected_boot_vol_id)
+        self.assertDictEqual(block_device_mapping, expected_block_device_mapping)
+        self.assertEqual(existing_vim_volumes, expected_existing_vim_volumes)
+        self.vimconn.cinder.volumes.create.assert_not_called()
+        mock_update_block_device_mapping.assert_not_called()
+
+    @patch.object(vimconnector, "update_block_device_mapping")
+    def test_prepare_persistent_non_root_volumes_using_vim_id(
+        self, mock_update_block_device_mapping
+    ):
+        """Existing persistent non root volume with vim_id."""
+        vm_av_zone = ["nova"]
+        base_disk_index = ord("b")  # "b" -> device name "vdb"
+        disk = {"vim_id": volume_id}
+        block_device_mapping = {}
+        existing_vim_volumes = []
+        created_items = {}
+
+        expected_block_device_mapping = {"vdb": volume_id}
+        expected_existing_vim_volumes = [{"id": volume_id}]
+        self.vimconn._prepare_non_root_persistent_volumes(
+            name,
+            disk,
+            vm_av_zone,
+            block_device_mapping,
+            base_disk_index,
+            existing_vim_volumes,
+            created_items,
+        )
+
+        self.assertDictEqual(block_device_mapping, expected_block_device_mapping)
+        self.assertEqual(existing_vim_volumes, expected_existing_vim_volumes)
+        self.vimconn.cinder.volumes.create.assert_not_called()
+        mock_update_block_device_mapping.assert_not_called()
+
+    @patch.object(vimconnector, "update_block_device_mapping")
+    def test_prepare_persistent_root_volumes_create(
+        self, mock_update_block_device_mapping
+    ):
+        """Create persistent root volume."""
+        self.vimconn.cinder.volumes.create.return_value.id = volume_id2
+        vm_av_zone = ["nova"]
+        base_disk_index = ord("a")  # 97; "a" -> device name "vda"
+        disk = {"size": 10, "image_id": image_id}
+        block_device_mapping = {}
+        existing_vim_volumes = []
+        created_items = {}
+        expected_boot_vol_id = volume_id2
+        boot_volume_id = self.vimconn._prepare_persistent_root_volumes(
+            name,
+            vm_av_zone,
+            disk,
+            base_disk_index,
+            block_device_mapping,
+            existing_vim_volumes,
+            created_items,
+        )
+        self.assertEqual(boot_volume_id, expected_boot_vol_id)
+        self.vimconn.cinder.volumes.create.assert_called_once_with(
+            size=10,
+            name="basicvmvda",
+            imageRef=image_id,
+            availability_zone=["nova"],
+        )
+        # Verify the kwargs forwarded to update_block_device_mapping.
+        mock_update_block_device_mapping.assert_called_once()
+        _call_mock_update_block_device_mapping = (
+            mock_update_block_device_mapping.call_args_list
+        )
+        self.assertEqual(
+            _call_mock_update_block_device_mapping[0].kwargs["block_device_mapping"],
+            block_device_mapping,
+        )
+        self.assertEqual(
+            _call_mock_update_block_device_mapping[0].kwargs["base_disk_index"], 97
+        )
+        self.assertEqual(_call_mock_update_block_device_mapping[0].kwargs["disk"], disk)
+        self.assertEqual(
+            _call_mock_update_block_device_mapping[0].kwargs["created_items"], {}
+        )
+
+    @patch.object(vimconnector, "update_block_device_mapping")
+    def test_prepare_persistent_root_volumes_create_with_keep(
+        self, mock_update_block_device_mapping
+    ):
+        """Create persistent root volume, disk has keep parameter."""
+        self.vimconn.cinder.volumes.create.return_value.id = volume_id2
+        vm_av_zone = ["nova"]
+        base_disk_index = ord("a")  # 97; "a" -> device name "vda"
+        # "keep": True marks the volume to be kept on VM deletion.
+        disk = {"size": 10, "image_id": image_id, "keep": True}
+        block_device_mapping = {}
+        existing_vim_volumes = []
+        created_items = {}
+        expected_boot_vol_id = volume_id2
+        expected_existing_vim_volumes = []
+        boot_volume_id = self.vimconn._prepare_persistent_root_volumes(
+            name,
+            vm_av_zone,
+            disk,
+            base_disk_index,
+            block_device_mapping,
+            existing_vim_volumes,
+            created_items,
+        )
+        self.assertEqual(boot_volume_id, expected_boot_vol_id)
+        self.assertEqual(existing_vim_volumes, expected_existing_vim_volumes)
+        self.vimconn.cinder.volumes.create.assert_called_once_with(
+            size=10,
+            name="basicvmvda",
+            imageRef=image_id,
+            availability_zone=["nova"],
+        )
+        mock_update_block_device_mapping.assert_called_once()
+        _call_mock_update_block_device_mapping = (
+            mock_update_block_device_mapping.call_args_list
+        )
+        self.assertEqual(
+            _call_mock_update_block_device_mapping[0].kwargs["block_device_mapping"],
+            block_device_mapping,
+        )
+        self.assertEqual(
+            _call_mock_update_block_device_mapping[0].kwargs["base_disk_index"], 97
+        )
+        self.assertEqual(_call_mock_update_block_device_mapping[0].kwargs["disk"], disk)
+        self.assertEqual(
+            _call_mock_update_block_device_mapping[0].kwargs["created_items"], {}
+        )
+
+    @patch.object(vimconnector, "update_block_device_mapping")
+    def test_prepare_persistent_non_root_volumes_create(
+        self, mock_update_block_device_mapping
+    ):
+        """Create persistent non-root volume."""
+        self.vimconn.cinder = CopyingMock()
+        self.vimconn.cinder.volumes.create.return_value.id = volume_id2
+        vm_av_zone = ["nova"]
+        base_disk_index = ord("a")  # 97; "a" -> device name "vda"
+        disk = {"size": 10}  # no image_id: non-root data volume
+        block_device_mapping = {}
+        existing_vim_volumes = []
+        created_items = {}
+        expected_existing_vim_volumes = []
+        self.vimconn._prepare_non_root_persistent_volumes(
+            name,
+            disk,
+            vm_av_zone,
+            block_device_mapping,
+            base_disk_index,
+            existing_vim_volumes,
+            created_items,
+        )
+
+        self.assertEqual(existing_vim_volumes, expected_existing_vim_volumes)
+        # Non-root create: no imageRef keyword expected.
+        self.vimconn.cinder.volumes.create.assert_called_once_with(
+            size=10, name="basicvmvda", availability_zone=["nova"]
+        )
+        mock_update_block_device_mapping.assert_called_once()
+        _call_mock_update_block_device_mapping = (
+            mock_update_block_device_mapping.call_args_list
+        )
+        self.assertEqual(
+            _call_mock_update_block_device_mapping[0].kwargs["block_device_mapping"],
+            block_device_mapping,
+        )
+        self.assertEqual(
+            _call_mock_update_block_device_mapping[0].kwargs["base_disk_index"], 97
+        )
+        self.assertEqual(_call_mock_update_block_device_mapping[0].kwargs["disk"], disk)
+        self.assertEqual(
+            _call_mock_update_block_device_mapping[0].kwargs["created_items"], {}
+        )
+
+    @patch.object(vimconnector, "update_block_device_mapping")
+    def test_prepare_persistent_non_root_volumes_create_with_keep(
+        self, mock_update_block_device_mapping
+    ):
+        """Create persistent non-root volume, disk has keep parameter."""
+        self.vimconn.cinder = CopyingMock()
+        self.vimconn.cinder.volumes.create.return_value.id = volume_id2
+        vm_av_zone = ["nova"]
+        base_disk_index = ord("a")  # 97; "a" -> device name "vda"
+        # "keep": True marks the volume to be kept on VM deletion.
+        disk = {"size": 10, "keep": True}
+        block_device_mapping = {}
+        existing_vim_volumes = []
+        created_items = {}
+        expected_existing_vim_volumes = []
+        self.vimconn._prepare_non_root_persistent_volumes(
+            name,
+            disk,
+            vm_av_zone,
+            block_device_mapping,
+            base_disk_index,
+            existing_vim_volumes,
+            created_items,
+        )
+
+        self.assertEqual(existing_vim_volumes, expected_existing_vim_volumes)
+        self.vimconn.cinder.volumes.create.assert_called_once_with(
+            size=10, name="basicvmvda", availability_zone=["nova"]
+        )
+        mock_update_block_device_mapping.assert_called_once()
+        _call_mock_update_block_device_mapping = (
+            mock_update_block_device_mapping.call_args_list
+        )
+        self.assertEqual(
+            _call_mock_update_block_device_mapping[0].kwargs["block_device_mapping"],
+            block_device_mapping,
+        )
+        self.assertEqual(
+            _call_mock_update_block_device_mapping[0].kwargs["base_disk_index"], 97
+        )
+        self.assertEqual(_call_mock_update_block_device_mapping[0].kwargs["disk"], disk)
+        self.assertEqual(
+            _call_mock_update_block_device_mapping[0].kwargs["created_items"], {}
+        )
+
+    @patch.object(vimconnector, "update_block_device_mapping")
+    def test_prepare_persistent_root_volumes_create_raise_exception(
+        self, mock_update_block_device_mapping
+    ):
+        """Create persistent root volume raise exception."""
+        self.vimconn.cinder.volumes.create.side_effect = Exception
+        vm_av_zone = ["nova"]
+        base_disk_index = ord("a")  # "a" -> device name "vda"
+        disk = {"size": 10, "image_id": image_id}
+        block_device_mapping = {}
+        existing_vim_volumes = []
+        created_items = {}
+
+        # cinder create fails, so the method must propagate the exception.
+        # (An assertEqual on the return value placed after the raising call
+        # inside this block could never run, so none is written here.)
+        with self.assertRaises(Exception):
+            self.vimconn._prepare_persistent_root_volumes(
+                name,
+                vm_av_zone,
+                disk,
+                base_disk_index,
+                block_device_mapping,
+                existing_vim_volumes,
+                created_items,
+            )
+
+        self.vimconn.cinder.volumes.create.assert_called_once_with(
+            size=10,
+            name="basicvmvda",
+            imageRef=image_id,
+            availability_zone=["nova"],
+        )
+        # Failure must not leak partial state into the output containers.
+        self.assertEqual(existing_vim_volumes, [])
+        self.assertEqual(block_device_mapping, {})
+        self.assertEqual(created_items, {})
+        mock_update_block_device_mapping.assert_not_called()
+
+    @patch.object(vimconnector, "update_block_device_mapping")
+    def test_prepare_persistent_non_root_volumes_create_raise_exception(
+        self, mock_update_block_device_mapping
+    ):
+        """Create persistent non-root volume raise exception."""
+        self.vimconn.cinder.volumes.create.side_effect = Exception
+        vm_av_zone = ["nova"]
+        base_disk_index = ord("b")  # "b" -> device name "vdb"
+        disk = {"size": 10}
+        block_device_mapping = {}
+        existing_vim_volumes = []
+        created_items = {}
+
+        # cinder create fails: the exception must propagate to the caller.
+        with self.assertRaises(Exception):
+            self.vimconn._prepare_non_root_persistent_volumes(
+                name,
+                disk,
+                vm_av_zone,
+                block_device_mapping,
+                base_disk_index,
+                existing_vim_volumes,
+                created_items,
+            )
+
+        self.vimconn.cinder.volumes.create.assert_called_once_with(
+            size=10, name="basicvmvdb", availability_zone=["nova"]
+        )
+        # Failure must not leak partial state into the output containers.
+        self.assertEqual(existing_vim_volumes, [])
+        self.assertEqual(block_device_mapping, {})
+        self.assertEqual(created_items, {})
+        mock_update_block_device_mapping.assert_not_called()
+
+    @patch("time.sleep")
+    def test_wait_for_created_volumes_availability_volume_status_available(
+        self, mock_sleep
+    ):
+        """Created volume status is available."""
+        elapsed_time = 5
+        created_items = {f"volume:{volume_id2}": True}
+        self.vimconn.cinder.volumes.get.return_value.status = "available"
+
+        result = self.vimconn._wait_for_created_volumes_availability(
+            elapsed_time, created_items
+        )
+        # Volume is already available: no waiting, elapsed time unchanged.
+        self.assertEqual(result, elapsed_time)
+        self.vimconn.cinder.volumes.get.assert_called_with(volume_id2)
+        mock_sleep.assert_not_called()
+
+    @patch("time.sleep")
+    def test_wait_for_existing_volumes_availability_volume_status_available(
+        self, mock_sleep
+    ):
+        """Existing volume status is available."""
+        elapsed_time = 5
+        existing_vim_volumes = [{"id": volume_id2}]
+        self.vimconn.cinder.volumes.get.return_value.status = "available"
+
+        result = self.vimconn._wait_for_existing_volumes_availability(
+            elapsed_time, existing_vim_volumes
+        )
+        # Volume is already available: no waiting, elapsed time unchanged.
+        self.assertEqual(result, elapsed_time)
+        self.vimconn.cinder.volumes.get.assert_called_with(volume_id2)
+        mock_sleep.assert_not_called()
+
+    @patch("time.sleep")
+    def test_wait_for_created_volumes_availability_status_processing_multiple_volumes(
+        self, mock_sleep
+    ):
+        """Created volume status is processing."""
+        elapsed_time = 5
+        created_items = {
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id3}": True,
+        }
+        # First poll of volume_id2 reports "processing", forcing one sleep/retry.
+        self.vimconn.cinder.volumes.get.side_effect = [
+            Status("processing"),
+            Status("available"),
+            Status("available"),
+        ]
+
+        result = self.vimconn._wait_for_created_volumes_availability(
+            elapsed_time, created_items
+        )
+        # One 5s retry is added on top of the initial 5s elapsed time.
+        self.assertEqual(result, 10)
+        _call_mock_get_volumes = self.vimconn.cinder.volumes.get.call_args_list
+        self.assertEqual(_call_mock_get_volumes[0][0], (volume_id2,))
+        self.assertEqual(_call_mock_get_volumes[1][0], (volume_id2,))
+        self.assertEqual(_call_mock_get_volumes[2][0], (volume_id3,))
+        mock_sleep.assert_called_with(5)
+        self.assertEqual(1, mock_sleep.call_count)
+
+    @patch("time.sleep")
+    def test_wait_for_existing_volumes_availability_status_processing_multiple_volumes(
+        self, mock_sleep
+    ):
+        """Existing volume status is processing."""
+        elapsed_time = 5
+        existing_vim_volumes = [
+            {"id": volume_id2},
+            {"id": "44e0e83-b9uu-4akk-t234-p9cc4811bd4a"},
+        ]
+        # First poll of volume_id2 reports "processing", forcing one sleep/retry.
+        self.vimconn.cinder.volumes.get.side_effect = [
+            Status("processing"),
+            Status("available"),
+            Status("available"),
+        ]
+
+        result = self.vimconn._wait_for_existing_volumes_availability(
+            elapsed_time, existing_vim_volumes
+        )
+        # One 5s retry is added on top of the initial 5s elapsed time.
+        self.assertEqual(result, 10)
+        _call_mock_get_volumes = self.vimconn.cinder.volumes.get.call_args_list
+        self.assertEqual(_call_mock_get_volumes[0][0], (volume_id2,))
+        self.assertEqual(_call_mock_get_volumes[1][0], (volume_id2,))
+        self.assertEqual(
+            _call_mock_get_volumes[2][0], ("44e0e83-b9uu-4akk-t234-p9cc4811bd4a",)
+        )
+        mock_sleep.assert_called_with(5)
+        self.assertEqual(1, mock_sleep.call_count)
+
+    @patch("time.sleep")
+    def test_wait_for_created_volumes_availability_volume_status_processing_timeout(
+        self, mock_sleep
+    ):
+        """Created volume status is processing, elapsed time greater than timeout (1800)."""
+        elapsed_time = 1805
+        created_items = {f"volume:{volume_id2}": True}
+        self.vimconn.cinder.volumes.get.side_effect = [
+            Status("processing"),
+            Status("processing"),
+        ]
+        # time.sleep is already patched by the decorator; the previous inner
+        # "with patch(...)" re-patched the same target redundantly.
+        result = self.vimconn._wait_for_created_volumes_availability(
+            elapsed_time, created_items
+        )
+        # Elapsed time already exceeds the timeout: return immediately,
+        # without polling cinder or sleeping.
+        self.assertEqual(result, 1805)
+        self.vimconn.cinder.volumes.get.assert_not_called()
+        mock_sleep.assert_not_called()
+
+    @patch("time.sleep")
+    def test_wait_for_existing_volumes_availability_volume_status_processing_timeout(
+        self, mock_sleep
+    ):
+        """Existing volume status is processing, elapsed time greater than timeout (1800)."""
+        elapsed_time = 1805
+        existing_vim_volumes = [{"id": volume_id2}]
+        self.vimconn.cinder.volumes.get.side_effect = [
+            Status("processing"),
+            Status("processing"),
+        ]
+
+        result = self.vimconn._wait_for_existing_volumes_availability(
+            elapsed_time, existing_vim_volumes
+        )
+        # Elapsed time already exceeds the timeout: return immediately,
+        # without polling cinder or sleeping.
+        self.assertEqual(result, 1805)
+        self.vimconn.cinder.volumes.get.assert_not_called()
+        mock_sleep.assert_not_called()
+
+    @patch("time.sleep")
+    def test_wait_for_created_volumes_availability_cinder_raise_exception(
+        self, mock_sleep
+    ):
+        """Cinder get volumes raises exception for created volumes."""
+        elapsed_time = 1000
+        created_items = {f"volume:{volume_id2}": True}
+        self.vimconn.cinder.volumes.get.side_effect = Exception
+        # The exception from cinder must propagate. (An assertEqual on the
+        # return value after the raising call inside this block could never
+        # execute, so none is written here.)
+        with self.assertRaises(Exception):
+            self.vimconn._wait_for_created_volumes_availability(
+                elapsed_time, created_items
+            )
+        self.vimconn.cinder.volumes.get.assert_called_with(volume_id2)
+        mock_sleep.assert_not_called()
+
+    @patch("time.sleep")
+    def test_wait_for_existing_volumes_availability_cinder_raise_exception(
+        self, mock_sleep
+    ):
+        """Cinder get volumes raises exception for existing volumes."""
+        elapsed_time = 1000
+        existing_vim_volumes = [{"id": volume_id2}]
+        self.vimconn.cinder.volumes.get.side_effect = Exception
+        # The exception from cinder must propagate. (An assertEqual on the
+        # return value after the raising call inside this block could never
+        # execute, so none is written here.)
+        with self.assertRaises(Exception):
+            self.vimconn._wait_for_existing_volumes_availability(
+                elapsed_time, existing_vim_volumes
+            )
+        self.vimconn.cinder.volumes.get.assert_called_with(volume_id2)
+        mock_sleep.assert_not_called()
+
+    @patch("time.sleep")
+    def test_wait_for_created_volumes_availability_no_volume_in_created_items(
+        self, mock_sleep
+    ):
+        """Created_items dict does not have volume-id."""
+        elapsed_time = 10
+        created_items = {}
+
+        self.vimconn.cinder.volumes.get.side_effect = [None]
+
+        result = self.vimconn._wait_for_created_volumes_availability(
+            elapsed_time, created_items
+        )
+        # Nothing to wait for: elapsed time is returned untouched, no polling.
+        self.assertEqual(result, 10)
+        self.vimconn.cinder.volumes.get.assert_not_called()
+        mock_sleep.assert_not_called()
+
+    @patch("time.sleep")
+    def test_wait_for_existing_volumes_availability_no_volume_in_existing_vim_volumes(
+        self, mock_sleep
+    ):
+        """Existing_vim_volumes list does not have volume."""
+        elapsed_time = 10
+        existing_vim_volumes = []
+
+        self.vimconn.cinder.volumes.get.side_effect = [None]
+
+        result = self.vimconn._wait_for_existing_volumes_availability(
+            elapsed_time, existing_vim_volumes
+        )
+        self.assertEqual(result, 10)
+        self.vimconn.cinder.volumes.get.assert_not_called()
+        mock_sleep.assert_not_called()
+
    @patch.object(vimconnector, "_prepare_persistent_root_volumes")
    @patch.object(vimconnector, "_prepare_non_root_persistent_volumes")
    @patch.object(vimconnector, "_wait_for_created_volumes_availability")
    @patch.object(vimconnector, "_wait_for_existing_volumes_availability")
    def test_prepare_disk_for_vm_instance(
        self,
        mock_existing_vol_availability,
        mock_created_vol_availability,
        mock_non_root_volumes,
        mock_root_volumes,
    ):
        """Prepare disks for VM instance successfully.

        Note: @patch.object decorators apply bottom-up, so the first test
        argument corresponds to the last (innermost) decorator.
        """
        existing_vim_volumes = []
        created_items = {}
        block_device_mapping = {}
        vm_av_zone = ["nova"]

        mock_root_volumes.return_value = root_vol_id
        # Elapsed-time values returned by the two chained waits.
        mock_created_vol_availability.return_value = 10
        mock_existing_vol_availability.return_value = 15
        self.vimconn.cinder = CopyingMock()
        self.vimconn._prepare_disk_for_vminstance(
            name,
            existing_vim_volumes,
            created_items,
            vm_av_zone,
            block_device_mapping,
            disk_list2,
        )
        # The root volume id returned by the helper must be flagged bootable.
        self.vimconn.cinder.volumes.set_bootable.assert_called_once_with(
            root_vol_id, True
        )
        # Waits are chained: the created-volumes wait starts at 0 and its
        # return value (10) is the elapsed time fed to the existing-volumes wait.
        mock_created_vol_availability.assert_called_once_with(0, created_items)
        mock_existing_vol_availability.assert_called_once_with(10, existing_vim_volumes)
        self.assertEqual(mock_root_volumes.call_count, 1)
        self.assertEqual(mock_non_root_volumes.call_count, 1)
        # base_disk_index 97/98 are ord("a")/ord("b") — presumably device-name
        # suffixes; confirm against _prepare_disk_for_vminstance.
        mock_root_volumes.assert_called_once_with(
            name="basicvm",
            vm_av_zone=["nova"],
            disk={"size": 10, "image_id": image_id},
            base_disk_index=97,
            block_device_mapping={},
            existing_vim_volumes=[],
            created_items={},
        )
        mock_non_root_volumes.assert_called_once_with(
            name="basicvm",
            disk={"size": 20},
            vm_av_zone=["nova"],
            base_disk_index=98,
            block_device_mapping={},
            existing_vim_volumes=[],
            created_items={},
        )
+
    @patch.object(vimconnector, "_prepare_persistent_root_volumes")
    @patch.object(vimconnector, "_prepare_non_root_persistent_volumes")
    @patch.object(vimconnector, "_wait_for_created_volumes_availability")
    @patch.object(vimconnector, "_wait_for_existing_volumes_availability")
    def test_prepare_disk_for_vm_instance_timeout_exceeded(
        self,
        mock_existing_vol_availability,
        mock_created_vol_availability,
        mock_non_root_volumes,
        mock_root_volumes,
    ):
        """Timeout exceeded while waiting for disks.

        Note: @patch.object decorators apply bottom-up, so the first test
        argument corresponds to the last (innermost) decorator.
        """
        existing_vim_volumes = []
        created_items = {}
        vm_av_zone = ["nova"]
        block_device_mapping = {}

        mock_root_volumes.return_value = root_vol_id
        # Elapsed times returned by the chained waits; 1900 presumably exceeds
        # the connector's volume timeout — confirm against the implementation.
        mock_created_vol_availability.return_value = 1700
        mock_existing_vol_availability.return_value = 1900

        with self.assertRaises(VimConnException) as err:
            self.vimconn._prepare_disk_for_vminstance(
                name,
                existing_vim_volumes,
                created_items,
                vm_av_zone,
                block_device_mapping,
                disk_list2,
            )
        self.assertEqual(
            str(err.exception), "Timeout creating volumes for instance basicvm"
        )
        # On timeout the root volume must never be marked bootable.
        self.vimconn.cinder.volumes.set_bootable.assert_not_called()
        # Created-volumes wait starts at 0; its return (1700) feeds the
        # existing-volumes wait.
        mock_created_vol_availability.assert_called_once_with(0, created_items)
        mock_existing_vol_availability.assert_called_once_with(
            1700, existing_vim_volumes
        )
        self.assertEqual(mock_root_volumes.call_count, 1)
        self.assertEqual(mock_non_root_volumes.call_count, 1)
        mock_root_volumes.assert_called_once_with(
            name="basicvm",
            vm_av_zone=["nova"],
            disk={"size": 10, "image_id": image_id},
            base_disk_index=97,
            block_device_mapping={},
            existing_vim_volumes=[],
            created_items={},
        )
        mock_non_root_volumes.assert_called_once_with(
            name="basicvm",
            disk={"size": 20},
            vm_av_zone=["nova"],
            base_disk_index=98,
            block_device_mapping={},
            existing_vim_volumes=[],
            created_items={},
        )
+
    @patch.object(vimconnector, "_prepare_persistent_root_volumes")
    @patch.object(vimconnector, "_prepare_non_root_persistent_volumes")
    @patch.object(vimconnector, "_wait_for_created_volumes_availability")
    @patch.object(vimconnector, "_wait_for_existing_volumes_availability")
    def test_prepare_disk_for_vm_instance_empty_disk_list(
        self,
        mock_existing_vol_availability,
        mock_created_vol_availability,
        mock_non_root_volumes,
        mock_root_volumes,
    ):
        """Disk list is empty.

        With no disks, neither volume-preparation helper runs, but the
        availability waits are still performed (and chained).
        """
        existing_vim_volumes = []
        created_items = {}
        block_device_mapping = {}
        vm_av_zone = ["nova"]
        mock_created_vol_availability.return_value = 2
        mock_existing_vol_availability.return_value = 3

        self.vimconn._prepare_disk_for_vminstance(
            name,
            existing_vim_volumes,
            created_items,
            vm_av_zone,
            block_device_mapping,
            disk_list,
        )
        # No root volume was prepared, so nothing is marked bootable.
        self.vimconn.cinder.volumes.set_bootable.assert_not_called()
        # Waits still chain: 0 into the first, its return (2) into the second.
        mock_created_vol_availability.assert_called_once_with(0, created_items)
        mock_existing_vol_availability.assert_called_once_with(2, existing_vim_volumes)
        mock_root_volumes.assert_not_called()
        mock_non_root_volumes.assert_not_called()
+
    @patch.object(vimconnector, "_prepare_persistent_root_volumes")
    @patch.object(vimconnector, "_prepare_non_root_persistent_volumes")
    @patch.object(vimconnector, "_wait_for_created_volumes_availability")
    @patch.object(vimconnector, "_wait_for_existing_volumes_availability")
    def test_prepare_disk_for_vm_instance_persistent_root_volume_error(
        self,
        mock_existing_vol_availability,
        mock_created_vol_availability,
        mock_non_root_volumes,
        mock_root_volumes,
    ):
        """Persistent root volumes preparation raises error.

        The failure on the first (root) disk must abort processing: the
        non-root helper and the availability waits never run.
        """
        existing_vim_volumes = []
        created_items = {}
        vm_av_zone = ["nova"]
        block_device_mapping = {}

        mock_root_volumes.side_effect = Exception()
        mock_created_vol_availability.return_value = 10
        mock_existing_vol_availability.return_value = 15

        with self.assertRaises(Exception):
            self.vimconn._prepare_disk_for_vminstance(
                name,
                existing_vim_volumes,
                created_items,
                vm_av_zone,
                block_device_mapping,
                disk_list2,
            )
        self.vimconn.cinder.volumes.set_bootable.assert_not_called()
        mock_created_vol_availability.assert_not_called()
        mock_existing_vol_availability.assert_not_called()
        # Only the root-volume helper was reached before the exception.
        mock_root_volumes.assert_called_once_with(
            name="basicvm",
            vm_av_zone=["nova"],
            disk={"size": 10, "image_id": image_id},
            base_disk_index=97,
            block_device_mapping={},
            existing_vim_volumes=[],
            created_items={},
        )
        mock_non_root_volumes.assert_not_called()
+
    @patch.object(vimconnector, "_prepare_persistent_root_volumes")
    @patch.object(vimconnector, "_prepare_non_root_persistent_volumes")
    @patch.object(vimconnector, "_wait_for_created_volumes_availability")
    @patch.object(vimconnector, "_wait_for_existing_volumes_availability")
    def test_prepare_disk_for_vm_instance_non_root_volume_error(
        self,
        mock_existing_vol_availability,
        mock_created_vol_availability,
        mock_non_root_volumes,
        mock_root_volumes,
    ):
        """Non-root volumes preparation raises error.

        The root disk is processed first and succeeds; the failure on the
        second (non-root) disk aborts before the availability waits run.
        """
        existing_vim_volumes = []
        created_items = {}
        vm_av_zone = ["nova"]
        block_device_mapping = {}

        mock_root_volumes.return_value = root_vol_id
        mock_non_root_volumes.side_effect = Exception

        with self.assertRaises(Exception):
            self.vimconn._prepare_disk_for_vminstance(
                name,
                existing_vim_volumes,
                created_items,
                vm_av_zone,
                block_device_mapping,
                disk_list2,
            )
        # set_bootable only happens after all disks are prepared successfully.
        self.vimconn.cinder.volumes.set_bootable.assert_not_called()
        mock_created_vol_availability.assert_not_called()
        mock_existing_vol_availability.assert_not_called()
        self.assertEqual(mock_root_volumes.call_count, 1)
        self.assertEqual(mock_non_root_volumes.call_count, 1)
        mock_root_volumes.assert_called_once_with(
            name="basicvm",
            vm_av_zone=["nova"],
            disk={"size": 10, "image_id": image_id},
            base_disk_index=97,
            block_device_mapping={},
            existing_vim_volumes=[],
            created_items={},
        )
        mock_non_root_volumes.assert_called_once_with(
            name="basicvm",
            disk={"size": 20},
            vm_av_zone=["nova"],
            base_disk_index=98,
            block_device_mapping={},
            existing_vim_volumes=[],
            created_items={},
        )
+
+    def test_find_external_network_for_floating_ip_no_external_network(self):
+        """External network could not be found."""
+        self.vimconn.neutron.list_networks.return_value = {
+            "networks": [
+                {"id": "408b73-r9cc-5a6a-a270-82cc4811bd4a", "router:external": False}
+            ]
+        }
+        with self.assertRaises(VimConnException) as err:
+            self.vimconn._find_the_external_network_for_floating_ip()
+        self.assertEqual(
+            str(err.exception),
+            "Cannot create floating_ip automatically since no external network is present",
+        )
+
+    def test_find_external_network_for_floating_one_external_network(self):
+        """One external network has been found."""
+        self.vimconn.neutron.list_networks.return_value = {
+            "networks": [
+                {"id": "408b73-r9cc-5a6a-a270-82cc4811bd4a", "router:external": True}
+            ]
+        }
+        expected_result = "408b73-r9cc-5a6a-a270-82cc4811bd4a"
+        result = self.vimconn._find_the_external_network_for_floating_ip()
+        self.assertEqual(result, expected_result)
+
+    def test_find_external_network_for_floating_neutron_raises_exception(self):
+        """Neutron list networks raises exception."""
+        self.vimconn.neutron.list_networks.side_effect = Exception
+        with self.assertRaises(Exception):
+            self.vimconn._find_the_external_network_for_floating_ip()
+
+    def test_find_external_network_for_floating_several_external_network(self):
+        """Several exernal networks has been found."""
+        self.vimconn.neutron.list_networks.return_value = {
+            "networks": [
+                {"id": "408b73-r9cc-5a6a-a270-82cc4811bd4a", "router:external": True},
+                {"id": "608b73-y9cc-5a6a-a270-12cc4811bd4a", "router:external": True},
+            ]
+        }
+        with self.assertRaises(VimConnException) as err:
+            self.vimconn._find_the_external_network_for_floating_ip()
+        self.assertEqual(
+            str(err.exception),
+            "Cannot create floating_ip automatically since multiple external networks are present",
+        )
+
+    def test_neutron_create_float_ip(self):
+        """Floating ip creation is successful."""
+        param = {"net_id": "408b73-r9cc-5a6a-a270-p2cc4811bd9a"}
+        created_items = {}
+        self.vimconn.neutron.create_floatingip.return_value = {
+            "floatingip": {"id": "308b73-t9cc-1a6a-a270-12cc4811bd4a"}
+        }
+        expected_created_items = {
+            "floating_ip:308b73-t9cc-1a6a-a270-12cc4811bd4a": True
+        }
+        self.vimconn._neutron_create_float_ip(param, created_items)
+        self.assertEqual(created_items, expected_created_items)
+
    def test_neutron_create_float_ip_exception_occured(self):
        """Floating ip could not be created."""
        # Request body in the shape neutron.create_floatingip expects.
        param = {
            "floatingip": {
                "floating_network_id": "408b73-r9cc-5a6a-a270-p2cc4811bd9a",
                "tenant_id": "308b73-19cc-8a6a-a270-02cc4811bd9a",
            }
        }
        created_items = {}
        self.vimconn.neutron = CopyingMock()
        self.vimconn.neutron.create_floatingip.side_effect = Exception(
            "Neutron floating ip create exception occured."
        )
        with self.assertRaises(VimConnException) as err:
            self.vimconn._neutron_create_float_ip(param, created_items)
        # On failure nothing is registered for cleanup, and the Neutron error
        # text is embedded in the VimConnException message.
        self.assertEqual(created_items, {})
        self.assertEqual(
            str(err.exception),
            "Exception: Cannot create new floating_ip Neutron floating ip create exception occured.",
        )
+
+    @patch.object(vimconnector, "_neutron_create_float_ip")
+    @patch.object(vimconnector, "_find_the_external_network_for_floating_ip")
+    def test_create_floating_ip_pool_id_available(
+        self, mock_find_ext_network, mock_create_float_ip
+    ):
+        """Floating ip creation, ip pool is available."""
+        floating_network = {"floating_ip": "308b73-t9cc-1a6a-a270-12cc4811bd4a"}
+        created_items = {}
+        expected_param = {
+            "floatingip": {
+                "floating_network_id": "308b73-t9cc-1a6a-a270-12cc4811bd4a",
+                "tenant_id": "408b73-r9cc-5a6a-a270-82cc4811bd4a",
+            }
+        }
+        self.vimconn._create_floating_ip(floating_network, self.server, created_items)
+        mock_find_ext_network.assert_not_called()
+        mock_create_float_ip.assert_called_once_with(expected_param, {})
+
+    @patch.object(vimconnector, "_neutron_create_float_ip")
+    @patch.object(vimconnector, "_find_the_external_network_for_floating_ip")
+    def test_create_floating_ip_finding_pool_id(
+        self, mock_find_ext_network, mock_create_float_ip
+    ):
+        """Floating ip creation, pool id need to be found."""
+        floating_network = {"floating_ip": True}
+        created_items = {}
+        mock_find_ext_network.return_value = "308b73-t9cc-1a6a-a270-12cc4811bd4a"
+        expected_param = {
+            "floatingip": {
+                "floating_network_id": "308b73-t9cc-1a6a-a270-12cc4811bd4a",
+                "tenant_id": "408b73-r9cc-5a6a-a270-82cc4811bd4a",
+            }
+        }
+        self.vimconn._create_floating_ip(floating_network, self.server, created_items)
+        mock_find_ext_network.assert_called_once()
+        mock_create_float_ip.assert_called_once_with(expected_param, {})
+
    @patch.object(vimconnector, "_neutron_create_float_ip")
    @patch.object(vimconnector, "_find_the_external_network_for_floating_ip")
    def test_create_floating_ip_neutron_create_floating_ip_exception(
        self, mock_find_ext_network, mock_create_float_ip
    ):
        """Neutron create floating ip raises error."""
        floating_network = {"floating_ip": True}
        created_items = {}
        mock_create_float_ip.side_effect = VimConnException(
            "Can not create floating ip."
        )
        mock_find_ext_network.return_value = "308b73-t9cc-1a6a-a270-12cc4811bd4a"
        # Request body that _create_floating_ip is expected to build from the
        # resolved pool id and the connector's tenant.
        expected_param = {
            "floatingip": {
                "floating_network_id": "308b73-t9cc-1a6a-a270-12cc4811bd4a",
                "tenant_id": "408b73-r9cc-5a6a-a270-82cc4811bd4a",
            }
        }

        # The helper's VimConnException propagates unchanged.
        with self.assertRaises(VimConnException) as err:
            self.vimconn._create_floating_ip(
                floating_network, self.server, created_items
            )
        self.assertEqual(str(err.exception), "Can not create floating ip.")
        mock_find_ext_network.assert_called_once()
        mock_create_float_ip.assert_called_once_with(expected_param, {})
+
+    @patch.object(vimconnector, "_neutron_create_float_ip")
+    @patch.object(vimconnector, "_find_the_external_network_for_floating_ip")
+    def test_create_floating_ip_can_not_find_pool_id(
+        self, mock_find_ext_network, mock_create_float_ip
+    ):
+        """Floating ip creation, pool id could not be found."""
+        floating_network = {"floating_ip": True}
+        created_items = {}
+        mock_find_ext_network.side_effect = VimConnException(
+            "Cannot create floating_ip automatically since no external network is present"
+        )
+        with self.assertRaises(VimConnException) as err:
+            self.vimconn._create_floating_ip(
+                floating_network, self.server, created_items
+            )
+        self.assertEqual(
+            str(err.exception),
+            "Cannot create floating_ip automatically since no external network is present",
+        )
+        mock_find_ext_network.assert_called_once()
+        mock_create_float_ip.assert_not_called()
+
+    def test_find_floating_ip_get_free_floating_ip(self):
+        """Get free floating ips successfully."""
+        floating_ips = [
+            {
+                "tenant_id": "408b73-r9cc-5a6a-a270-82cc4811bd4a",
+                "floating_network_id": "308b73-t9cc-1a6a-a270-12cc4811bd4a",
+                "id": "508b73-o9cc-5a6a-a270-72cc4811bd8",
+            }
+        ]
+        floating_network = {"floating_ip": "308b73-t9cc-1a6a-a270-12cc4811bd4a"}
+        expected_result = "508b73-o9cc-5a6a-a270-72cc4811bd8"
+
+        result = self.vimconn._find_floating_ip(
+            self.server, floating_ips, floating_network
+        )
+        self.assertEqual(result, expected_result)
+
+    def test_find_floating_ip_different_floating_network_id(self):
+        """Floating network id is different with floating_ip of floating network."""
+        floating_ips = [
+            {
+                "floating_network_id": "308b73-t9cc-1a6a-a270-12cc4811bd4a",
+                "id": "508b73-o9cc-5a6a-a270-72cc4811bd8",
+            }
+        ]
+        floating_network = {"floating_ip": "508b73-t9cc-1a6a-a270-12cc4811bd4a"}
+
+        result = self.vimconn._find_floating_ip(
+            self.server, floating_ips, floating_network
+        )
+        self.assertEqual(result, None)
+
    def test_find_floating_ip_different_fip_tenant(self):
        """Items in floating_ips have port_id; tenant_id is not the same as the server tenant id."""
        floating_ips = [
            {
                "port_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
                "floating_network_id": "308b73-t9cc-1a6a-a270-12cc4811bd4a",
                "id": "508b73-o9cc-5a6a-a270-72cc4811bd8",
                # Deliberately set to the server id, not the tenant id.
                "tenant_id": self.server.id,
            }
        ]
        floating_network = {"floating_ip": "308b73-t9cc-1a6a-a270-12cc4811bd4a"}
        mock_create_floating_ip = CopyingMock()
        # _create_floating_ip is patched so no new fip creation can sneak in.
        with patch.object(vimconnector, "_create_floating_ip", mock_create_floating_ip):
            result = self.vimconn._find_floating_ip(
                self.server, floating_ips, floating_network
            )
            self.assertEqual(result, None)
+
+    @patch("time.sleep")
+    def test_assign_floating_ip(self, mock_sleep):
+        """Assign floating ip successfully."""
+        free_floating_ip = "508b73-o9cc-5a6a-a270-72cc4811bd8"
+        floating_network = {"vim_id": floating_network_vim_id}
+        fip = {
+            "port_id": floating_network_vim_id,
+            "floating_network_id": "p08b73-e9cc-5a6a-t270-82cc4811bd4a",
+            "id": "508b73-o9cc-5a6a-a270-72cc4811bd8",
+            "tenant_id": "k08b73-e9cc-5a6a-t270-82cc4811bd4a",
+        }
+        self.vimconn.neutron.update_floatingip.side_effect = None
+        self.vimconn.neutron.show_floatingip.return_value = fip
+        expected_result = fip
+
+        result = self.vimconn._assign_floating_ip(free_floating_ip, floating_network)
+        self.assertEqual(result, expected_result)
+        self.vimconn.neutron.update_floatingip.assert_called_once_with(
+            free_floating_ip,
+            {"floatingip": {"port_id": floating_network_vim_id}},
+        )
+        mock_sleep.assert_called_once_with(5)
+        self.vimconn.neutron.show_floatingip.assert_called_once_with(free_floating_ip)
+
+    @patch("time.sleep")
+    def test_assign_floating_ip_update_floating_ip_exception(self, mock_sleep):
+        """Neutron update floating ip raises exception."""
+        free_floating_ip = "508b73-o9cc-5a6a-a270-72cc4811bd8"
+        floating_network = {"vim_id": floating_network_vim_id}
+        self.vimconn.neutron = CopyingMock()
+        self.vimconn.neutron.update_floatingip.side_effect = Exception(
+            "Floating ip is not updated."
+        )
+
+        with self.assertRaises(Exception) as err:
+            result = self.vimconn._assign_floating_ip(
+                free_floating_ip, floating_network
+            )
+            self.assertEqual(result, None)
+        self.assertEqual(str(err.exception), "Floating ip is not updated.")
+
+        self.vimconn.neutron.update_floatingip.assert_called_once_with(
+            free_floating_ip,
+            {"floatingip": {"port_id": floating_network_vim_id}},
+        )
+        mock_sleep.assert_not_called()
+        self.vimconn.neutron.show_floatingip.assert_not_called()
+
+    @patch("time.sleep")
+    def test_assign_floating_ip_show_floating_ip_exception(self, mock_sleep):
+        """Neutron show floating ip raises exception."""
+        free_floating_ip = "508b73-o9cc-5a6a-a270-72cc4811bd8"
+        floating_network = {"vim_id": floating_network_vim_id}
+        self.vimconn.neutron.update_floatingip.side_effect = None
+        self.vimconn.neutron.show_floatingip.side_effect = Exception(
+            "Floating ip could not be shown."
+        )
+
+        with self.assertRaises(Exception) as err:
+            result = self.vimconn._assign_floating_ip(
+                free_floating_ip, floating_network
+            )
+            self.assertEqual(result, None)
+            self.assertEqual(str(err.exception), "Floating ip could not be shown.")
+        self.vimconn.neutron.update_floatingip.assert_called_once_with(
+            free_floating_ip,
+            {"floatingip": {"port_id": floating_network_vim_id}},
+        )
+        mock_sleep.assert_called_once_with(5)
+        self.vimconn.neutron.show_floatingip.assert_called_once_with(free_floating_ip)
+
    @patch("random.shuffle")
    @patch.object(vimconnector, "_find_floating_ip")
    def test_get_free_floating_ip(self, mock_find_floating_ip, mock_shuffle):
        """Get free floating ip successfully.

        The candidate list is shuffled before delegating the pick to
        _find_floating_ip.
        """
        floating_network = {"floating_ip": "308b73-t9cc-1a6a-a270-12cc4811bd4a"}
        floating_ips = [
            {
                "port_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
                "floating_network_id": "308b73-t9cc-1a6a-a270-12cc4811bd4a",
                "id": "508b73-o9cc-5a6a-a270-72cc4811bd8",
                "tenant_id": "208b73-e9cc-5a6a-t270-82cc4811bd4a",
            },
            {
                "port_id": "508b73-r9cc-5a6a-5270-o2cc4811bd4a",
                "floating_network_id": "308b73-t9cc-1a6a-a270-12cc4811bd4a",
                "id": "208b73-o9cc-5a6a-a270-52cc4811bd8",
                "tenant_id": "208b73-e9cc-5a6a-t270-82cc4811bd4a",
            },
        ]
        self.vimconn.neutron.list_floatingips.return_value = {
            "floatingips": floating_ips
        }
        mock_find_floating_ip.return_value = "508b73-o9cc-5a6a-a270-72cc4811bd8"
        expected_result = "508b73-o9cc-5a6a-a270-72cc4811bd8"

        result = self.vimconn._get_free_floating_ip(self.server, floating_network)
        # Whatever _find_floating_ip picks is returned unchanged.
        self.assertEqual(result, expected_result)
        mock_shuffle.assert_called_once_with(floating_ips)
        mock_find_floating_ip.assert_called_once_with(
            self.server, floating_ips, floating_network
        )
+
+    @patch("random.shuffle")
+    @patch.object(vimconnector, "_find_floating_ip")
+    def test_get_free_floating_ip_list_floating_ip_exception(
+        self, mock_find_floating_ip, mock_shuffle
+    ):
+        """Neutron list floating IPs raises exception."""
+        floating_network = {"floating_ip": "308b73-t9cc-1a6a-a270-12cc4811bd4a"}
+        self.vimconn.neutron = CopyingMock()
+        self.vimconn.neutron.list_floatingips.side_effect = Exception(
+            "Floating ips could not be listed."
+        )
+        with self.assertRaises(Exception) as err:
+            result = self.vimconn._get_free_floating_ip(self.server, floating_network)
+            self.assertEqual(result, None)
+            self.assertEqual(str(err.exception), "Floating ips could not be listed.")
+        mock_shuffle.assert_not_called()
+        mock_find_floating_ip.assert_not_called()
+
+    @patch("random.shuffle")
+    @patch.object(vimconnector, "_find_floating_ip")
+    def test_get_free_floating_ip_find_floating_ip_exception(
+        self, mock_find_floating_ip, mock_shuffle
+    ):
+        """_find_floating_ip method raises exception."""
+        floating_network = {"floating_ip": "308b73-t9cc-1a6a-a270-12cc4811bd4a"}
+        floating_ips = [
+            {
+                "port_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
+                "floating_network_id": "308b73-t9cc-1a6a-a270-12cc4811bd4a",
+                "id": "508b73-o9cc-5a6a-a270-72cc4811bd8",
+                "tenant_id": "208b73-e9cc-5a6a-t270-82cc4811bd4a",
+            },
+            {
+                "port_id": "508b73-r9cc-5a6a-5270-o2cc4811bd4a",
+                "floating_network_id": "308b73-t9cc-1a6a-a270-12cc4811bd4a",
+                "id": "208b73-o9cc-5a6a-a270-52cc4811bd8",
+                "tenant_id": "208b73-e9cc-5a6a-t270-82cc4811bd4a",
+            },
+        ]
+        self.vimconn.neutron = CopyingMock()
+        self.vimconn.neutron.list_floatingips.return_value = {
+            "floatingips": floating_ips
+        }
+        mock_find_floating_ip.side_effect = Exception(
+            "Free floating ip could not be found."
+        )
+
+        with self.assertRaises(Exception) as err:
+            result = self.vimconn._get_free_floating_ip(self.server, floating_network)
+            self.assertEqual(result, None)
+            self.assertEqual(str(err.exception), "Free floating ip could not be found.")
+        mock_shuffle.assert_called_once_with(floating_ips)
+        mock_find_floating_ip.assert_called_once_with(
+            self.server, floating_ips, floating_network
+        )
+
    @patch.object(vimconnector, "_create_floating_ip")
    @patch.object(vimconnector, "_get_free_floating_ip")
    @patch.object(vimconnector, "_assign_floating_ip")
    def test_prepare_external_network_for_vm_instance(
        self,
        mock_assign_floating_ip,
        mock_get_free_floating_ip,
        mock_create_floating_ip,
    ):
        """Prepare external network successfully.

        A free floating ip exists and is not attached to any port, so it is
        assigned on the first attempt and no new fip has to be created.
        """
        external_network = [
            {
                "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
                "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
            },
        ]
        created_items = {}
        vm_start_time = time_return_value
        mock_get_free_floating_ip.side_effect = ["y08b73-o9cc-1a6a-a270-12cc4811bd4u"]
        mock_assign_floating_ip.return_value = {
            "floatingip": {"port_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a"}
        }
        self.vimconn.neutron = CopyingMock()
        self.vimconn.nova = CopyingMock()
        # Empty port_id marks the fip as free (not attached yet).
        self.vimconn.neutron.show_floatingip.return_value = {
            "floatingip": {"port_id": ""}
        }

        self.vimconn._prepare_external_network_for_vminstance(
            external_network, self.server, created_items, vm_start_time
        )

        self.assertEqual(mock_get_free_floating_ip.call_count, 1)
        mock_get_free_floating_ip.assert_called_once_with(
            self.server,
            {
                "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
                "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
            },
        )
        self.vimconn.neutron.show_floatingip.assert_called_once_with(
            "y08b73-o9cc-1a6a-a270-12cc4811bd4u"
        )
        # Success path: no VM-status polling, no fip creation needed.
        self.vimconn.nova.servers.get.assert_not_called()
        mock_create_floating_ip.assert_not_called()
        mock_assign_floating_ip.assert_called_once_with(
            "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
            {
                "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
                "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
            },
        )
+
    @patch("time.time")
    @patch("time.sleep")
    @patch.object(vimconnector, "_create_floating_ip")
    @patch.object(vimconnector, "_get_free_floating_ip")
    @patch.object(vimconnector, "_assign_floating_ip")
    def test_prepare_external_network_for_vm_instance_no_free_floating_ip(
        self,
        mock_assign_floating_ip,
        mock_get_free_floating_ip,
        mock_create_floating_ip,
        mock_sleep,
        mock_time,
    ):
        """There is not any free floating ip.

        _get_free_floating_ip keeps returning None, so the method retries
        (creating a fip each time) until it gives up; the empty dict from
        show_floatingip then triggers the KeyError asserted below.
        """
        floating_network = {
            "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
            "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
        }
        external_network = [floating_network]

        created_items = {}
        vm_start_time = time_return_value
        mock_get_free_floating_ip.return_value = None
        mock_assign_floating_ip.return_value = {}
        self.vimconn.nova.servers.get.return_value.status = "ERROR"
        self.vimconn.neutron.show_floatingip.return_value = {}

        with self.assertRaises(KeyError):
            self.vimconn._prepare_external_network_for_vminstance(
                external_network, self.server, created_items, vm_start_time
            )

        # Four attempts observed — presumably the initial try plus retries up
        # to the connector's retry limit; confirm against the implementation.
        self.assertEqual(mock_get_free_floating_ip.call_count, 4)
        mock_get_free_floating_ip.assert_called_with(
            self.server,
            {
                "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
                "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
            },
        )
        # No free fip was found, so show_floatingip is queried with None.
        self.vimconn.neutron.show_floatingip.assert_called_with(None)
        mock_sleep.assert_not_called()
        mock_time.assert_not_called()
        self.assertEqual(self.vimconn.nova.servers.get.call_count, 4)
        mock_create_floating_ip.assert_called_with(
            floating_network, self.server, created_items
        )
        self.assertEqual(mock_create_floating_ip.call_count, 4)
        # Assignment never happens because no fip id was ever obtained.
        mock_assign_floating_ip.assert_not_called()
        self.vimconn.nova.servers.get.assert_called_with(self.server.id)
+
+    @patch("time.time")
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_create_floating_ip")
+    @patch.object(vimconnector, "_get_free_floating_ip")
+    @patch.object(vimconnector, "_assign_floating_ip")
+    def test_prepare_external_network_for_vm_instance_no_free_fip_can_not_create_fip_exit_on_error_false(
+        self,
+        mock_assign_floating_ip,
+        mock_get_free_floating_ip,
+        mock_create_floating_ip,
+        mock_sleep,
+        mock_time,
+    ):
+        """There is not any free floating ip, create_floating ip method raise exception
+        exit_on_floating_ip_error set to False."""
+        floating_network = {
+            "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
+            "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
+            "exit_on_floating_ip_error": False,
+        }
+        external_network = [floating_network]
+
+        created_items = {}
+        vm_start_time = time_return_value
+        mock_get_free_floating_ip.return_value = None
+        mock_assign_floating_ip.return_value = {}
+        mock_create_floating_ip.side_effect = VimConnException(
+            "Can not create floating ip."
+        )
+        self.vimconn.nova.servers.get.return_value.status = "ERROR"
+        self.vimconn.neutron.show_floatingip.return_value = {}
+
+        self.vimconn._prepare_external_network_for_vminstance(
+            external_network, self.server, created_items, vm_start_time
+        )
+        self.assertEqual(mock_get_free_floating_ip.call_count, 1)
+        mock_get_free_floating_ip.assert_called_with(
+            self.server,
+            {
+                "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
+                "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
+                "exit_on_floating_ip_error": False,
+            },
+        )
+        self.vimconn.neutron.show_floatingip.assert_not_called()
+        mock_sleep.assert_not_called()
+        mock_time.assert_not_called()
+        self.vimconn.nova.servers.get.assert_not_called()
+        mock_create_floating_ip.assert_called_with(
+            floating_network, self.server, created_items
+        )
+        self.assertEqual(mock_create_floating_ip.call_count, 1)
+        mock_assign_floating_ip.assert_not_called()
+
    @patch("time.time")
    @patch("time.sleep")
    @patch.object(vimconnector, "_create_floating_ip")
    @patch.object(vimconnector, "_get_free_floating_ip")
    @patch.object(vimconnector, "_assign_floating_ip")
    def test_prepare_external_network_for_vm_instance_no_free_fip_can_not_create_fip_exit_on_error_true(
        self,
        mock_assign_floating_ip,
        mock_get_free_floating_ip,
        mock_create_floating_ip,
        mock_sleep,
        mock_time,
    ):
        """There is not any free floating ip, create_floating ip method raise exception,
        exit_on_floating_ip_error set to True, so the exception propagates."""
        floating_network = {
            "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
            "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
            "exit_on_floating_ip_error": True,
        }
        external_network = [floating_network]

        created_items = {}
        vm_start_time = time_return_value
        mock_get_free_floating_ip.return_value = None
        mock_assign_floating_ip.return_value = {}
        mock_create_floating_ip.side_effect = VimConnException(
            "Can not create floating ip."
        )
        self.vimconn.nova.servers.get.return_value.status = "ERROR"
        self.vimconn.neutron.show_floatingip.return_value = {}
        # With exit_on_floating_ip_error=True the VimConnException from
        # _create_floating_ip must reach the caller on the first attempt.
        with self.assertRaises(VimConnException):
            self.vimconn._prepare_external_network_for_vminstance(
                external_network, self.server, created_items, vm_start_time
            )
        self.assertEqual(mock_get_free_floating_ip.call_count, 1)
        mock_get_free_floating_ip.assert_called_with(
            self.server,
            {
                "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
                "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
                "exit_on_floating_ip_error": True,
            },
        )
        self.vimconn.neutron.show_floatingip.assert_not_called()
        mock_sleep.assert_not_called()
        mock_time.assert_not_called()
        self.vimconn.nova.servers.get.assert_not_called()
        mock_create_floating_ip.assert_called_with(
            floating_network, self.server, created_items
        )
        self.assertEqual(mock_create_floating_ip.call_count, 1)
        mock_assign_floating_ip.assert_not_called()
+
+    @patch.object(vimconnector, "_create_floating_ip")
+    @patch.object(vimconnector, "_get_free_floating_ip")
+    @patch.object(vimconnector, "_assign_floating_ip")
+    def test_prepare_external_network_for_vm_instance_fip_has_port_id(
+        self,
+        mock_assign_floating_ip,
+        mock_get_free_floating_ip,
+        mock_create_floating_ip,
+    ):
+        """Neutron show floating ip return the fip with port_id and floating network vim_id
+        is different from port_id."""
+        floating_network = {
+            "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
+            "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
+        }
+        external_network = [floating_network]
+        created_items = {}
+        vm_start_time = 150
+        mock_get_free_floating_ip.side_effect = [
+            "t08b73-o9cc-1a6a-a270-12cc4811bd4u",
+            "r08b73-o9cc-1a6a-a270-12cc4811bd4u",
+            "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
+        ]
+        mock_assign_floating_ip.side_effect = [
+            {"floatingip": {"port_id": "k08b73-r9cc-5a6a-a270-82cc4811bd4a"}},
+            {"floatingip": {"port_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a"}},
+        ]
+        self.vimconn.neutron = CopyingMock()
+        self.vimconn.nova = CopyingMock()
+        self.vimconn.neutron.show_floatingip.side_effect = [
+            {"floatingip": {"port_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a"}},
+            {"floatingip": {"port_id": ""}},
+            {"floatingip": {"port_id": ""}},
+        ]
+        self.vimconn._prepare_external_network_for_vminstance(
+            external_network, self.server, created_items, vm_start_time
+        )
+        self.assertEqual(mock_get_free_floating_ip.call_count, 3)
+        _call_mock_get_free_floating_ip = mock_get_free_floating_ip.call_args_list
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[0][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[1][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[2][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+        self.assertEqual(self.vimconn.neutron.show_floatingip.call_count, 3)
+        self.vimconn.nova.servers.get.assert_not_called()
+        mock_create_floating_ip.assert_not_called()
+        self.assertEqual(mock_assign_floating_ip.call_count, 2)
+        _call_mock_assign_floating_ip = mock_assign_floating_ip.call_args_list
+        self.assertEqual(
+            _call_mock_assign_floating_ip[0][0],
+            ("r08b73-o9cc-1a6a-a270-12cc4811bd4u", floating_network),
+        )
+        self.assertEqual(
+            _call_mock_assign_floating_ip[1][0],
+            ("y08b73-o9cc-1a6a-a270-12cc4811bd4u", floating_network),
+        )
+
+    @patch("time.time")
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_create_floating_ip")
+    @patch.object(vimconnector, "_get_free_floating_ip")
+    @patch.object(vimconnector, "_assign_floating_ip")
+    def test_prepare_external_network_for_vm_instance_neutron_show_fip_exception_vm_status_in_error(
+        self,
+        mock_assign_floating_ip,
+        mock_get_free_floating_ip,
+        mock_create_floating_ip,
+        mock_sleep,
+        mock_time,
+    ):
+        """Neutron show floating ip gives exception, exit_on_floating_ip_error set to True,
+        VM status is in error."""
+        floating_network = {
+            "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
+            "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
+            "exit_on_floating_ip_error": True,
+        }
+        external_network = [floating_network]
+        created_items = {}
+        vm_start_time = time_return_value
+
+        mock_time.side_effect = [156570150, 156570800, 156571200]
+
+        self.vimconn.nova.servers.get.return_value.status = "ERROR"
+        self.vimconn.neutron.show_floatingip.side_effect = [
+            Exception("Floating ip could not be shown.")
+        ] * 4
+        with self.assertRaises(Exception) as err:
+            self.vimconn._prepare_external_network_for_vminstance(
+                external_network, self.server, created_items, vm_start_time
+            )
+            self.assertEqual(
+                str(err.exception),
+                "Cannot create floating_ip: Exception Floating ip could not be shown.",
+            )
+
+        self.assertEqual(mock_get_free_floating_ip.call_count, 4)
+        _call_mock_get_free_floating_ip = mock_get_free_floating_ip.call_args_list
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[0][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[1][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[2][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[3][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+
+        self.assertEqual(self.vimconn.neutron.show_floatingip.call_count, 4)
+        self.vimconn.nova.servers.get.assert_called_with(self.server.id)
+        mock_create_floating_ip.assert_not_called()
+        mock_assign_floating_ip.assert_not_called()
+        mock_time.assert_not_called()
+        mock_sleep.assert_not_called()
+
+    @patch("time.time")
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_create_floating_ip")
+    @patch.object(vimconnector, "_get_free_floating_ip")
+    @patch.object(vimconnector, "_assign_floating_ip")
+    def test_prepare_external_network_for_vm_instance_neutron_show_fip_exception_vm_status_in_active(
+        self,
+        mock_assign_floating_ip,
+        mock_get_free_floating_ip,
+        mock_create_floating_ip,
+        mock_sleep,
+        mock_time,
+    ):
+        """Neutron show floating ip gives exception, exit_on_floating_ip_error is set to False,
+        VM status is in active."""
+        floating_network = {
+            "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
+            "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
+            "exit_on_floating_ip_error": False,
+        }
+        external_network = [floating_network]
+        created_items = {}
+        vm_start_time = time_return_value
+
+        mock_time.side_effect = [156570150, 156570800, 156571200]
+
+        self.vimconn.nova.servers.get.return_value.status = "ACTIVE"
+        self.vimconn.neutron.show_floatingip.side_effect = [
+            Exception("Floating ip could not be shown.")
+        ] * 4
+
+        self.vimconn._prepare_external_network_for_vminstance(
+            external_network, self.server, created_items, vm_start_time
+        )
+        # self.assertEqual(str(err.exception), "Cannot create floating_ip")
+
+        self.assertEqual(mock_get_free_floating_ip.call_count, 4)
+        _call_mock_get_free_floating_ip = mock_get_free_floating_ip.call_args_list
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[0][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[1][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[2][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[3][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+
+        self.assertEqual(self.vimconn.neutron.show_floatingip.call_count, 4)
+        self.vimconn.nova.servers.get.assert_called_with(self.server.id)
+        mock_create_floating_ip.assert_not_called()
+        mock_assign_floating_ip.assert_not_called()
+        mock_time.assert_not_called()
+        mock_sleep.assert_not_called()
+
+    @patch("time.time")
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_create_floating_ip")
+    @patch.object(vimconnector, "_get_free_floating_ip")
+    @patch.object(vimconnector, "_assign_floating_ip")
+    def test_prepare_external_network_for_vm_instance_neutron_show_fip_exception_exit_on_error(
+        self,
+        mock_assign_floating_ip,
+        mock_get_free_floating_ip,
+        mock_create_floating_ip,
+        mock_sleep,
+        mock_time,
+    ):
+        """Neutron show floating ip gives exception, but exit_on_floating_ip_error is set to True.
+        VM status is not ACTIVE or ERROR, server timeout happened."""
+        floating_network = {
+            "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
+            "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
+            "exit_on_floating_ip_error": True,
+        }
+        external_network = [floating_network]
+        created_items = {}
+        vm_start_time = time_return_value
+        mock_get_free_floating_ip.side_effect = None
+        mock_time.side_effect = [156571790, 156571795, 156571800, 156571805]
+        self.vimconn.nova.servers.get.return_value.status = "OTHER"
+        self.vimconn.neutron.show_floatingip.side_effect = [
+            Exception("Floating ip could not be shown.")
+        ] * 5
+
+        with self.assertRaises(VimConnException) as err:
+            self.vimconn._prepare_external_network_for_vminstance(
+                external_network, self.server, created_items, vm_start_time
+            )
+        self.assertEqual(
+            str(err.exception),
+            "Cannot create floating_ip: Exception Floating ip could not be shown.",
+        )
+
+        self.assertEqual(mock_get_free_floating_ip.call_count, 3)
+        _call_mock_get_free_floating_ip = mock_get_free_floating_ip.call_args_list
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[0][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[1][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[2][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+
+        self.assertEqual(self.vimconn.neutron.show_floatingip.call_count, 3)
+        self.vimconn.nova.servers.get.assert_called_with(self.server.id)
+        mock_create_floating_ip.assert_not_called()
+        mock_assign_floating_ip.assert_not_called()
+        self.assertEqual(mock_time.call_count, 3)
+        self.assertEqual(mock_sleep.call_count, 2)
+
+    @patch("time.time")
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_create_floating_ip")
+    @patch.object(vimconnector, "_get_free_floating_ip")
+    @patch.object(vimconnector, "_assign_floating_ip")
+    def test_prepare_external_network_for_vm_instance_assign_floating_ip_exception_exit_on_error(
+        self,
+        mock_assign_floating_ip,
+        mock_get_free_floating_ip,
+        mock_create_floating_ip,
+        mock_sleep,
+        mock_time,
+    ):
+        """Assign floating ip method gives exception, exit_on_floating_ip_error is set to True.
+        VM status is in ERROR."""
+        floating_network = {
+            "floating_ip": "y08b73-o9cc-1a6a-a270-12cc4811bd4u",
+            "vim_id": "608b73-r9cc-5a6a-a270-82cc4811bd4a",
+            "exit_on_floating_ip_error": True,
+        }
+        external_network = [floating_network]
+        created_items = {}
+        vm_start_time = time_return_value
+
+        mock_get_free_floating_ip.side_effect = [
+            "y08b73-o9cc-1a6a-a270-12cc4811bd4u"
+        ] * 4
+
+        mock_time.side_effect = [156571790, 156571795, 156571800, 156571805]
+
+        mock_assign_floating_ip.side_effect = [
+            Exception("Floating ip could not be assigned.")
+        ] * 4
+
+        self.vimconn.nova.servers.get.return_value.status = "ERROR"
+        self.vimconn.neutron.show_floatingip.side_effect = [
+            {"floatingip": {"port_id": ""}}
+        ] * 4
+
+        with self.assertRaises(VimConnException) as err:
+            self.vimconn._prepare_external_network_for_vminstance(
+                external_network, self.server, created_items, vm_start_time
+            )
+        self.assertEqual(
+            str(err.exception),
+            "Cannot create floating_ip: Exception Floating ip could not be assigned.",
+        )
+
+        self.assertEqual(mock_get_free_floating_ip.call_count, 4)
+        _call_mock_get_free_floating_ip = mock_get_free_floating_ip.call_args_list
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[0][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[1][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+        self.assertEqual(
+            _call_mock_get_free_floating_ip[2][0],
+            (
+                self.server,
+                floating_network,
+            ),
+        )
+
+        self.assertEqual(self.vimconn.neutron.show_floatingip.call_count, 4)
+        self.vimconn.neutron.show_floatingip.assert_called_with(
+            "y08b73-o9cc-1a6a-a270-12cc4811bd4u"
+        )
+        self.assertEqual(self.vimconn.nova.servers.get.call_count, 4)
+        self.vimconn.nova.servers.get.assert_called_with(self.server.id)
+        mock_time.assert_not_called()
+        mock_sleep.assert_not_called()
+        mock_create_floating_ip.assert_not_called()
+
+    @patch("time.time")
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_create_floating_ip")
+    @patch.object(vimconnector, "_get_free_floating_ip")
+    @patch.object(vimconnector, "_assign_floating_ip")
+    def test_prepare_external_network_for_vm_instance_empty_external_network_list(
+        self,
+        mock_assign_floating_ip,
+        mock_get_free_floating_ip,
+        mock_create_floating_ip,
+        mock_sleep,
+        mock_time,
+    ):
+        """External network list is empty."""
+        external_network = []
+        created_items = {}
+        vm_start_time = time_return_value
+
+        self.vimconn._prepare_external_network_for_vminstance(
+            external_network, self.server, created_items, vm_start_time
+        )
+        mock_create_floating_ip.assert_not_called()
+        mock_time.assert_not_called()
+        mock_sleep.assert_not_called()
+        mock_assign_floating_ip.assert_not_called()
+        mock_get_free_floating_ip.assert_not_called()
+        self.vimconn.neutron.show.show_floatingip.assert_not_called()
+        self.vimconn.nova.servers.get.assert_not_called()
+
+    @patch.object(vimconnector, "_vimconnector__wait_for_vm")
+    def test_update_port_security_for_vm_instance(self, mock_wait_for_vm):
+        """no_secured_ports has port and the port has allow-address-pairs."""
+        no_secured_ports = [(port2_id, "allow-address-pairs")]
+
+        self.vimconn._update_port_security_for_vminstance(no_secured_ports, self.server)
+
+        mock_wait_for_vm.assert_called_once_with(self.server.id, "ACTIVE")
+
+        self.vimconn.neutron.update_port.assert_called_once_with(
+            port2_id,
+            {"port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}},
+        )
+
+    @patch.object(vimconnector, "_vimconnector__wait_for_vm")
+    def test_update_port_security_for_vm_instance_no_allowed_address_pairs(
+        self, mock_wait_for_vm
+    ):
+        """no_secured_ports has port and the port does not have allow-address-pairs."""
+        no_secured_ports = [(port2_id, "something")]
+
+        self.vimconn._update_port_security_for_vminstance(no_secured_ports, self.server)
+
+        mock_wait_for_vm.assert_called_once_with(self.server.id, "ACTIVE")
+
+        self.vimconn.neutron.update_port.assert_called_once_with(
+            port2_id,
+            {"port": {"port_security_enabled": False, "security_groups": None}},
+        )
+
+    @patch.object(vimconnector, "_vimconnector__wait_for_vm")
+    def test_update_port_security_for_vm_instance_wait_for_vm_raise_exception(
+        self, mock_wait_for_vm
+    ):
+        """__wait_for_vm raises timeout exception."""
+        no_secured_ports = [(port2_id, "something")]
+
+        mock_wait_for_vm.side_effect = VimConnException("Timeout waiting for instance.")
+
+        with self.assertRaises(VimConnException) as err:
+            self.vimconn._update_port_security_for_vminstance(
+                no_secured_ports, self.server
+            )
+        self.assertEqual(str(err.exception), "Timeout waiting for instance.")
+
+        mock_wait_for_vm.assert_called_once_with(self.server.id, "ACTIVE")
+
+        self.vimconn.neutron.update_port.assert_not_called()
+
+    @patch.object(vimconnector, "_vimconnector__wait_for_vm")
+    def test_update_port_security_for_vm_instance_neutron_update_port_raise_exception(
+        self, mock_wait_for_vm
+    ):
+        """neutron_update_port method raises exception."""
+        no_secured_ports = [(port2_id, "something")]
+
+        self.vimconn.neutron.update_port.side_effect = Exception(
+            "Port security could not be updated."
+        )
+
+        with self.assertRaises(VimConnException) as err:
+            self.vimconn._update_port_security_for_vminstance(
+                no_secured_ports, self.server
+            )
+        self.assertEqual(
+            str(err.exception),
+            "It was not possible to disable port security for port 17472685-f67f-49fd-8722-eabb7692fc22",
+        )
+        mock_wait_for_vm.assert_called_once_with(self.server.id, "ACTIVE")
+
+        self.vimconn.neutron.update_port.assert_called_once_with(
+            port2_id,
+            {"port": {"port_security_enabled": False, "security_groups": None}},
+        )
+
+    @patch.object(vimconnector, "_vimconnector__wait_for_vm")
+    def test_update_port_security_for_vm_instance_empty_port_list(
+        self, mock_wait_for_vm
+    ):
+        """no_secured_ports list does not have any ports."""
+        no_secured_ports = []
+
+        self.vimconn._update_port_security_for_vminstance(no_secured_ports, self.server)
+
+        mock_wait_for_vm.assert_not_called()
+
+        self.vimconn.neutron.update_port.assert_not_called()
+
    @patch("time.time")
    @patch.object(vimconnector, "remove_keep_tag_from_persistent_volumes")
    @patch.object(vimconnector, "_reload_connection")
    @patch.object(vimconnector, "_prepare_network_for_vminstance")
    @patch.object(vimconnector, "_create_user_data")
    @patch.object(vimconnector, "_get_vm_availability_zone")
    @patch.object(vimconnector, "_prepare_disk_for_vminstance")
    @patch.object(vimconnector, "_update_port_security_for_vminstance")
    @patch.object(vimconnector, "_prepare_external_network_for_vminstance")
    @patch.object(vimconnector, "delete_vminstance")
    @patch.object(vimconnector, "_format_exception")
    def test_new_vm_instance(
        self,
        mock_format_exception,
        mock_delete_vm_instance,
        mock_prepare_external_network,
        mock_update_port_security,
        mock_prepare_disk_for_vm_instance,
        mock_get_vm_availability_zone,
        mock_create_user_data,
        mock_prepare_network_for_vm_instance,
        mock_reload_connection,
        mock_remove_keep_flag_from_persistent_volumes,
        mock_time,
    ):
        """New VM instance creation is successful.

        All helper steps are mocked; the test verifies the orchestration:
        every preparation helper is invoked exactly once with the expected
        arguments, nova.servers.create receives the assembled parameters,
        and none of the failure-path helpers (delete/format_exception) run.
        """

        # _create_user_data returns (config_drive, userdata).
        mock_create_user_data.return_value = True, "userdata"

        mock_get_vm_availability_zone.return_value = "nova"

        self.vimconn.nova.servers.create.return_value = self.server

        mock_time.return_value = time_return_value

        # new_vminstance returns (server_id, created_items).
        expected_result = self.server.id, {}

        result = self.vimconn.new_vminstance(
            name,
            description,
            start,
            image_id,
            flavor_id,
            affinity_group_list,
            net_list,
            cloud_config,
            disk_list2,
            availability_zone_index,
            availability_zone_list,
        )
        self.assertEqual(result, expected_result)

        mock_reload_connection.assert_called_once()
        mock_prepare_network_for_vm_instance.assert_called_once_with(
            name=name,
            net_list=net_list,
            created_items={},
            net_list_vim=[],
            external_network=[],
            no_secured_ports=[],
        )
        mock_create_user_data.assert_called_once_with(cloud_config)
        mock_get_vm_availability_zone.assert_called_once_with(
            availability_zone_index, availability_zone_list
        )
        mock_prepare_disk_for_vm_instance.assert_called_once_with(
            name=name,
            existing_vim_volumes=[],
            created_items={},
            vm_av_zone="nova",
            block_device_mapping={},
            disk_list=disk_list2,
        )
        # The create call must carry the values produced by the mocked helpers
        # (availability zone, userdata, config_drive).
        self.vimconn.nova.servers.create.assert_called_once_with(
            name=name,
            image=image_id,
            flavor=flavor_id,
            nics=[],
            security_groups="default",
            availability_zone="nova",
            key_name="my_keypair",
            userdata="userdata",
            config_drive=True,
            block_device_mapping={},
            scheduler_hints={},
        )
        mock_time.assert_called_once()
        mock_update_port_security.assert_called_once_with([], self.server)
        mock_prepare_external_network.assert_called_once_with(
            external_network=[],
            server=self.server,
            created_items={},
            vm_start_time=time_return_value,
        )
        # Success path: no cleanup and no exception formatting.
        mock_remove_keep_flag_from_persistent_volumes.assert_not_called()
        mock_delete_vm_instance.assert_not_called()
        mock_format_exception.assert_not_called()
+
    @patch("time.time")
    @patch.object(vimconnector, "remove_keep_tag_from_persistent_volumes")
    @patch.object(vimconnector, "_reload_connection")
    @patch.object(vimconnector, "_prepare_network_for_vminstance")
    @patch.object(vimconnector, "_create_user_data")
    @patch.object(vimconnector, "_get_vm_availability_zone")
    @patch.object(vimconnector, "_prepare_disk_for_vminstance")
    @patch.object(vimconnector, "_update_port_security_for_vminstance")
    @patch.object(vimconnector, "_prepare_external_network_for_vminstance")
    @patch.object(vimconnector, "delete_vminstance")
    @patch.object(vimconnector, "_format_exception")
    def test_new_vm_instance_create_user_data_fails(
        self,
        mock_format_exception,
        mock_delete_vm_instance,
        mock_prepare_external_network,
        mock_update_port_security,
        mock_prepare_disk_for_vm_instance,
        mock_get_vm_availability_zone,
        mock_create_user_data,
        mock_prepare_network_for_vm_instance,
        mock_reload_connection,
        mock_remove_keep_flag_from_persistent_volumes,
        mock_time,
    ):
        """New VM instance creation failed because of user data creation failure.

        The failure happens after network preparation but before the disk,
        create and external-network steps, so those must not run; the cleanup
        path (remove keep tag, delete_vminstance, _format_exception) must.
        """

        mock_create_user_data.side_effect = Exception(
            "User data could not be retrieved."
        )

        mock_get_vm_availability_zone.return_value = "nova"

        mock_remove_keep_flag_from_persistent_volumes.return_value = {}

        self.vimconn.nova.servers.create.return_value = self.server

        mock_time.return_value = time_return_value

        self.vimconn.new_vminstance(
            name,
            description,
            start,
            image_id,
            flavor_id,
            affinity_group_list,
            net_list,
            cloud_config,
            disk_list,
            availability_zone_index,
            availability_zone_list,
        )

        mock_reload_connection.assert_called_once()
        mock_prepare_network_for_vm_instance.assert_called_once_with(
            name=name,
            net_list=net_list,
            created_items={},
            net_list_vim=[],
            external_network=[],
            no_secured_ports=[],
        )
        mock_create_user_data.assert_called_once_with(cloud_config)
        # Everything downstream of the failing step stays untouched.
        mock_get_vm_availability_zone.assert_not_called()
        mock_prepare_disk_for_vm_instance.assert_not_called()
        self.vimconn.nova.servers.create.assert_not_called()
        mock_time.assert_not_called()
        mock_update_port_security.assert_not_called()
        mock_prepare_external_network.assert_not_called()
        # Cleanup path: no server was created, so delete gets (None, {}).
        mock_remove_keep_flag_from_persistent_volumes.assert_called_once_with({})
        mock_delete_vm_instance.assert_called_once_with(None, {})
        mock_format_exception.assert_called_once()
        arg = mock_format_exception.call_args[0][0]
        self.assertEqual(str(arg), "User data could not be retrieved.")
+
+    @patch("time.time")
+    @patch.object(vimconnector, "remove_keep_tag_from_persistent_volumes")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_prepare_network_for_vminstance")
+    @patch.object(vimconnector, "_create_user_data")
+    @patch.object(vimconnector, "_get_vm_availability_zone")
+    @patch.object(vimconnector, "_prepare_disk_for_vminstance")
+    @patch.object(vimconnector, "_update_port_security_for_vminstance")
+    @patch.object(vimconnector, "_prepare_external_network_for_vminstance")
+    @patch.object(vimconnector, "delete_vminstance")
+    @patch.object(vimconnector, "_format_exception")
+    def test_new_vm_instance_external_network_exception(
+        self,
+        mock_format_exception,
+        mock_delete_vm_instance,
+        mock_prepare_external_network,
+        mock_update_port_security,
+        mock_prepare_disk_for_vm_instance,
+        mock_get_vm_availability_zone,
+        mock_create_user_data,
+        mock_prepare_network_for_vm_instance,
+        mock_reload_connection,
+        mock_remove_keep_flag_from_persistent_volumes,
+        mock_time,
+    ):
+        """New VM instance creation, external network connection has failed as floating
+        ip could not be created."""
+
+        mock_create_user_data.return_value = True, "userdata"
+
+        mock_get_vm_availability_zone.return_value = "nova"
+
+        self.vimconn.nova.servers.create.return_value = self.server
+
+        mock_time.return_value = time_return_value
+
+        mock_remove_keep_flag_from_persistent_volumes.return_value = {}
+
+        mock_prepare_external_network.side_effect = VimConnException(
+            "Can not create floating ip."
+        )
+
+        self.vimconn.new_vminstance(
+            name,
+            description,
+            start,
+            image_id,
+            flavor_id,
+            affinity_group_list,
+            net_list,
+            cloud_config,
+            disk_list2,
+            availability_zone_index,
+            availability_zone_list,
+        )
+
+        mock_reload_connection.assert_called_once()
+        mock_prepare_network_for_vm_instance.assert_called_once_with(
+            name=name,
+            net_list=net_list,
+            created_items={},
+            net_list_vim=[],
+            external_network=[],
+            no_secured_ports=[],
+        )
+        mock_create_user_data.assert_called_once_with(cloud_config)
+        mock_get_vm_availability_zone.assert_called_once_with(
+            availability_zone_index, availability_zone_list
+        )
+        mock_prepare_disk_for_vm_instance.assert_called_once_with(
+            name=name,
+            existing_vim_volumes=[],
+            created_items={},
+            vm_av_zone="nova",
+            block_device_mapping={},
+            disk_list=disk_list2,
+        )
+        self.vimconn.nova.servers.create.assert_called_once_with(
+            name=name,
+            image=image_id,
+            flavor=flavor_id,
+            nics=[],
+            security_groups="default",
+            availability_zone="nova",
+            key_name="my_keypair",
+            userdata="userdata",
+            config_drive=True,
+            block_device_mapping={},
+            scheduler_hints={},
+        )
+        mock_time.assert_called_once()
+        mock_update_port_security.assert_called_once_with([], self.server)
+        mock_prepare_external_network.assert_called_once_with(
+            external_network=[],
+            server=self.server,
+            created_items={},
+            vm_start_time=time_return_value,
+        )
+        mock_remove_keep_flag_from_persistent_volumes.assert_called_once_with({})
+        mock_delete_vm_instance.assert_called_once_with(self.server.id, {})
+        mock_format_exception.assert_called_once()
+        arg = mock_format_exception.call_args[0][0]
+        self.assertEqual(str(arg), "Can not create floating ip.")
+
+    @patch("time.time")
+    @patch.object(vimconnector, "remove_keep_tag_from_persistent_volumes")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_prepare_network_for_vminstance")
+    @patch.object(vimconnector, "_create_user_data")
+    @patch.object(vimconnector, "_get_vm_availability_zone")
+    @patch.object(vimconnector, "_prepare_disk_for_vminstance")
+    @patch.object(vimconnector, "_update_port_security_for_vminstance")
+    @patch.object(vimconnector, "_prepare_external_network_for_vminstance")
+    @patch.object(vimconnector, "delete_vminstance")
+    @patch.object(vimconnector, "_format_exception")
+    def test_new_vm_instance_with_affinity_group(
+        self,
+        mock_format_exception,
+        mock_delete_vm_instance,
+        mock_prepare_external_network,
+        mock_update_port_security,
+        mock_prepare_disk_for_vm_instance,
+        mock_get_vm_availability_zone,
+        mock_create_user_data,
+        mock_prepare_network_for_vm_instance,
+        mock_reload_connection,
+        mock_remove_keep_flag_from_persistent_volumes,
+        mock_time,
+    ):
+        """New VM creation with affinity group."""
+        affinity_group_list = [
+            {"affinity_group_id": "38b73-e9cc-5a6a-t270-82cc4811bd4a"}
+        ]
+        mock_create_user_data.return_value = True, "userdata"
+        mock_get_vm_availability_zone.return_value = "nova"
+        self.vimconn.nova.servers.create.return_value = self.server
+        mock_time.return_value = time_return_value
+        expected_result = self.server.id, {}
+
+        result = self.vimconn.new_vminstance(
+            name,
+            description,
+            start,
+            image_id,
+            flavor_id,
+            affinity_group_list,
+            net_list,
+            cloud_config,
+            disk_list2,
+            availability_zone_index,
+            availability_zone_list,
+        )
+        self.assertEqual(result, expected_result)
+
+        mock_reload_connection.assert_called_once()
+        mock_prepare_network_for_vm_instance.assert_called_once_with(
+            name=name,
+            net_list=net_list,
+            created_items={},
+            net_list_vim=[],
+            external_network=[],
+            no_secured_ports=[],
+        )
+        mock_create_user_data.assert_called_once_with(cloud_config)
+        mock_get_vm_availability_zone.assert_called_once_with(
+            availability_zone_index, availability_zone_list
+        )
+        mock_prepare_disk_for_vm_instance.assert_called_once_with(
+            name=name,
+            existing_vim_volumes=[],
+            created_items={},
+            vm_av_zone="nova",
+            block_device_mapping={},
+            disk_list=disk_list2,
+        )
+        self.vimconn.nova.servers.create.assert_called_once_with(
+            name=name,
+            image=image_id,
+            flavor=flavor_id,
+            nics=[],
+            security_groups="default",
+            availability_zone="nova",
+            key_name="my_keypair",
+            userdata="userdata",
+            config_drive=True,
+            block_device_mapping={},
+            scheduler_hints={"group": "38b73-e9cc-5a6a-t270-82cc4811bd4a"},
+        )
+        mock_time.assert_called_once()
+        mock_update_port_security.assert_called_once_with([], self.server)
+        mock_prepare_external_network.assert_called_once_with(
+            external_network=[],
+            server=self.server,
+            created_items={},
+            vm_start_time=time_return_value,
+        )
+        mock_remove_keep_flag_from_persistent_volumes.assert_not_called()
+        mock_delete_vm_instance.assert_not_called()
+        mock_format_exception.assert_not_called()
+
+    @patch("time.time")
+    @patch.object(vimconnector, "remove_keep_tag_from_persistent_volumes")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_prepare_network_for_vminstance")
+    @patch.object(vimconnector, "_create_user_data")
+    @patch.object(vimconnector, "_get_vm_availability_zone")
+    @patch.object(vimconnector, "_prepare_disk_for_vminstance")
+    @patch.object(vimconnector, "_update_port_security_for_vminstance")
+    @patch.object(vimconnector, "_prepare_external_network_for_vminstance")
+    @patch.object(vimconnector, "delete_vminstance")
+    @patch.object(vimconnector, "_format_exception")
+    def test_new_vm_instance_nova_server_create_failed(
+        self,
+        mock_format_exception,
+        mock_delete_vm_instance,
+        mock_prepare_external_network,
+        mock_update_port_security,
+        mock_prepare_disk_for_vm_instance,
+        mock_get_vm_availability_zone,
+        mock_create_user_data,
+        mock_prepare_network_for_vm_instance,
+        mock_reload_connection,
+        mock_remove_keep_flag_from_persistent_volumes,
+        mock_time,
+    ):
+        """New VM(server) creation failed."""
+
+        mock_create_user_data.return_value = True, "userdata"
+
+        mock_get_vm_availability_zone.return_value = "nova"
+
+        self.vimconn.nova.servers.create.side_effect = Exception(
+            "Server could not be created."
+        )
+
+        mock_time.return_value = time_return_value
+
+        mock_remove_keep_flag_from_persistent_volumes.return_value = {}
+
+        self.vimconn.new_vminstance(
+            name,
+            description,
+            start,
+            image_id,
+            flavor_id,
+            affinity_group_list,
+            net_list,
+            cloud_config,
+            disk_list2,
+            availability_zone_index,
+            availability_zone_list,
+        )
+
+        mock_reload_connection.assert_called_once()
+        mock_prepare_network_for_vm_instance.assert_called_once_with(
+            name=name,
+            net_list=net_list,
+            created_items={},
+            net_list_vim=[],
+            external_network=[],
+            no_secured_ports=[],
+        )
+        mock_create_user_data.assert_called_once_with(cloud_config)
+        mock_get_vm_availability_zone.assert_called_once_with(
+            availability_zone_index, availability_zone_list
+        )
+        mock_prepare_disk_for_vm_instance.assert_called_once_with(
+            name=name,
+            existing_vim_volumes=[],
+            created_items={},
+            vm_av_zone="nova",
+            block_device_mapping={},
+            disk_list=disk_list2,
+        )
+
+        self.vimconn.nova.servers.create.assert_called_once_with(
+            name=name,
+            image=image_id,
+            flavor=flavor_id,
+            nics=[],
+            security_groups="default",
+            availability_zone="nova",
+            key_name="my_keypair",
+            userdata="userdata",
+            config_drive=True,
+            block_device_mapping={},
+            scheduler_hints={},
+        )
+        mock_time.assert_not_called()
+        mock_update_port_security.assert_not_called()
+        mock_prepare_external_network.assert_not_called()
+        mock_remove_keep_flag_from_persistent_volumes.assert_called_once_with({})
+        mock_delete_vm_instance.assert_called_once_with(None, {})
+        mock_format_exception.assert_called_once()
+        arg = mock_format_exception.call_args[0][0]
+        self.assertEqual(str(arg), "Server could not be created.")
+
+    @patch("time.time")
+    @patch.object(vimconnector, "remove_keep_tag_from_persistent_volumes")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_prepare_network_for_vminstance")
+    @patch.object(vimconnector, "_create_user_data")
+    @patch.object(vimconnector, "_get_vm_availability_zone")
+    @patch.object(vimconnector, "_prepare_disk_for_vminstance")
+    @patch.object(vimconnector, "_update_port_security_for_vminstance")
+    @patch.object(vimconnector, "_prepare_external_network_for_vminstance")
+    @patch.object(vimconnector, "delete_vminstance")
+    @patch.object(vimconnector, "_format_exception")
+    def test_new_vm_instance_connection_exception(
+        self,
+        mock_format_exception,
+        mock_delete_vm_instance,
+        mock_prepare_external_network,
+        mock_update_port_security,
+        mock_prepare_disk_for_vm_instance,
+        mock_get_vm_availability_zone,
+        mock_create_user_data,
+        mock_prepare_network_for_vm_instance,
+        mock_reload_connection,
+        mock_remove_keep_flag_from_persistent_volumes,
+        mock_time,
+    ):
+        """Connection to Cloud API has failed."""
+        mock_reload_connection.side_effect = Exception("Can not connect to Cloud APIs.")
+        mock_create_user_data.return_value = True, "userdata"
+        mock_get_vm_availability_zone.return_value = "nova"
+        self.vimconn.nova.servers.create.return_value = self.server
+        mock_time.return_value = time_return_value
+        mock_remove_keep_flag_from_persistent_volumes.return_value = {}
+
+        self.vimconn.new_vminstance(
+            name,
+            description,
+            start,
+            image_id,
+            flavor_id,
+            affinity_group_list,
+            net_list,
+            cloud_config,
+            disk_list,
+            availability_zone_index,
+            availability_zone_list,
+        )
+        mock_format_exception.assert_called_once()
+        arg = mock_format_exception.call_args[0][0]
+        self.assertEqual(str(arg), "Can not connect to Cloud APIs.")
+        mock_reload_connection.assert_called_once()
+        mock_prepare_network_for_vm_instance.assert_not_called()
+        mock_create_user_data.assert_not_called()
+        mock_get_vm_availability_zone.assert_not_called()
+        mock_prepare_disk_for_vm_instance.assert_not_called()
+        self.vimconn.nova.servers.create.assert_not_called()
+        mock_time.assert_not_called()
+        mock_update_port_security.assert_not_called()
+        mock_prepare_external_network.assert_not_called()
+        mock_remove_keep_flag_from_persistent_volumes.assert_called_once_with({})
+        mock_delete_vm_instance.assert_called_once_with(None, {})
+
+    @patch.object(vimconnector, "_delete_ports_by_id_wth_neutron")
+    def test_delete_vm_ports_attached_to_network_empty_created_items(
+        self, mock_delete_ports_by_id_wth_neutron
+    ):
+        """Created_items is empty."""
+        created_items = {}
+        self.vimconn._delete_vm_ports_attached_to_network(created_items)
+        self.vimconn.neutron.list_ports.assert_not_called()
+        self.vimconn.neutron.delete_port.assert_not_called()
+        mock_delete_ports_by_id_wth_neutron.assert_not_called()
+
+    @patch.object(vimconnector, "_delete_ports_by_id_wth_neutron")
+    def test_delete_vm_ports_attached_to_network(
+        self, mock_delete_ports_by_id_wth_neutron
+    ):
+        created_items = {
+            "floating_ip:308b73-t9cc-1a6a-a270-12cc4811bd4a": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": True,
+        }
+        self.vimconn._delete_vm_ports_attached_to_network(created_items)
+        mock_delete_ports_by_id_wth_neutron.assert_called_once_with(f"{port_id}")
+        self.vimconn.logger.error.assert_not_called()
+
+    @patch.object(vimconnector, "_delete_ports_by_id_wth_neutron")
+    def test_delete_vm_ports_attached_to_network_wthout_port(
+        self, mock_delete_ports_by_id_wth_neutron
+    ):
+        """Created_items does not have a port."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": True,
+        }
+        self.vimconn._delete_vm_ports_attached_to_network(created_items)
+        mock_delete_ports_by_id_wth_neutron.assert_not_called()
+        self.vimconn.logger.error.assert_not_called()
+
+    @patch.object(vimconnector, "_delete_ports_by_id_wth_neutron")
+    def test_delete_vm_ports_attached_to_network_delete_port_raise_vimconnexception(
+        self, mock_delete_ports_by_id_wth_neutron
+    ):
+        """_delete_ports_by_id_wth_neutron raises vimconnexception."""
+        created_items = deepcopy(created_items_all_true)
+        mock_delete_ports_by_id_wth_neutron.side_effect = VimConnException(
+            "Can not delete port"
+        )
+        self.vimconn._delete_vm_ports_attached_to_network(created_items)
+        mock_delete_ports_by_id_wth_neutron.assert_called_once_with(f"{port_id}")
+        self.vimconn.logger.error.assert_called_once_with(
+            "Error deleting port: VimConnException: Can not delete port"
+        )
+
+    @patch.object(vimconnector, "_delete_ports_by_id_wth_neutron")
+    def test_delete_vm_ports_attached_to_network_delete_port_raise_nvexception(
+        self, mock_delete_ports_by_id_wth_neutron
+    ):
+        """_delete_ports_by_id_wth_neutron raises nvExceptions.ClientException."""
+        created_items = deepcopy(created_items_all_true)
+        mock_delete_ports_by_id_wth_neutron.side_effect = nvExceptions.ClientException(
+            "Connection aborted."
+        )
+        self.vimconn._delete_vm_ports_attached_to_network(created_items)
+        mock_delete_ports_by_id_wth_neutron.assert_called_once_with(f"{port_id}")
+        self.vimconn.logger.error.assert_called_once_with(
+            "Error deleting port: ClientException: Unknown Error (HTTP Connection aborted.)"
+        )
+
+    @patch.object(vimconnector, "_delete_ports_by_id_wth_neutron")
+    def test_delete_vm_ports_attached_to_network_delete_port_invalid_port_item(
+        self, mock_delete_ports_by_id_wth_neutron
+    ):
+        """port item is invalid."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}:": True,
+        }
+        mock_delete_ports_by_id_wth_neutron.side_effect = VimConnException(
+            "Port is not valid."
+        )
+        self.vimconn._delete_vm_ports_attached_to_network(created_items)
+        mock_delete_ports_by_id_wth_neutron.assert_called_once_with(f"{port_id}:")
+        self.vimconn.logger.error.assert_called_once_with(
+            "Error deleting port: VimConnException: Port is not valid."
+        )
+
+    @patch.object(vimconnector, "_delete_ports_by_id_wth_neutron")
+    def test_delete_vm_ports_attached_to_network_delete_port_already_deleted(
+        self, mock_delete_ports_by_id_wth_neutron
+    ):
+        """port is already deleted."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": None,
+            f"port:{port_id}": None,
+        }
+        self.vimconn._delete_vm_ports_attached_to_network(created_items)
+        mock_delete_ports_by_id_wth_neutron.assert_not_called()
+        self.vimconn.logger.error.assert_not_called()
+
+    def test_delete_floating_ip_by_id(self):
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"port:{port_id}": True,
+        }
+        expected_created_items = {
+            f"floating_ip:{floating_network_vim_id}": None,
+            f"port:{port_id}": True,
+        }
+        k_id = floating_network_vim_id
+        k = f"floating_ip:{floating_network_vim_id}"
+        self.vimconn._delete_floating_ip_by_id(k, k_id, created_items)
+        self.vimconn.neutron.delete_floatingip.assert_called_once_with(k_id)
+        self.assertEqual(created_items, expected_created_items)
+
+    def test_delete_floating_ip_by_id_floating_ip_already_deleted(self):
+        """floating ip is already deleted."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": None,
+            f"port:{port_id}": True,
+        }
+        k_id = floating_network_vim_id
+        k = f"floating_ip:{floating_network_vim_id}"
+        self.vimconn._delete_floating_ip_by_id(k, k_id, created_items)
+        self.vimconn.neutron.delete_floatingip.assert_called_once_with(k_id)
+        self.assertEqual(
+            created_items,
+            {
+                f"floating_ip:{floating_network_vim_id}": None,
+                f"port:{port_id}": True,
+            },
+        )
+
+    def test_delete_floating_ip_by_id_floating_ip_raises_nvexception(self):
+        """neutron delete floating ip raises nvExceptions.ClientException."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"port:{port_id}": True,
+        }
+        k_id = floating_network_vim_id
+        k = f"floating_ip:{floating_network_vim_id}"
+        self.vimconn.neutron.delete_floatingip.side_effect = (
+            nvExceptions.ClientException("Client exception occured.")
+        )
+        self.vimconn._delete_floating_ip_by_id(k, k_id, created_items)
+        self.vimconn.neutron.delete_floatingip.assert_called_once_with(k_id)
+        self.assertEqual(
+            created_items,
+            {
+                f"floating_ip:{floating_network_vim_id}": True,
+                f"port:{port_id}": True,
+            },
+        )
+        self.vimconn.logger.error.assert_called_once_with(
+            "Error deleting floating ip: ClientException: Unknown Error (HTTP Client exception occured.)"
+        )
+
+    def test_delete_floating_ip_by_id_floating_ip_raises_vimconnexception(self):
+        """neutron delete floating ip raises VimConnNotFoundException."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"port:{port_id}": True,
+        }
+        k_id = floating_network_vim_id
+        k = f"floating_ip:{floating_network_vim_id}"
+        self.vimconn.neutron.delete_floatingip.side_effect = VimConnNotFoundException(
+            "Port id could not found."
+        )
+        self.vimconn._delete_floating_ip_by_id(k, k_id, created_items)
+        self.vimconn.neutron.delete_floatingip.assert_called_once_with(k_id)
+        self.assertEqual(
+            created_items,
+            {
+                f"floating_ip:{floating_network_vim_id}": True,
+                f"port:{port_id}": True,
+            },
+        )
+        self.vimconn.logger.error.assert_called_once_with(
+            "Error deleting floating ip: VimConnNotFoundException: Port id could not found."
+        )
+
+    def test_delete_floating_ip_by_id_floating_ip_invalid_k_item(self):
+        """invalid floating ip item."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"port:{port_id}": True,
+        }
+        expected_created_items = {
+            f"floating_ip:{floating_network_vim_id}::": None,
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"port:{port_id}": True,
+        }
+        k_id = floating_network_vim_id
+        k = f"floating_ip:{floating_network_vim_id}::"
+        self.vimconn._delete_floating_ip_by_id(k, k_id, created_items)
+        self.vimconn.neutron.delete_floatingip.assert_called_once_with(k_id)
+        self.assertEqual(created_items, expected_created_items)
+
+    def test_delete_volumes_by_id_with_cinder_volume_status_available(self):
+        """volume status is available."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        expected_created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": None,
+            f"port:{port_id}": None,
+        }
+        volumes_to_hold = []
+        k = f"volume:{volume_id}"
+        k_id = volume_id
+        self.vimconn.cinder.volumes.get.return_value.status = "available"
+        result = self.vimconn._delete_volumes_by_id_wth_cinder(
+            k, k_id, volumes_to_hold, created_items
+        )
+        self.assertEqual(result, None)
+        self.vimconn.cinder.volumes.get.assert_called_once_with(k_id)
+        self.vimconn.cinder.volumes.delete.assert_called_once_with(k_id)
+        self.vimconn.logger.error.assert_not_called()
+        self.assertEqual(created_items, expected_created_items)
+
+    def test_delete_volumes_by_id_with_cinder_volume_already_deleted(self):
+        """volume is already deleted."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": None,
+            f"port:{port_id}": None,
+        }
+        expected_created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": None,
+            f"port:{port_id}": None,
+        }
+        volumes_to_hold = []
+        k = f"volume:{volume_id}"
+        k_id = volume_id
+        self.vimconn.cinder.volumes.get.return_value.status = "available"
+        result = self.vimconn._delete_volumes_by_id_wth_cinder(
+            k, k_id, volumes_to_hold, created_items
+        )
+        self.assertEqual(result, None)
+        self.vimconn.cinder.volumes.get.assert_called_once_with(k_id)
+        self.vimconn.cinder.volumes.delete.assert_called_once_with(k_id)
+        self.vimconn.logger.error.assert_not_called()
+        self.assertEqual(created_items, expected_created_items)
+
+    def test_delete_volumes_by_id_with_cinder_get_volume_raise_exception(self):
+        """cinder get volume raises exception."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        expected_created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        volumes_to_hold = []
+        k = f"volume:{volume_id}"
+        k_id = volume_id
+        self.vimconn.cinder.volumes.get.side_effect = Exception(
+            "Can not get volume status."
+        )
+        result = self.vimconn._delete_volumes_by_id_wth_cinder(
+            k, k_id, volumes_to_hold, created_items
+        )
+        self.assertEqual(result, None)
+        self.vimconn.cinder.volumes.get.assert_called_once_with(k_id)
+        self.vimconn.cinder.volumes.delete.assert_not_called()
+        self.vimconn.logger.error.assert_called_once_with(
+            "Error deleting volume: Exception: Can not get volume status."
+        )
+        self.assertEqual(created_items, expected_created_items)
+
+    def test_delete_volumes_by_id_with_cinder_delete_volume_raise_exception(self):
+        """cinder delete volume raises exception."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        expected_created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        volumes_to_hold = []
+        k = f"volume:{volume_id}"
+        k_id = volume_id
+        self.vimconn.cinder.volumes.get.return_value.status = "available"
+        self.vimconn.cinder.volumes.delete.side_effect = nvExceptions.ClientException(
+            "Connection aborted."
+        )
+        result = self.vimconn._delete_volumes_by_id_wth_cinder(
+            k, k_id, volumes_to_hold, created_items
+        )
+        self.assertEqual(result, None)
+        self.vimconn.cinder.volumes.get.assert_called_once_with(k_id)
+        self.vimconn.cinder.volumes.delete.assert_called_once_with(k_id)
+        self.vimconn.logger.error.assert_called_once_with(
+            "Error deleting volume: ClientException: Unknown Error (HTTP Connection aborted.)"
+        )
+        self.assertEqual(created_items, expected_created_items)
+
+    def test_delete_volumes_by_id_with_cinder_volume_to_be_hold(self):
+        """volumes_to_hold has an item."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        expected_created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        volumes_to_hold = [volume_id]
+        k = f"volume:{volume_id}"
+        k_id = volume_id
+        result = self.vimconn._delete_volumes_by_id_wth_cinder(
+            k, k_id, volumes_to_hold, created_items
+        )
+        self.assertEqual(result, None)
+        self.vimconn.cinder.volumes.get.assert_not_called()
+        self.vimconn.cinder.volumes.delete.assert_not_called()
+        self.vimconn.logger.error.assert_not_called()
+        self.assertEqual(created_items, expected_created_items)
+
+    def test_delete_volumes_by_id_with_cinder_volume_status_not_available(self):
+        """volume status is not available."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        expected_created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id2}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        volumes_to_hold = []
+        k = f"volume:{volume_id}"
+        k_id = volume_id
+        self.vimconn.cinder.volumes.get.return_value.status = "unavailable"
+        result = self.vimconn._delete_volumes_by_id_wth_cinder(
+            k, k_id, volumes_to_hold, created_items
+        )
+        self.assertEqual(result, True)
+        self.vimconn.cinder.volumes.get.assert_called_once_with(k_id)
+        self.vimconn.cinder.volumes.delete.assert_not_called()
+        self.vimconn.logger.error.assert_not_called()
+        self.assertEqual(created_items, expected_created_items)
+
+    def test_delete_ports_by_id_by_neutron(self):
+        """neutron delete ports."""
+        k_id = port_id
+        self.vimconn.neutron.list_ports.return_value = {
+            "ports": [{"id": port_id}, {"id": port2_id}]
+        }
+
+        self.vimconn._delete_ports_by_id_wth_neutron(k_id)
+        self.vimconn.neutron.list_ports.assert_called_once()
+        self.vimconn.neutron.delete_port.assert_called_once_with(k_id)
+        self.vimconn.logger.error.assert_not_called()
+
+    def test_delete_ports_by_id_by_neutron_id_not_in_port_list(self):
+        """port id not in the port list."""
+        k_id = volume_id
+        self.vimconn.neutron.list_ports.return_value = {
+            "ports": [{"id": port_id}, {"id": port2_id}]
+        }
+
+        self.vimconn._delete_ports_by_id_wth_neutron(k_id)
+        self.vimconn.neutron.list_ports.assert_called_once()
+        self.vimconn.neutron.delete_port.assert_not_called()
+        self.vimconn.logger.error.assert_not_called()
+
+    def test_delete_ports_by_id_by_neutron_list_port_raise_exception(self):
+        """neutron list port raises exception."""
+        k_id = port_id
+        self.vimconn.neutron.list_ports.side_effect = nvExceptions.ClientException(
+            "Connection aborted."
+        )
+        self.vimconn._delete_ports_by_id_wth_neutron(k_id)
+        self.vimconn.neutron.list_ports.assert_called_once()
+        self.vimconn.neutron.delete_port.assert_not_called()
+        self.vimconn.logger.error.assert_called_once_with(
+            "Error deleting port: ClientException: Unknown Error (HTTP Connection aborted.)"
+        )
+
+    def test_delete_ports_by_id_by_neutron_delete_port_raise_exception(self):
+        """neutron delete port raises exception."""
+        k_id = port_id
+        self.vimconn.neutron.list_ports.return_value = {
+            "ports": [{"id": port_id}, {"id": port2_id}]
+        }
+        self.vimconn.neutron.delete_port.side_effect = nvExceptions.ClientException(
+            "Connection aborted."
+        )
+        self.vimconn._delete_ports_by_id_wth_neutron(k_id)
+        self.vimconn.neutron.list_ports.assert_called_once()
+        self.vimconn.neutron.delete_port.assert_called_once_with(k_id)
+        self.vimconn.logger.error.assert_called_once_with(
+            "Error deleting port: ClientException: Unknown Error (HTTP Connection aborted.)"
+        )
+
+    def test_get_item_name_id(self):
+        """Get name and id successfully."""
+        k = f"some:{port_id}"
+        result = self.vimconn._get_item_name_id(k)
+        self.assertEqual(result, ("some", f"{port_id}"))
+
+    def test_get_item_name_id_wthout_semicolon(self):
+        """Does not have seperator."""
+        k = f"some{port_id}"
+        result = self.vimconn._get_item_name_id(k)
+        self.assertEqual(result, (f"some{port_id}", ""))
+
+    def test_get_item_name_id_empty_string(self):
+        """Empty string."""
+        k = ""
+        result = self.vimconn._get_item_name_id(k)
+        self.assertEqual(result, ("", ""))
+
+    def test_get_item_name_id_k_is_none(self):
+        """item is None."""
+        k = None
+        with self.assertRaises(AttributeError):
+            self.vimconn._get_item_name_id(k)
+
+    @patch.object(vimconnector, "_get_item_name_id")
+    @patch.object(vimconnector, "_delete_volumes_by_id_wth_cinder")
+    @patch.object(vimconnector, "_delete_floating_ip_by_id")
+    def test_delete_created_items(
+        self,
+        mock_delete_floating_ip_by_id,
+        mock_delete_volumes_by_id_wth_cinder,
+        mock_get_item_name_id,
+    ):
+        """Created items has floating ip and volume."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        mock_get_item_name_id.side_effect = [
+            ("floating_ip", f"{floating_network_vim_id}"),
+            ("volume", f"{volume_id}"),
+        ]
+        mock_delete_volumes_by_id_wth_cinder.return_value = True
+        volumes_to_hold = []
+        keep_waiting = False
+        result = self.vimconn._delete_created_items(
+            created_items, volumes_to_hold, keep_waiting
+        )
+        self.assertEqual(result, True)
+        self.assertEqual(mock_get_item_name_id.call_count, 2)
+        mock_delete_volumes_by_id_wth_cinder.assert_called_once_with(
+            f"volume:{volume_id}", f"{volume_id}", [], created_items
+        )
+        mock_delete_floating_ip_by_id.assert_called_once_with(
+            f"floating_ip:{floating_network_vim_id}",
+            f"{floating_network_vim_id}",
+            created_items,
+        )
+        self.vimconn.logger.error.assert_not_called()
+
+    @patch.object(vimconnector, "_get_item_name_id")
+    @patch.object(vimconnector, "_delete_volumes_by_id_wth_cinder")
+    @patch.object(vimconnector, "_delete_floating_ip_by_id")
+    def test_delete_created_items_wth_volumes_to_hold(
+        self,
+        mock_delete_floating_ip_by_id,
+        mock_delete_volumes_by_id_wth_cinder,
+        mock_get_item_name_id,
+    ):
+        """Created items has floating ip and volume and volumes_to_hold has items."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        mock_get_item_name_id.side_effect = [
+            ("floating_ip", f"{floating_network_vim_id}"),
+            ("volume", f"{volume_id}"),
+        ]
+        mock_delete_volumes_by_id_wth_cinder.return_value = True
+        volumes_to_hold = [f"{volume_id}", f"{volume_id2}"]
+        keep_waiting = False
+        result = self.vimconn._delete_created_items(
+            created_items, volumes_to_hold, keep_waiting
+        )
+        self.assertEqual(result, True)
+        self.assertEqual(mock_get_item_name_id.call_count, 2)
+        mock_delete_volumes_by_id_wth_cinder.assert_called_once_with(
+            f"volume:{volume_id}", f"{volume_id}", volumes_to_hold, created_items
+        )
+        mock_delete_floating_ip_by_id.assert_called_once_with(
+            f"floating_ip:{floating_network_vim_id}",
+            f"{floating_network_vim_id}",
+            created_items,
+        )
+        self.vimconn.logger.error.assert_not_called()
+
+    @patch.object(vimconnector, "_get_item_name_id")
+    @patch.object(vimconnector, "_delete_volumes_by_id_wth_cinder")
+    @patch.object(vimconnector, "_delete_floating_ip_by_id")
+    def test_delete_created_items_wth_keep_waiting_true(
+        self,
+        mock_delete_floating_ip_by_id,
+        mock_delete_volumes_by_id_wth_cinder,
+        mock_get_item_name_id,
+    ):
+        """Keep waiting initial value is True."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        mock_get_item_name_id.side_effect = [
+            ("floating_ip", f"{floating_network_vim_id}"),
+            ("volume", f"{volume_id}"),
+        ]
+        mock_delete_volumes_by_id_wth_cinder.return_value = False
+        volumes_to_hold = [f"{volume_id}", f"{volume_id2}"]
+        keep_waiting = True
+        result = self.vimconn._delete_created_items(
+            created_items, volumes_to_hold, keep_waiting
+        )
+        self.assertEqual(result, True)
+        self.assertEqual(mock_get_item_name_id.call_count, 2)
+        mock_delete_volumes_by_id_wth_cinder.assert_called_once_with(
+            f"volume:{volume_id}", f"{volume_id}", volumes_to_hold, created_items
+        )
+        mock_delete_floating_ip_by_id.assert_called_once_with(
+            f"floating_ip:{floating_network_vim_id}",
+            f"{floating_network_vim_id}",
+            created_items,
+        )
+        self.vimconn.logger.error.assert_not_called()
+
+    @patch.object(vimconnector, "_get_item_name_id")
+    @patch.object(vimconnector, "_delete_volumes_by_id_wth_cinder")
+    @patch.object(vimconnector, "_delete_floating_ip_by_id")
+    def test_delete_created_items_delete_vol_raises(
+        self,
+        mock_delete_floating_ip_by_id,
+        mock_delete_volumes_by_id_wth_cinder,
+        mock_get_item_name_id,
+    ):
+        """Delete volume raises exception."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        mock_get_item_name_id.side_effect = [
+            ("floating_ip", f"{floating_network_vim_id}"),
+            ("volume", f"{volume_id}"),
+        ]
+        mock_delete_volumes_by_id_wth_cinder.side_effect = ConnectionError(
+            "Connection failed."
+        )
+        volumes_to_hold = []
+        keep_waiting = False
+        result = self.vimconn._delete_created_items(
+            created_items, volumes_to_hold, keep_waiting
+        )
+        self.assertEqual(result, False)
+        self.assertEqual(mock_get_item_name_id.call_count, 2)
+        mock_delete_volumes_by_id_wth_cinder.assert_called_once_with(
+            f"volume:{volume_id}", f"{volume_id}", [], created_items
+        )
+        mock_delete_floating_ip_by_id.assert_called_once_with(
+            f"floating_ip:{floating_network_vim_id}",
+            f"{floating_network_vim_id}",
+            created_items,
+        )
+        self.vimconn.logger.error.assert_called_once_with(
+            "Error deleting volume:ac408b73-b9cc-4a6a-a270-82cc4811bd4a: Connection failed."
+        )
+
+    @patch.object(vimconnector, "_get_item_name_id")
+    @patch.object(vimconnector, "_delete_volumes_by_id_wth_cinder")
+    @patch.object(vimconnector, "_delete_floating_ip_by_id")
+    def test_delete_created_items_delete_fip_raises(
+        self,
+        mock_delete_floating_ip_by_id,
+        mock_delete_volumes_by_id_wth_cinder,
+        mock_get_item_name_id,
+    ):
+        """Delete floating ip raises exception."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        mock_get_item_name_id.side_effect = [
+            ("floating_ip", f"{floating_network_vim_id}"),
+            ("volume", f"{volume_id}"),
+        ]
+        mock_delete_volumes_by_id_wth_cinder.return_value = False
+        mock_delete_floating_ip_by_id.side_effect = ConnectionError(
+            "Connection failed."
+        )
+        volumes_to_hold = []
+        keep_waiting = True
+        result = self.vimconn._delete_created_items(
+            created_items, volumes_to_hold, keep_waiting
+        )
+        self.assertEqual(result, True)
+        self.assertEqual(mock_get_item_name_id.call_count, 2)
+        mock_delete_volumes_by_id_wth_cinder.assert_called_once_with(
+            f"volume:{volume_id}", f"{volume_id}", [], created_items
+        )
+        mock_delete_floating_ip_by_id.assert_called_once_with(
+            f"floating_ip:{floating_network_vim_id}",
+            f"{floating_network_vim_id}",
+            created_items,
+        )
+        self.vimconn.logger.error.assert_called_once_with(
+            "Error deleting floating_ip:108b73-e9cc-5a6a-t270-82cc4811bd4a: Connection failed."
+        )
+
+    @patch.object(vimconnector, "_get_item_name_id")
+    @patch.object(vimconnector, "_delete_volumes_by_id_wth_cinder")
+    @patch.object(vimconnector, "_delete_floating_ip_by_id")
+    def test_delete_created_items_get_item_name_raises(
+        self,
+        mock_delete_floating_ip_by_id,
+        mock_delete_volumes_by_id_wth_cinder,
+        mock_get_item_name_id,
+    ):
+        """Get item, name raises exception."""
+        created_items = {
+            3: True,
+            f"volume{volume_id}": True,
+            f"port:{port_id}": None,
+        }
+        mock_get_item_name_id.side_effect = [
+            TypeError("Invalid Type"),
+            AttributeError("Invalid attribute"),
+        ]
+        volumes_to_hold = []
+        keep_waiting = False
+        result = self.vimconn._delete_created_items(
+            created_items, volumes_to_hold, keep_waiting
+        )
+        self.assertEqual(result, False)
+        self.assertEqual(mock_get_item_name_id.call_count, 2)
+        mock_delete_volumes_by_id_wth_cinder.assert_not_called()
+        mock_delete_floating_ip_by_id.assert_not_called()
+        _call_logger = self.vimconn.logger.error.call_args_list
+        self.assertEqual(_call_logger[0][0], ("Error deleting 3: Invalid Type",))
+        self.assertEqual(
+            _call_logger[1][0],
+            (f"Error deleting volume{volume_id}: Invalid attribute",),
+        )
+
+    @patch.object(vimconnector, "_get_item_name_id")
+    @patch.object(vimconnector, "_delete_volumes_by_id_wth_cinder")
+    @patch.object(vimconnector, "_delete_floating_ip_by_id")
+    def test_delete_created_items_no_fip_wth_port(
+        self,
+        mock_delete_floating_ip_by_id,
+        mock_delete_volumes_by_id_wth_cinder,
+        mock_get_item_name_id,
+    ):
+        """Created items has port, does not have floating ip."""
+        created_items = {
+            f"volume:{volume_id}": True,
+            f"port:{port_id}": True,
+        }
+        mock_get_item_name_id.side_effect = [
+            ("volume", f"{volume_id}"),
+            ("port", f"{port_id}"),
+        ]
+        mock_delete_volumes_by_id_wth_cinder.return_value = False
+        volumes_to_hold = []
+        keep_waiting = False
+        result = self.vimconn._delete_created_items(
+            created_items, volumes_to_hold, keep_waiting
+        )
+        self.assertEqual(result, False)
+        self.assertEqual(mock_get_item_name_id.call_count, 2)
+        mock_delete_volumes_by_id_wth_cinder.assert_called_once_with(
+            f"volume:{volume_id}", f"{volume_id}", [], created_items
+        )
+        mock_delete_floating_ip_by_id.assert_not_called()
+        self.vimconn.logger.error.assert_not_called()
+
+    @patch.object(vimconnector, "_get_item_name_id")
+    @patch.object(vimconnector, "_delete_volumes_by_id_wth_cinder")
+    @patch.object(vimconnector, "_delete_floating_ip_by_id")
+    def test_delete_created_items_no_volume(
+        self,
+        mock_delete_floating_ip_by_id,
+        mock_delete_volumes_by_id_wth_cinder,
+        mock_get_item_name_id,
+    ):
+        """Created items does not have volume."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"port:{port_id}": None,
+        }
+        mock_get_item_name_id.side_effect = [
+            ("floating_ip", f"{floating_network_vim_id}")
+        ]
+        volumes_to_hold = []
+        keep_waiting = False
+        result = self.vimconn._delete_created_items(
+            created_items, volumes_to_hold, keep_waiting
+        )
+        self.assertEqual(result, False)
+        self.assertEqual(mock_get_item_name_id.call_count, 1)
+        mock_delete_volumes_by_id_wth_cinder.assert_not_called()
+        mock_delete_floating_ip_by_id.assert_called_once_with(
+            f"floating_ip:{floating_network_vim_id}",
+            f"{floating_network_vim_id}",
+            created_items,
+        )
+        self.vimconn.logger.error.assert_not_called()
+
+    @patch.object(vimconnector, "_get_item_name_id")
+    @patch.object(vimconnector, "_delete_volumes_by_id_wth_cinder")
+    @patch.object(vimconnector, "_delete_floating_ip_by_id")
+    def test_delete_created_items_already_deleted(
+        self,
+        mock_delete_floating_ip_by_id,
+        mock_delete_volumes_by_id_wth_cinder,
+        mock_get_item_name_id,
+    ):
+        """All created items are alerady deleted."""
+        created_items = {
+            f"floating_ip:{floating_network_vim_id}": None,
+            f"volume:{volume_id}": None,
+            f"port:{port_id}": None,
+        }
+        volumes_to_hold = []
+        keep_waiting = False
+        result = self.vimconn._delete_created_items(
+            created_items, volumes_to_hold, keep_waiting
+        )
+        self.assertEqual(result, False)
+        mock_get_item_name_id.assert_not_called()
+        mock_delete_volumes_by_id_wth_cinder.assert_not_called()
+        mock_delete_floating_ip_by_id.assert_not_called()
+        self.vimconn.logger.error.assert_not_called()
+
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
+    @patch.object(vimconnector, "_format_exception")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
+    @patch.object(vimconnector, "_delete_created_items")
+    def test_delete_vminstance_successfully(
+        self,
+        mock_delete_created_items,
+        mock_delete_vm_ports_attached_to_network,
+        mock_reload_connection,
+        mock_format_exception,
+        mock_extract_items_wth_keep_flag_from_created_items,
+        mock_sleep,
+    ):
+        vm_id = f"{virtual_mac_id}"
+        created_items = deepcopy(created_items_all_true)
+        mock_extract_items_wth_keep_flag_from_created_items.return_value = created_items
+        volumes_to_hold = [f"{volume_id}", f"{volume_id2}"]
+        mock_delete_created_items.return_value = False
+        self.vimconn.delete_vminstance(vm_id, created_items, volumes_to_hold)
+        mock_reload_connection.assert_called_once()
+        mock_delete_vm_ports_attached_to_network.assert_called_once_with(created_items)
+        self.vimconn.nova.servers.delete.assert_called_once_with(vm_id)
+        mock_delete_created_items.assert_called_once_with(
+            created_items, volumes_to_hold, False
+        )
+        mock_sleep.assert_not_called()
+        mock_format_exception.assert_not_called()
+        mock_extract_items_wth_keep_flag_from_created_items.assert_called_once_with(
+            created_items
+        )
+
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
+    @patch.object(vimconnector, "_format_exception")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
+    @patch.object(vimconnector, "_delete_created_items")
+    def test_delete_vminstance_created_items_has_keep_flag(
+        self,
+        mock_delete_created_items,
+        mock_delete_vm_ports_attached_to_network,
+        mock_reload_connection,
+        mock_format_exception,
+        mock_extract_items_wth_keep_flag_from_created_items,
+        mock_sleep,
+    ):
+        """Created_items includes items which has keep flag."""
+        vm_id = f"{virtual_mac_id}"
+        initial_created_items = {
+            f"port{port_id}": True,
+            f"floating_ip{floating_network_vim_id}": None,
+            f"volume{volume_id}keep": True,
+            f"volume{volume_id2}keep": True,
+        }
+        created_items = {
+            f"port{port_id}": True,
+            f"floating_ip{floating_network_vim_id}": None,
+        }
+        mock_extract_items_wth_keep_flag_from_created_items.return_value = created_items
+        volumes_to_hold = []
+        mock_delete_created_items.return_value = False
+        self.vimconn.delete_vminstance(vm_id, initial_created_items, volumes_to_hold)
+        mock_reload_connection.assert_called_once()
+        mock_delete_vm_ports_attached_to_network.assert_called_once_with(created_items)
+        self.vimconn.nova.servers.delete.assert_called_once_with(vm_id)
+        mock_delete_created_items.assert_called_once_with(
+            created_items, volumes_to_hold, False
+        )
+        mock_sleep.assert_not_called()
+        mock_format_exception.assert_not_called()
+        mock_extract_items_wth_keep_flag_from_created_items.assert_called_once_with(
+            initial_created_items
+        )
+
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
+    @patch.object(vimconnector, "_format_exception")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
+    @patch.object(vimconnector, "_delete_created_items")
+    def test_delete_vminstance_extract_items_wth_keep_raises(
+        self,
+        mock_delete_created_items,
+        mock_delete_vm_ports_attached_to_network,
+        mock_reload_connection,
+        mock_format_exception,
+        mock_extract_items_wth_keep_flag_from_created_items,
+        mock_sleep,
+    ):
+        """extract_items_wth_keep_flag_from_created_items raises AttributeError."""
+        vm_id = f"{virtual_mac_id}"
+        initial_created_items = {
+            f"port{port_id}": True,
+            f"floating_ip{floating_network_vim_id}": None,
+            f"volume{volume_id}keep": True,
+            f"volume{volume_id2}keep": True,
+        }
+
+        mock_extract_items_wth_keep_flag_from_created_items.side_effect = AttributeError
+        volumes_to_hold = []
+        mock_delete_created_items.return_value = False
+        with self.assertRaises(AttributeError):
+            self.vimconn.delete_vminstance(
+                vm_id, initial_created_items, volumes_to_hold
+            )
+        mock_reload_connection.assert_not_called()
+        mock_delete_vm_ports_attached_to_network.assert_not_called()
+        self.vimconn.nova.servers.delete.assert_not_called()
+        mock_delete_created_items.assert_not_called()
+        mock_sleep.assert_not_called()
+        mock_format_exception.assert_not_called()
+        mock_extract_items_wth_keep_flag_from_created_items.assert_called_once_with(
+            initial_created_items
+        )
+
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
+    @patch.object(vimconnector, "_format_exception")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
+    @patch.object(vimconnector, "_delete_created_items")
+    def test_delete_vminstance_delete_created_items_raises(
+        self,
+        mock_delete_created_items,
+        mock_delete_vm_ports_attached_to_network,
+        mock_reload_connection,
+        mock_format_exception,
+        mock_extract_items_wth_keep_flag_from_created_items,
+        mock_sleep,
+    ):
+        """Delete creted items raises exception."""
+        vm_id = f"{virtual_mac_id}"
+        created_items = deepcopy(created_items_all_true)
+        mock_extract_items_wth_keep_flag_from_created_items.return_value = created_items
+        mock_sleep = MagicMock()
+        volumes_to_hold = []
+        err = ConnectionError("ClientException occured.")
+        mock_delete_created_items.side_effect = err
+        with self.assertRaises(ConnectionError) as err:
+            self.vimconn.delete_vminstance(vm_id, created_items, volumes_to_hold)
+            self.assertEqual(str(err), "ClientException occured.")
+        mock_reload_connection.assert_called_once()
+        mock_delete_vm_ports_attached_to_network.assert_called_once_with(created_items)
+        self.vimconn.nova.servers.delete.assert_called_once_with(vm_id)
+        mock_delete_created_items.assert_called_once()
+        mock_sleep.assert_not_called()
+        mock_extract_items_wth_keep_flag_from_created_items.assert_called_once_with(
+            created_items
+        )
+
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
+    @patch.object(vimconnector, "_format_exception")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
+    @patch.object(vimconnector, "_delete_created_items")
+    def test_delete_vminstance_delete_vm_ports_raises(
+        self,
+        mock_delete_created_items,
+        mock_delete_vm_ports_attached_to_network,
+        mock_reload_connection,
+        mock_format_exception,
+        mock_extract_items_wth_keep_flag_from_created_items,
+        mock_sleep,
+    ):
+        """Delete vm ports raises exception."""
+        vm_id = f"{virtual_mac_id}"
+        created_items = deepcopy(created_items_all_true)
+        mock_extract_items_wth_keep_flag_from_created_items.return_value = created_items
+        volumes_to_hold = [f"{volume_id}", f"{volume_id2}"]
+        err = ConnectionError("ClientException occured.")
+        mock_delete_vm_ports_attached_to_network.side_effect = err
+        mock_delete_created_items.side_effect = err
+        with self.assertRaises(ConnectionError) as err:
+            self.vimconn.delete_vminstance(vm_id, created_items, volumes_to_hold)
+            self.assertEqual(str(err), "ClientException occured.")
+        mock_reload_connection.assert_called_once()
+        mock_delete_vm_ports_attached_to_network.assert_called_once_with(created_items)
+        self.vimconn.nova.servers.delete.assert_not_called()
+        mock_delete_created_items.assert_not_called()
+        mock_sleep.assert_not_called()
+        mock_extract_items_wth_keep_flag_from_created_items.assert_called_once_with(
+            created_items
+        )
+
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
+    @patch.object(vimconnector, "_format_exception")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
+    @patch.object(vimconnector, "_delete_created_items")
+    def test_delete_vminstance_nova_server_delete_raises(
+        self,
+        mock_delete_created_items,
+        mock_delete_vm_ports_attached_to_network,
+        mock_reload_connection,
+        mock_format_exception,
+        mock_extract_items_wth_keep_flag_from_created_items,
+        mock_sleep,
+    ):
+        """Nova server delete raises exception."""
+        vm_id = f"{virtual_mac_id}"
+        created_items = deepcopy(created_items_all_true)
+        mock_extract_items_wth_keep_flag_from_created_items.return_value = created_items
+        volumes_to_hold = [f"{volume_id}", f"{volume_id2}"]
+        err = VimConnConnectionException("ClientException occured.")
+        self.vimconn.nova.servers.delete.side_effect = err
+        mock_delete_created_items.side_effect = err
+        with self.assertRaises(VimConnConnectionException) as err:
+            self.vimconn.delete_vminstance(vm_id, created_items, volumes_to_hold)
+            self.assertEqual(str(err), "ClientException occured.")
+        mock_reload_connection.assert_called_once()
+        mock_delete_vm_ports_attached_to_network.assert_called_once_with(created_items)
+        self.vimconn.nova.servers.delete.assert_called_once_with(vm_id)
+        mock_delete_created_items.assert_not_called()
+        mock_sleep.assert_not_called()
+        mock_extract_items_wth_keep_flag_from_created_items.assert_called_once_with(
+            created_items
+        )
+
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
+    @patch.object(vimconnector, "_format_exception")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
+    @patch.object(vimconnector, "_delete_created_items")
+    def test_delete_vminstance_reload_connection_raises(
+        self,
+        mock_delete_created_items,
+        mock_delete_vm_ports_attached_to_network,
+        mock_reload_connection,
+        mock_format_exception,
+        mock_extract_items_wth_keep_flag_from_created_items,
+        mock_sleep,
+    ):
+        """Reload connection raises exception."""
+        vm_id = f"{virtual_mac_id}"
+        created_items = deepcopy(created_items_all_true)
+        mock_extract_items_wth_keep_flag_from_created_items.return_value = created_items
+        mock_sleep = MagicMock()
+        volumes_to_hold = [f"{volume_id}", f"{volume_id2}"]
+        err = ConnectionError("ClientException occured.")
+        mock_delete_created_items.return_value = False
+        mock_reload_connection.side_effect = err
+        with self.assertRaises(ConnectionError) as err:
+            self.vimconn.delete_vminstance(vm_id, created_items, volumes_to_hold)
+            self.assertEqual(str(err), "ClientException occured.")
+        mock_reload_connection.assert_called_once()
+        mock_delete_vm_ports_attached_to_network.assert_not_called()
+        self.vimconn.nova.servers.delete.assert_not_called()
+        mock_delete_created_items.assert_not_called()
+        mock_sleep.assert_not_called()
+        mock_extract_items_wth_keep_flag_from_created_items.assert_called_once_with(
+            created_items
+        )
+
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
+    @patch.object(vimconnector, "_format_exception")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
+    @patch.object(vimconnector, "_delete_created_items")
+    def test_delete_vminstance_created_item_vol_to_hold_are_none(
+        self,
+        mock_delete_created_items,
+        mock_delete_vm_ports_attached_to_network,
+        mock_reload_connection,
+        mock_format_exception,
+        mock_extract_items_wth_keep_flag_from_created_items,
+        mock_sleep,
+    ):
+        """created_items and volumes_to_hold are None."""
+        vm_id = f"{virtual_mac_id}"
+        created_items = None
+        volumes_to_hold = None
+        mock_extract_items_wth_keep_flag_from_created_items.return_value = {}
+        mock_delete_created_items.return_value = False
+        self.vimconn.delete_vminstance(vm_id, created_items, volumes_to_hold)
+        mock_reload_connection.assert_called_once()
+        mock_delete_vm_ports_attached_to_network.assert_not_called()
+        self.vimconn.nova.servers.delete.assert_called_once_with(vm_id)
+        mock_delete_created_items.assert_called_once_with({}, [], False)
+        mock_sleep.assert_not_called()
+        mock_format_exception.assert_not_called()
+        mock_extract_items_wth_keep_flag_from_created_items.assert_called_once_with({})
+
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
+    @patch.object(vimconnector, "_format_exception")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
+    @patch.object(vimconnector, "_delete_created_items")
+    def test_delete_vminstance_vm_id_is_none(
+        self,
+        mock_delete_created_items,
+        mock_delete_vm_ports_attached_to_network,
+        mock_reload_connection,
+        mock_format_exception,
+        mock_extract_items_wth_keep_flag_from_created_items,
+        mock_sleep,
+    ):
+        """vm_id is None."""
+        vm_id = None
+        created_items = deepcopy(created_items_all_true)
+        mock_extract_items_wth_keep_flag_from_created_items.return_value = created_items
+        volumes_to_hold = [f"{volume_id}", f"{volume_id2}"]
+        mock_delete_created_items.side_effect = [True, True, False]
+        self.vimconn.delete_vminstance(vm_id, created_items, volumes_to_hold)
+        mock_reload_connection.assert_called_once()
+        mock_delete_vm_ports_attached_to_network.assert_called_once_with(created_items)
+        self.vimconn.nova.servers.delete.assert_not_called()
+        self.assertEqual(mock_delete_created_items.call_count, 3)
+        self.assertEqual(mock_sleep.call_count, 2)
+        mock_format_exception.assert_not_called()
+        mock_extract_items_wth_keep_flag_from_created_items.assert_called_once_with(
+            created_items
+        )
+
+    @patch("time.sleep")
+    @patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
+    @patch.object(vimconnector, "_format_exception")
+    @patch.object(vimconnector, "_reload_connection")
+    @patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
+    @patch.object(vimconnector, "_delete_created_items")
+    def test_delete_vminstance_delete_created_items_return_true(
+        self,
+        mock_delete_created_items,
+        mock_delete_vm_ports_attached_to_network,
+        mock_reload_connection,
+        mock_format_exception,
+        mock_extract_items_wth_keep_flag_from_created_items,
+        mock_sleep,
+    ):
+        """Delete created items always return True."""
+        vm_id = None
+        created_items = deepcopy(created_items_all_true)
+        mock_extract_items_wth_keep_flag_from_created_items.return_value = created_items
+        volumes_to_hold = [f"{volume_id}", f"{volume_id2}"]
+        mock_delete_created_items.side_effect = [True] * 1800
+        self.vimconn.delete_vminstance(vm_id, created_items, volumes_to_hold)
+        mock_reload_connection.assert_called_once()
+        mock_delete_vm_ports_attached_to_network.assert_called_once_with(created_items)
+        self.vimconn.nova.servers.delete.assert_not_called()
+        self.assertEqual(mock_delete_created_items.call_count, 1800)
+        self.assertEqual(mock_sleep.call_count, 1800)
+        mock_format_exception.assert_not_called()
+        mock_extract_items_wth_keep_flag_from_created_items.assert_called_once_with(
+            created_items
+        )
+
+    def test_remove_keep_tag_from_persistent_volumes_keep_flag_exists(self):
+        """Keep flag exists in created items."""
+        created_items = {
+            f"port:{port_id}": True,
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id}:keep": True,
+            f"volume:{volume_id2}:keep": True,
+        }
+        expected_result = {
+            f"port:{port_id}": True,
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id}": True,
+            f"volume:{volume_id2}": True,
+        }
+        result = self.vimconn.remove_keep_tag_from_persistent_volumes(created_items)
+        self.assertDictEqual(result, expected_result)
+
+    def test_remove_keep_tag_from_persistent_volumes_without_keep_flag(self):
+        """Keep flag does not exist in created items."""
+        created_items = {
+            f"port:{port_id}": True,
+            f"floating_ip:{floating_network_vim_id}": True,
+            f"volume:{volume_id}": True,
+            f"volume:{volume_id2}": True,
+        }
+        result = self.vimconn.remove_keep_tag_from_persistent_volumes(created_items)
+        self.assertDictEqual(result, created_items)
+
+    def test_update_block_device_mapping_empty_volume(self):
+        """"""
+        volume = ""
+        block_device_mapping = {}
+        base_disk_index = 100
+        disk = {}
+        created_items = {}
+        with self.assertRaises(VimConnException) as err:
+            self.vimconn.update_block_device_mapping(
+                volume, block_device_mapping, base_disk_index, disk, created_items
+            )
+            self.assertEqual(str(err), "Volume is empty.")
+        self.assertEqual(block_device_mapping, {})
+        self.assertEqual(created_items, {})
+
+    def test_update_block_device_mapping_invalid_volume(self):
+        """"""
+        volume = "Volume-A"
+        block_device_mapping = {}
+        base_disk_index = 100
+        disk = {}
+        created_items = {}
+        with self.assertRaises(VimConnException) as err:
+            self.vimconn.update_block_device_mapping(
+                volume, block_device_mapping, base_disk_index, disk, created_items
+            )
+            self.assertEqual(
+                str(err), "Created volume is not valid, does not have id attribute."
+            )
+        self.assertEqual(block_device_mapping, {})
+        self.assertEqual(created_items, {})
+
+    def test_update_block_device_mapping(self):
+        """"""
+        volume = MagicMock(autospec=True)
+        volume.id = volume_id
+        block_device_mapping = {}
+        base_disk_index = 100
+        disk = {}
+        created_items = {}
+        self.vimconn.update_block_device_mapping(
+            volume, block_device_mapping, base_disk_index, disk, created_items
+        )
+        self.assertEqual(
+            block_device_mapping, {"vdd": "ac408b73-b9cc-4a6a-a270-82cc4811bd4a"}
+        )
+        self.assertEqual(
+            created_items, {"volume:ac408b73-b9cc-4a6a-a270-82cc4811bd4a": True}
+        )
+
+    def test_update_block_device_mapping_with_keep_flag(self):
+        """"""
+        volume = MagicMock(autospec=True)
+        volume.id = volume_id
+        block_device_mapping = {}
+        base_disk_index = 100
+        disk = {"size": 10, "keep": True}
+        created_items = {}
+        self.vimconn.update_block_device_mapping(
+            volume, block_device_mapping, base_disk_index, disk, created_items
+        )
+        self.assertEqual(
+            block_device_mapping, {"vdd": "ac408b73-b9cc-4a6a-a270-82cc4811bd4a"}
+        )
+        self.assertEqual(
+            created_items, {"volume:ac408b73-b9cc-4a6a-a270-82cc4811bd4a:keep": True}
+        )
+
+    def test_extract_items_with_keep_flag_item_has_keep_flag(self):
+        created_items = deepcopy(created_items_all_true)
+        created_items[f"volume:{volume_id2}:keep"] = True
+        result = self.vimconn._extract_items_wth_keep_flag_from_created_items(
+            created_items
+        )
+        self.assertEqual(result, deepcopy(created_items_all_true))
+
+    def test_extract_items_with_keep_flag_no_item_wth_keep_flag(self):
+        created_items = deepcopy(created_items_all_true)
+        result = self.vimconn._extract_items_wth_keep_flag_from_created_items(
+            created_items
+        )
+        self.assertEqual(result, deepcopy(created_items_all_true))
+
+    def test_extract_items_with_keep_flag_all_items_are_already_deleted(self):
+        created_items = {
+            f"port:{port_id}": None,
+            f"floating_ip:{floating_network_vim_id}": None,
+            f"volume:{volume_id}:keep": None,
+            f"volume:{volume_id2}:keep": None,
+        }
+        expected_result = {
+            f"port:{port_id}": None,
+            f"floating_ip:{floating_network_vim_id}": None,
+        }
+        result = self.vimconn._extract_items_wth_keep_flag_from_created_items(
+            created_items
+        )
+        self.assertEqual(result, expected_result)
+
+    def test_extract_items_with_keep_flag_without_semicolon(self):
+        created_items = {
+            f"port{port_id}": True,
+            f"floating_ip{floating_network_vim_id}": None,
+            f"volume{volume_id}keep": True,
+            f"volume{volume_id2}keep": True,
+        }
+        result = self.vimconn._extract_items_wth_keep_flag_from_created_items(
+            created_items
+        )
+        self.assertEqual(result, {})
+
+    def test_extract_items_with_keep_flag_invalid_type_created_items(self):
+        created_items = [{f"port{port_id}": True}, {f"volume{volume_id2}keep": True}]
+        with self.assertRaises(AttributeError):
+            self.vimconn._extract_items_wth_keep_flag_from_created_items(created_items)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
index eda5699..00558dc 100644
--- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
+++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
@@ -38,6 +38,7 @@
 import random
 import re
 import time
+from typing import Dict, Optional, Tuple
 
 from cinderclient import client as cClient
 from glanceclient import client as glClient
@@ -753,7 +754,7 @@
             self._reload_connection()
             network_dict = {"name": net_name, "admin_state_up": True}
 
-            if net_type in ("data", "ptp"):
+            if net_type in ("data", "ptp") or provider_network_profile:
                 provider_physical_network = None
 
                 if provider_network_profile and provider_network_profile.get(
@@ -1283,13 +1284,7 @@
                         if numas:
                             numa_nodes = len(numas)
 
-                            if numa_nodes > 1:
-                                return -1, "Can not add flavor with more than one numa"
-
                             extra_specs["hw:numa_nodes"] = str(numa_nodes)
-                            extra_specs["hw:mem_page_size"] = "large"
-                            extra_specs["hw:cpu_policy"] = "dedicated"
-                            extra_specs["hw:numa_mempolicy"] = "strict"
 
                             if self.vim_type == "VIO":
                                 extra_specs[
@@ -1298,13 +1293,25 @@
                                 extra_specs["vmware:latency_sensitivity_level"] = "high"
 
                             for numa in numas:
+                                if "id" in numa:
+                                    node_id = numa["id"]
+
+                                    if "memory" in numa:
+                                        memory_mb = numa["memory"] * 1024
+                                        memory = "hw:numa_mem.{}".format(node_id)
+                                        extra_specs[memory] = int(memory_mb)
+
+                                    if "vcpu" in numa:
+                                        vcpu = numa["vcpu"]
+                                        cpu = "hw:numa_cpus.{}".format(node_id)
+                                        vcpu = ",".join(map(str, vcpu))
+                                        extra_specs[cpu] = vcpu
+
                                 # overwrite ram and vcpus
                                 # check if key "memory" is present in numa else use ram value at flavor
-                                if "memory" in numa:
-                                    ram = numa["memory"] * 1024
                                 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
                                 # implemented/virt-driver-cpu-thread-pinning.html
-                                extra_specs["hw:cpu_sockets"] = 1
+                                extra_specs["hw:cpu_sockets"] = str(numa_nodes)
 
                                 if "paired-threads" in numa:
                                     vcpus = numa["paired-threads"] * 2
@@ -1369,6 +1376,23 @@
                                     "Invalid mempage-size %s. Will be ignored",
                                     extended.get("mempage-size"),
                                 )
+                        if extended.get("cpu-pinning-policy"):
+                            extra_specs["hw:cpu_policy"] = extended.get(
+                                "cpu-pinning-policy"
+                            ).lower()
+
+                        # Set the cpu thread pinning policy as specified in the descriptor
+                        if extended.get("cpu-thread-pinning-policy"):
+                            extra_specs["hw:cpu_thread_policy"] = extended.get(
+                                "cpu-thread-pinning-policy"
+                            ).lower()
+
+                        # Set the mem policy as specified in the descriptor
+                        if extended.get("mem-policy"):
+                            extra_specs["hw:numa_mempolicy"] = extended.get(
+                                "mem-policy"
+                            ).lower()
+
                     # create flavor
                     new_flavor = self.nova.flavors.create(
                         name=name,
@@ -1694,64 +1718,807 @@
                 "No enough availability zones at VIM for this deployment"
             )
 
+    def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
+        """Fill up the security_groups in the port_dict.
+
+        Args:
+            net (dict):             Network details
+            port_dict   (dict):     Port details
+
+        """
+        if (
+            self.config.get("security_groups")
+            and net.get("port_security") is not False
+            and not self.config.get("no_port_security_extension")
+        ):
+            if not self.security_groups_id:
+                self._get_ids_from_name()
+
+            port_dict["security_groups"] = self.security_groups_id
+
+    def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
+        """Fill up the network binding depending on network type in the port_dict.
+
+        Args:
+            net (dict):             Network details
+            port_dict   (dict):     Port details
+
+        """
+        if not net.get("type"):
+            raise vimconn.VimConnException("Type is missing in the network details.")
+
+        if net["type"] == "virtual":
+            pass
+
+        # For VF
+        elif net["type"] == "VF" or net["type"] == "SR-IOV":
+
+            port_dict["binding:vnic_type"] = "direct"
+
+            # VIO specific Changes
+            if self.vim_type == "VIO":
+                # Need to create port with port_security_enabled = False and no-security-groups
+                port_dict["port_security_enabled"] = False
+                port_dict["provider_security_groups"] = []
+                port_dict["security_groups"] = []
+
+        else:
+            # For PT PCI-PASSTHROUGH
+            port_dict["binding:vnic_type"] = "direct-physical"
+
+    @staticmethod
+    def _set_fixed_ip(new_port: dict, net: dict) -> None:
+        """Set the "ip" parameter in net dictionary.
+
+        Args:
+            new_port    (dict):     New created port
+            net         (dict):     Network details
+
+        """
+        fixed_ips = new_port["port"].get("fixed_ips")
+
+        if fixed_ips:
+            net["ip"] = fixed_ips[0].get("ip_address")
+        else:
+            net["ip"] = None
+
+    @staticmethod
+    def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
+        """Fill up the mac_address and fixed_ips in port_dict.
+
+        Args:
+            net (dict):             Network details
+            port_dict   (dict):     Port details
+
+        """
+        if net.get("mac_address"):
+            port_dict["mac_address"] = net["mac_address"]
+
+        if net.get("ip_address"):
+            port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
+            # TODO add "subnet_id": <subnet_id>
+
+    def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
+        """Create new port using neutron.
+
+        Args:
+            port_dict   (dict):         Port details
+            created_items   (dict):     All created items
+            net (dict):                 Network details
+
+        Returns:
+            new_port    (dict):         New created port
+
+        """
+        new_port = self.neutron.create_port({"port": port_dict})
+        created_items["port:" + str(new_port["port"]["id"])] = True
+        net["mac_adress"] = new_port["port"]["mac_address"]
+        net["vim_id"] = new_port["port"]["id"]
+
+        return new_port
+
+    def _create_port(
+        self, net: dict, name: str, created_items: dict
+    ) -> Tuple[dict, dict]:
+        """Create port using net details.
+
+        Args:
+            net (dict):                 Network details
+            name    (str):              Name to be used as network name if net dict does not include name
+            created_items   (dict):     All created items
+
+        Returns:
+            new_port, port              New created port, port dictionary
+
+        """
+
+        port_dict = {
+            "network_id": net["net_id"],
+            "name": net.get("name"),
+            "admin_state_up": True,
+        }
+
+        if not port_dict["name"]:
+            port_dict["name"] = name
+
+        self._prepare_port_dict_security_groups(net, port_dict)
+
+        self._prepare_port_dict_binding(net, port_dict)
+
+        vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
+
+        new_port = self._create_new_port(port_dict, created_items, net)
+
+        vimconnector._set_fixed_ip(new_port, net)
+
+        port = {"port-id": new_port["port"]["id"]}
+
+        if float(self.nova.api_version.get_string()) >= 2.32:
+            port["tag"] = new_port["port"]["name"]
+
+        return new_port, port
+
+    def _prepare_network_for_vminstance(
+        self,
+        name: str,
+        net_list: list,
+        created_items: dict,
+        net_list_vim: list,
+        external_network: list,
+        no_secured_ports: list,
+    ) -> None:
+        """Create port and fill up net dictionary for new VM instance creation.
+
+        Args:
+            name    (str):                  Name of network
+            net_list    (list):             List of networks
+            created_items   (dict):         All created items belongs to a VM
+            net_list_vim    (list):         List of ports
+            external_network    (list):     List of external-networks
+            no_secured_ports    (list):     Port security disabled ports
+        """
+
+        self._reload_connection()
+
+        for net in net_list:
+            # Skip non-connected iface
+            if not net.get("net_id"):
+                continue
+
+            new_port, port = self._create_port(net, name, created_items)
+
+            net_list_vim.append(port)
+
+            if net.get("floating_ip", False):
+                net["exit_on_floating_ip_error"] = True
+                external_network.append(net)
+
+            elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
+                net["exit_on_floating_ip_error"] = False
+                external_network.append(net)
+                net["floating_ip"] = self.config.get("use_floating_ip")
+
+            # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
+            # is dropped. As a workaround we wait until the VM is active and then disable the port-security
+            if net.get("port_security") is False and not self.config.get(
+                "no_port_security_extension"
+            ):
+                no_secured_ports.append(
+                    (
+                        new_port["port"]["id"],
+                        net.get("port_security_disable_strategy"),
+                    )
+                )
+
+    def _prepare_persistent_root_volumes(
+        self,
+        name: str,
+        vm_av_zone: list,
+        disk: dict,
+        base_disk_index: int,
+        block_device_mapping: dict,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ) -> Optional[str]:
+        """Prepare persistent root volumes for new VM instance.
+
+        Args:
+            name    (str):                      Name of VM instance
+            vm_av_zone  (list):                 List of availability zones
+            disk    (dict):                     Disk details
+            base_disk_index (int):              Disk index
+            block_device_mapping    (dict):     Block device details
+            existing_vim_volumes    (list):     Existing disk details
+            created_items   (dict):             All created items belongs to VM
+
+        Returns:
+            boot_volume_id  (str):              ID of boot volume
+
+        """
+        # Disk may include only vim_volume_id or only vim_id.
+        # Use existing persistent root volume finding with volume_id or vim_id
+        key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
+
+        if disk.get(key_id):
+
+            block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
+            existing_vim_volumes.append({"id": disk[key_id]})
+
+        else:
+            # Create persistent root volume
+            volume = self.cinder.volumes.create(
+                size=disk["size"],
+                name=name + "vd" + chr(base_disk_index),
+                imageRef=disk["image_id"],
+                # Make sure volume is in the same AZ as the VM to be attached to
+                availability_zone=vm_av_zone,
+            )
+            boot_volume_id = volume.id
+            self.update_block_device_mapping(
+                volume=volume,
+                block_device_mapping=block_device_mapping,
+                base_disk_index=base_disk_index,
+                disk=disk,
+                created_items=created_items,
+            )
+
+            return boot_volume_id
+
+    @staticmethod
+    def update_block_device_mapping(
+        volume: object,
+        block_device_mapping: dict,
+        base_disk_index: int,
+        disk: dict,
+        created_items: dict,
+    ) -> None:
+        """Add volume information to block device mapping dict.
+        Args:
+            volume  (object):                   Created volume object
+            block_device_mapping    (dict):     Block device details
+            base_disk_index (int):              Disk index
+            disk    (dict):                     Disk details
+            created_items   (dict):             All created items belongs to VM
+        """
+        if not volume:
+            raise vimconn.VimConnException("Volume is empty.")
+
+        if not hasattr(volume, "id"):
+            raise vimconn.VimConnException(
+                "Created volume is not valid, does not have id attribute."
+            )
+
+        volume_txt = "volume:" + str(volume.id)
+        if disk.get("keep"):
+            volume_txt += ":keep"
+        created_items[volume_txt] = True
+        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+
+    def _prepare_non_root_persistent_volumes(
+        self,
+        name: str,
+        disk: dict,
+        vm_av_zone: list,
+        block_device_mapping: dict,
+        base_disk_index: int,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ) -> None:
+        """Prepare persistent volumes for new VM instance.
+
+        Args:
+            name    (str):                      Name of VM instance
+            disk    (dict):                     Disk details
+            vm_av_zone  (list):                 List of availability zones
+            block_device_mapping    (dict):     Block device details
+            base_disk_index (int):              Disk index
+            existing_vim_volumes    (list):     Existing disk details
+            created_items   (dict):             All created items belongs to VM
+        """
+        # Non-root persistent volumes
+        # Disk may include only vim_volume_id or only vim_id.
+        key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
+
+        if disk.get(key_id):
+
+            # Use existing persistent volume
+            block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
+            existing_vim_volumes.append({"id": disk[key_id]})
+
+        else:
+            # Create persistent volume
+            volume = self.cinder.volumes.create(
+                size=disk["size"],
+                name=name + "vd" + chr(base_disk_index),
+                # Make sure volume is in the same AZ as the VM to be attached to
+                availability_zone=vm_av_zone,
+            )
+            self.update_block_device_mapping(
+                volume=volume,
+                block_device_mapping=block_device_mapping,
+                base_disk_index=base_disk_index,
+                disk=disk,
+                created_items=created_items,
+            )
+
+    def _wait_for_created_volumes_availability(
+        self, elapsed_time: int, created_items: dict
+    ) -> Optional[int]:
+        """Wait till created volumes become available.
+
+        Args:
+            elapsed_time    (int):          Passed time while waiting
+            created_items   (dict):         All created items belongs to VM
+
+        Returns:
+            elapsed_time    (int):          Time spent while waiting
+
+        """
+
+        while elapsed_time < volume_timeout:
+            for created_item in created_items:
+                v, volume_id = (
+                    created_item.split(":")[0],
+                    created_item.split(":")[1],
+                )
+                if v == "volume":
+                    if self.cinder.volumes.get(volume_id).status != "available":
+                        break
+            else:
+                # All ready: break from while
+                break
+
+            time.sleep(5)
+            elapsed_time += 5
+
+        return elapsed_time
+
+    def _wait_for_existing_volumes_availability(
+        self, elapsed_time: int, existing_vim_volumes: list
+    ) -> Optional[int]:
+        """Wait till existing volumes become available.
+
+        Args:
+            elapsed_time    (int):          Passed time while waiting
+            existing_vim_volumes   (list):  Existing volume details
+
+        Returns:
+            elapsed_time    (int):          Time spent while waiting
+
+        """
+
+        while elapsed_time < volume_timeout:
+            for volume in existing_vim_volumes:
+                if self.cinder.volumes.get(volume["id"]).status != "available":
+                    break
+            else:  # all ready: break from while
+                break
+
+            time.sleep(5)
+            elapsed_time += 5
+
+        return elapsed_time
+
+    def _prepare_disk_for_vminstance(
+        self,
+        name: str,
+        existing_vim_volumes: list,
+        created_items: dict,
+        vm_av_zone: list,
+        block_device_mapping: dict,
+        disk_list: list = None,
+    ) -> None:
+        """Prepare all volumes for new VM instance.
+
+        Args:
+            name    (str):                      Name of Instance
+            existing_vim_volumes    (list):     List of existing volumes
+            created_items   (dict):             All created items belongs to VM
+            vm_av_zone  (list):                 VM availability zone
+            block_device_mapping (dict):        Block devices to be attached to VM
+            disk_list   (list):                 List of disks
+
+        """
+        # Create additional volumes in case these are present in disk_list
+        base_disk_index = ord("b")
+        boot_volume_id = None
+        elapsed_time = 0
+
+        for disk in disk_list:
+            if "image_id" in disk:
+                # Root persistent volume
+                base_disk_index = ord("a")
+                boot_volume_id = self._prepare_persistent_root_volumes(
+                    name=name,
+                    vm_av_zone=vm_av_zone,
+                    disk=disk,
+                    base_disk_index=base_disk_index,
+                    block_device_mapping=block_device_mapping,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
+            else:
+                # Non-root persistent volume
+                self._prepare_non_root_persistent_volumes(
+                    name=name,
+                    disk=disk,
+                    vm_av_zone=vm_av_zone,
+                    block_device_mapping=block_device_mapping,
+                    base_disk_index=base_disk_index,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
+            base_disk_index += 1
+
+        # Wait until created volumes are with status available
+        elapsed_time = self._wait_for_created_volumes_availability(
+            elapsed_time, created_items
+        )
+        # Wait until existing volumes in vim are with status available
+        elapsed_time = self._wait_for_existing_volumes_availability(
+            elapsed_time, existing_vim_volumes
+        )
+        # If we exceeded the timeout rollback
+        if elapsed_time >= volume_timeout:
+            raise vimconn.VimConnException(
+                "Timeout creating volumes for instance " + name,
+                http_code=vimconn.HTTP_Request_Timeout,
+            )
+        if boot_volume_id:
+            self.cinder.volumes.set_bootable(boot_volume_id, True)
+
+    def _find_the_external_network_for_floating_ip(self):
+        """Get the external network ip in order to create floating IP.
+
+        Returns:
+            pool_id (str):      External network pool ID
+
+        """
+
+        # Find the external network
+        external_nets = list()
+
+        for net in self.neutron.list_networks()["networks"]:
+            if net["router:external"]:
+                external_nets.append(net)
+
+        if len(external_nets) == 0:
+            raise vimconn.VimConnException(
+                "Cannot create floating_ip automatically since "
+                "no external network is present",
+                http_code=vimconn.HTTP_Conflict,
+            )
+
+        if len(external_nets) > 1:
+            raise vimconn.VimConnException(
+                "Cannot create floating_ip automatically since "
+                "multiple external networks are present",
+                http_code=vimconn.HTTP_Conflict,
+            )
+
+        # Pool ID
+        return external_nets[0].get("id")
+
+    def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
+        """Trigger neutron to create a new floating IP using external network ID.
+
+        Args:
+            param   (dict):             Input parameters to create a floating IP
+            created_items   (dict):     All created items belongs to new VM instance
+
+        Raises:
+
+            VimConnException
+        """
+        try:
+            self.logger.debug("Creating floating IP")
+            new_floating_ip = self.neutron.create_floatingip(param)
+            free_floating_ip = new_floating_ip["floatingip"]["id"]
+            created_items["floating_ip:" + str(free_floating_ip)] = True
+
+        except Exception as e:
+            raise vimconn.VimConnException(
+                type(e).__name__ + ": Cannot create new floating_ip " + str(e),
+                http_code=vimconn.HTTP_Conflict,
+            )
+
+    def _create_floating_ip(
+        self, floating_network: dict, server: object, created_items: dict
+    ) -> None:
+        """Get the available Pool ID and create a new floating IP.
+
+        Args:
+            floating_network    (dict):         Dict including external network ID
+            server   (object):                  Server object
+            created_items   (dict):             All created items belongs to new VM instance
+
+        """
+
+        # Pool_id is available
+        if (
+            isinstance(floating_network["floating_ip"], str)
+            and floating_network["floating_ip"].lower() != "true"
+        ):
+            pool_id = floating_network["floating_ip"]
+
+        # Find the Pool_id
+        else:
+            pool_id = self._find_the_external_network_for_floating_ip()
+
+        param = {
+            "floatingip": {
+                "floating_network_id": pool_id,
+                "tenant_id": server.tenant_id,
+            }
+        }
+
+        self._neutron_create_float_ip(param, created_items)
+
+    def _find_floating_ip(
+        self,
+        server: object,
+        floating_ips: list,
+        floating_network: dict,
+    ) -> Optional[str]:
+        """Find the available free floating IPs if there are.
+
+        Args:
+            server  (object):                   Server object
+            floating_ips    (list):             List of floating IPs
+            floating_network    (dict):         Details of floating network such as ID
+
+        Returns:
+            free_floating_ip    (str):          Free floating ip address
+
+        """
+        for fip in floating_ips:
+            if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
+                continue
+
+            if isinstance(floating_network["floating_ip"], str):
+                if fip.get("floating_network_id") != floating_network["floating_ip"]:
+                    continue
+
+            return fip["id"]
+
+    def _assign_floating_ip(
+        self, free_floating_ip: str, floating_network: dict
+    ) -> Dict:
+        """Assign the free floating ip address to port.
+
+        Args:
+            free_floating_ip    (str):          Floating IP to be assigned
+            floating_network    (dict):         ID of floating network
+
+        Returns:
+            fip (dict)          (dict):         Floating ip details
+
+        """
+        # The vim_id key contains the neutron.port_id
+        self.neutron.update_floatingip(
+            free_floating_ip,
+            {"floatingip": {"port_id": floating_network["vim_id"]}},
+        )
+        # For race condition ensure not re-assigned to other VM after 5 seconds
+        time.sleep(5)
+
+        return self.neutron.show_floatingip(free_floating_ip)
+
+    def _get_free_floating_ip(
+        self, server: object, floating_network: dict
+    ) -> Optional[str]:
+        """Get the free floating IP address.
+
+        Args:
+            server  (object):               Server Object
+            floating_network    (dict):     Floating network details
+
+        Returns:
+            free_floating_ip    (str):      Free floating ip addr
+
+        """
+
+        floating_ips = self.neutron.list_floatingips().get("floatingips", ())
+
+        # Randomize
+        random.shuffle(floating_ips)
+
+        return self._find_floating_ip(server, floating_ips, floating_network)
+
+    def _prepare_external_network_for_vminstance(
+        self,
+        external_network: list,
+        server: object,
+        created_items: dict,
+        vm_start_time: float,
+    ) -> None:
+        """Assign floating IP address for VM instance.
+
+        Args:
+            external_network    (list):         ID of External network
+            server  (object):                   Server Object
+            created_items   (dict):             All created items belongs to new VM instance
+            vm_start_time   (float):            Time as a floating point number expressed in seconds since the epoch, in UTC
+
+        Raises:
+            VimConnException
+
+        """
+        for floating_network in external_network:
+            try:
+                assigned = False
+                floating_ip_retries = 3
+                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
+                # several times
+                while not assigned:
+
+                    free_floating_ip = self._get_free_floating_ip(
+                        server, floating_network
+                    )
+
+                    if not free_floating_ip:
+                        self._create_floating_ip(
+                            floating_network, server, created_items
+                        )
+
+                    try:
+                        # For race condition ensure not already assigned
+                        fip = self.neutron.show_floatingip(free_floating_ip)
+
+                        if fip["floatingip"].get("port_id"):
+                            continue
+
+                        # Assign floating ip
+                        fip = self._assign_floating_ip(
+                            free_floating_ip, floating_network
+                        )
+
+                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
+                            self.logger.warning(
+                                "floating_ip {} re-assigned to other port".format(
+                                    free_floating_ip
+                                )
+                            )
+                            continue
+
+                        self.logger.debug(
+                            "Assigned floating_ip {} to VM {}".format(
+                                free_floating_ip, server.id
+                            )
+                        )
+
+                        assigned = True
+
+                    except Exception as e:
+                        # Openstack need some time after VM creation to assign an IP. So retry if fails
+                        vm_status = self.nova.servers.get(server.id).status
+
+                        if vm_status not in ("ACTIVE", "ERROR"):
+                            if time.time() - vm_start_time < server_timeout:
+                                time.sleep(5)
+                                continue
+                        elif floating_ip_retries > 0:
+                            floating_ip_retries -= 1
+                            continue
+
+                        raise vimconn.VimConnException(
+                            "Cannot create floating_ip: {} {}".format(
+                                type(e).__name__, e
+                            ),
+                            http_code=vimconn.HTTP_Conflict,
+                        )
+
+            except Exception as e:
+                if not floating_network["exit_on_floating_ip_error"]:
+                    self.logger.error("Cannot create floating_ip. %s", str(e))
+                    continue
+
+                raise
+
+    def _update_port_security_for_vminstance(
+        self,
+        no_secured_ports: list,
+        server: object,
+    ) -> None:
+        """Updates the port security according to no_secured_ports list.
+
+        Args:
+            no_secured_ports    (list):     List of ports that security will be disabled
+            server  (object):               Server Object
+
+        Raises:
+            VimConnException
+
+        """
+        # Wait until the VM is active and then disable the port-security
+        if no_secured_ports:
+            self.__wait_for_vm(server.id, "ACTIVE")
+
+        for port in no_secured_ports:
+            port_update = {
+                "port": {"port_security_enabled": False, "security_groups": None}
+            }
+
+            if port[1] == "allow-address-pairs":
+                port_update = {
+                    "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
+                }
+
+            try:
+                self.neutron.update_port(port[0], port_update)
+
+            except Exception:
+
+                raise vimconn.VimConnException(
+                    "It was not possible to disable port security for port {}".format(
+                        port[0]
+                    )
+                )
+
     def new_vminstance(
         self,
-        name,
-        description,
-        start,
-        image_id,
-        flavor_id,
-        affinity_group_list,
-        net_list,
+        name: str,
+        description: str,
+        start: bool,
+        image_id: str,
+        flavor_id: str,
+        affinity_group_list: list,
+        net_list: list,
         cloud_config=None,
         disk_list=None,
         availability_zone_index=None,
         availability_zone_list=None,
-    ):
-        """Adds a VM instance to VIM
-        Params:
-            start: indicates if VM must start or boot in pause mode. Ignored
-            image_id,flavor_id: image and flavor uuid
-            affinity_group_list: list of affinity groups, each one is a dictionary.
-                Ignore if empty.
-            net_list: list of interfaces, each one is a dictionary with:
-                name:
-                net_id: network uuid to connect
-                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
-                model: interface model, ignored #TODO
-                mac_address: used for  SR-IOV ifaces #TODO for other types
-                use: 'data', 'bridge',  'mgmt'
-                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
-                vim_id: filled/added by this function
-                floating_ip: True/False (or it can be None)
-                port_security: True/False
-            'cloud_config': (optional) dictionary with:
-                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
-                'users': (optional) list of users to be inserted, each item is a dict with:
-                    'name': (mandatory) user name,
-                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
-                'user-data': (optional) string is a text script to be passed directly to cloud-init
-                'config-files': (optional). List of files to be transferred. Each item is a dict with:
-                    'dest': (mandatory) string with the destination absolute path
-                    'encoding': (optional, by default text). Can be one of:
+    ) -> tuple:
+        """Adds a VM instance to VIM.
+
+        Args:
+            name    (str):          name of VM
+            description (str):      description
+            start   (bool):         indicates if VM must start or boot in pause mode. Ignored
+            image_id    (str)       image uuid
+            flavor_id   (str)       flavor uuid
+            affinity_group_list (list):     list of affinity groups, each one is a dictionary.Ignore if empty.
+            net_list    (list):         list of interfaces, each one is a dictionary with:
+                name:   name of network
+                net_id:     network uuid to connect
+                vpci:   virtual vcpi to assign, ignored because openstack lack #TODO
+                model:  interface model, ignored #TODO
+                mac_address:    used for  SR-IOV ifaces #TODO for other types
+                use:    'data', 'bridge',  'mgmt'
+                type:   'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
+                vim_id:     filled/added by this function
+                floating_ip:    True/False (or it can be None)
+                port_security:  True/False
+            cloud_config    (dict): (optional) dictionary with:
+                key-pairs:      (optional) list of strings with the public key to be inserted to the default user
+                users:      (optional) list of users to be inserted, each item is a dict with:
+                    name:   (mandatory) user name,
+                    key-pairs: (optional) list of strings with the public key to be inserted to the user
+                user-data:  (optional) string is a text script to be passed directly to cloud-init
+                config-files:   (optional). List of files to be transferred. Each item is a dict with:
+                    dest:   (mandatory) string with the destination absolute path
+                    encoding:   (optional, by default text). Can be one of:
                         'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
-                    'content' (mandatory): string with the content of the file
-                    'permissions': (optional) string with file permissions, typically octal notation '0644'
-                    'owner': (optional) file owner, string with the format 'owner:group'
-                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
-            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
-                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
-                'size': (mandatory) string with the size of the disk in GB
-                'vim_id' (optional) should use this existing volume id
-            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
-            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
+                    content :    (mandatory) string with the content of the file
+                    permissions:    (optional) string with file permissions, typically octal notation '0644'
+                    owner:  (optional) file owner, string with the format 'owner:group'
+                boot-data-drive:    boolean to indicate if user-data must be passed using a boot drive (hard disk)
+            disk_list:  (optional) list with additional disks to the VM. Each item is a dict with:
+                image_id:   (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                size:   (mandatory) string with the size of the disk in GB
+                vim_id:  (optional) should use this existing volume id
+            availability_zone_index:    Index of availability_zone_list to use for this this VM. None if not AV required
+            availability_zone_list:     list of availability zones given by user in the VNFD descriptor.  Ignore if
                 availability_zone_index is None
                 #TODO ip, security groups
-        Returns a tuple with the instance identifier and created_items or raises an exception on error
+
+        Returns:
+            A tuple with the instance identifier and created_items or raises an exception on error
             created_items can be None or a dictionary where this method can include key-values that will be passed to
             the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
             as not present.
+
         """
         self.logger.debug(
             "new_vminstance input: image='%s' flavor='%s' nics='%s'",
@@ -1763,235 +2530,47 @@
         try:
             server = None
             created_items = {}
-            # metadata = {}
             net_list_vim = []
+            # list of external networks to be connected to instance, later on used to create floating_ip
             external_network = []
-            # ^list of external networks to be connected to instance, later on used to create floating_ip
-            no_secured_ports = []  # List of port-is with port-security disabled
+            # List of ports with port-security disabled
+            no_secured_ports = []
+            block_device_mapping = {}
+            existing_vim_volumes = []
+            server_group_id = None
+            scheduller_hints = {}
+
+            # Check the Openstack Connection
             self._reload_connection()
-            # metadata_vpci = {}  # For a specific neutron plugin
-            block_device_mapping = None
 
-            for net in net_list:
-                if not net.get("net_id"):  # skip non connected iface
-                    continue
-
-                port_dict = {
-                    "network_id": net["net_id"],
-                    "name": net.get("name"),
-                    "admin_state_up": True,
-                }
-
-                if (
-                    self.config.get("security_groups")
-                    and net.get("port_security") is not False
-                    and not self.config.get("no_port_security_extension")
-                ):
-                    if not self.security_groups_id:
-                        self._get_ids_from_name()
-
-                    port_dict["security_groups"] = self.security_groups_id
-
-                if net["type"] == "virtual":
-                    pass
-                    # if "vpci" in net:
-                    #     metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
-                elif net["type"] == "VF" or net["type"] == "SR-IOV":  # for VF
-                    # if "vpci" in net:
-                    #     if "VF" not in metadata_vpci:
-                    #         metadata_vpci["VF"]=[]
-                    #     metadata_vpci["VF"].append([ net["vpci"], "" ])
-                    port_dict["binding:vnic_type"] = "direct"
-
-                    # VIO specific Changes
-                    if self.vim_type == "VIO":
-                        # Need to create port with port_security_enabled = False and no-security-groups
-                        port_dict["port_security_enabled"] = False
-                        port_dict["provider_security_groups"] = []
-                        port_dict["security_groups"] = []
-                else:  # For PT PCI-PASSTHROUGH
-                    # if "vpci" in net:
-                    #     if "PF" not in metadata_vpci:
-                    #         metadata_vpci["PF"]=[]
-                    #     metadata_vpci["PF"].append([ net["vpci"], "" ])
-                    port_dict["binding:vnic_type"] = "direct-physical"
-
-                if not port_dict["name"]:
-                    port_dict["name"] = name
-
-                if net.get("mac_address"):
-                    port_dict["mac_address"] = net["mac_address"]
-
-                if net.get("ip_address"):
-                    port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
-                    # TODO add "subnet_id": <subnet_id>
-
-                new_port = self.neutron.create_port({"port": port_dict})
-                created_items["port:" + str(new_port["port"]["id"])] = True
-                net["mac_adress"] = new_port["port"]["mac_address"]
-                net["vim_id"] = new_port["port"]["id"]
-                # if try to use a network without subnetwork, it will return a emtpy list
-                fixed_ips = new_port["port"].get("fixed_ips")
-
-                if fixed_ips:
-                    net["ip"] = fixed_ips[0].get("ip_address")
-                else:
-                    net["ip"] = None
-
-                port = {"port-id": new_port["port"]["id"]}
-                if float(self.nova.api_version.get_string()) >= 2.32:
-                    port["tag"] = new_port["port"]["name"]
-
-                net_list_vim.append(port)
-
-                if net.get("floating_ip", False):
-                    net["exit_on_floating_ip_error"] = True
-                    external_network.append(net)
-                elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
-                    net["exit_on_floating_ip_error"] = False
-                    external_network.append(net)
-                    net["floating_ip"] = self.config.get("use_floating_ip")
-
-                # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
-                # is dropped.
-                # As a workaround we wait until the VM is active and then disable the port-security
-                if net.get("port_security") is False and not self.config.get(
-                    "no_port_security_extension"
-                ):
-                    no_secured_ports.append(
-                        (
-                            new_port["port"]["id"],
-                            net.get("port_security_disable_strategy"),
-                        )
-                    )
-
-            # if metadata_vpci:
-            #     metadata = {"pci_assignement": json.dumps(metadata_vpci)}
-            #     if len(metadata["pci_assignement"]) >255:
-            #         #limit the metadata size
-            #         #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
-            #         self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
-            #         metadata = {}
-
-            self.logger.debug(
-                "name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
-                name,
-                image_id,
-                flavor_id,
-                str(net_list_vim),
-                description,
+            # Prepare network list
+            self._prepare_network_for_vminstance(
+                name=name,
+                net_list=net_list,
+                created_items=created_items,
+                net_list_vim=net_list_vim,
+                external_network=external_network,
+                no_secured_ports=no_secured_ports,
             )
 
-            # cloud config
+            # Cloud config
             config_drive, userdata = self._create_user_data(cloud_config)
 
-            # get availability Zone
+            # Get availability Zone
             vm_av_zone = self._get_vm_availability_zone(
                 availability_zone_index, availability_zone_list
             )
 
-            # Create additional volumes in case these are present in disk_list
-            existing_vim_volumes = []
-            base_disk_index = ord("b")
-            boot_volume_id = None
             if disk_list:
-                block_device_mapping = {}
-                for disk in disk_list:
-                    if "image_id" in disk:
-                        # persistent root volume
-                        base_disk_index = ord("a")
-                        image_id = ""
-                        # use existing persistent root volume
-                        if disk.get("vim_volume_id"):
-                            block_device_mapping["vd" + chr(base_disk_index)] = disk[
-                                "vim_volume_id"
-                            ]
-                            existing_vim_volumes.append({"id": disk["vim_volume_id"]})
-                        # use existing persistent root volume
-                        elif disk.get("vim_id"):
-                            block_device_mapping["vd" + chr(base_disk_index)] = disk[
-                                "vim_id"
-                            ]
-                            existing_vim_volumes.append({"id": disk["vim_id"]})
-                        else:
-                            # create persistent root volume
-                            volume = self.cinder.volumes.create(
-                                size=disk["size"],
-                                name=name + "vd" + chr(base_disk_index),
-                                imageRef=disk["image_id"],
-                                # Make sure volume is in the same AZ as the VM to be attached to
-                                availability_zone=vm_av_zone,
-                            )
-                            boot_volume_id = volume.id
-                            created_items["volume:" + str(volume.id)] = True
-                            block_device_mapping[
-                                "vd" + chr(base_disk_index)
-                            ] = volume.id
-                    else:
-                        # non-root persistent volume
-                        key_id = (
-                            "vim_volume_id"
-                            if "vim_volume_id" in disk.keys()
-                            else "vim_id"
-                        )
-                        if disk.get(key_id):
-                            # use existing persistent volume
-                            block_device_mapping["vd" + chr(base_disk_index)] = disk[
-                                key_id
-                            ]
-                            existing_vim_volumes.append({"id": disk[key_id]})
-                        else:
-                            # create persistent volume
-                            volume = self.cinder.volumes.create(
-                                size=disk["size"],
-                                name=name + "vd" + chr(base_disk_index),
-                                # Make sure volume is in the same AZ as the VM to be attached to
-                                availability_zone=vm_av_zone,
-                            )
-                            created_items["volume:" + str(volume.id)] = True
-                            block_device_mapping[
-                                "vd" + chr(base_disk_index)
-                            ] = volume.id
-
-                    base_disk_index += 1
-
-                # Wait until created volumes are with status available
-                elapsed_time = 0
-                while elapsed_time < volume_timeout:
-                    for created_item in created_items:
-                        v, _, volume_id = created_item.partition(":")
-                        if v == "volume":
-                            if self.cinder.volumes.get(volume_id).status != "available":
-                                break
-                    else:  # all ready: break from while
-                        break
-
-                    time.sleep(5)
-                    elapsed_time += 5
-
-                # Wait until existing volumes in vim are with status available
-                while elapsed_time < volume_timeout:
-                    for volume in existing_vim_volumes:
-                        if self.cinder.volumes.get(volume["id"]).status != "available":
-                            break
-                    else:  # all ready: break from while
-                        break
-
-                    time.sleep(5)
-                    elapsed_time += 5
-
-                # If we exceeded the timeout rollback
-                if elapsed_time >= volume_timeout:
-                    raise vimconn.VimConnException(
-                        "Timeout creating volumes for instance " + name,
-                        http_code=vimconn.HTTP_Request_Timeout,
-                    )
-                if boot_volume_id:
-                    self.cinder.volumes.set_bootable(boot_volume_id, True)
-
-            # Manage affinity groups/server groups
-            server_group_id = None
-            scheduller_hints = {}
+                # Prepare disks
+                self._prepare_disk_for_vminstance(
+                    name=name,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                    vm_av_zone=vm_av_zone,
+                    block_device_mapping=block_device_mapping,
+                    disk_list=disk_list,
+                )
 
             if affinity_group_list:
                 # Only first id on the list will be used. Openstack restriction
@@ -2015,6 +2594,8 @@
                     server_group_id,
                 )
             )
+
+            # Create VM
             server = self.nova.servers.create(
                 name=name,
                 image=image_id,
@@ -2028,179 +2609,20 @@
                 config_drive=config_drive,
                 block_device_mapping=block_device_mapping,
                 scheduler_hints=scheduller_hints,
-            )  # , description=description)
+            )
 
             vm_start_time = time.time()
-            # Previously mentioned workaround to wait until the VM is active and then disable the port-security
-            if no_secured_ports:
-                self.__wait_for_vm(server.id, "ACTIVE")
 
-            for port in no_secured_ports:
-                port_update = {
-                    "port": {"port_security_enabled": False, "security_groups": None}
-                }
+            self._update_port_security_for_vminstance(no_secured_ports, server)
 
-                if port[1] == "allow-address-pairs":
-                    port_update = {
-                        "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
-                    }
-
-                try:
-                    self.neutron.update_port(port[0], port_update)
-                except Exception:
-                    raise vimconn.VimConnException(
-                        "It was not possible to disable port security for port {}".format(
-                            port[0]
-                        )
-                    )
-
-            # print "DONE :-)", server
-
-            # pool_id = None
-            for floating_network in external_network:
-                try:
-                    assigned = False
-                    floating_ip_retries = 3
-                    # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
-                    # several times
-                    while not assigned:
-                        floating_ips = self.neutron.list_floatingips().get(
-                            "floatingips", ()
-                        )
-                        random.shuffle(floating_ips)  # randomize
-                        for fip in floating_ips:
-                            if (
-                                fip.get("port_id")
-                                or fip.get("tenant_id") != server.tenant_id
-                            ):
-                                continue
-
-                            if isinstance(floating_network["floating_ip"], str):
-                                if (
-                                    fip.get("floating_network_id")
-                                    != floating_network["floating_ip"]
-                                ):
-                                    continue
-
-                            free_floating_ip = fip["id"]
-                            break
-                        else:
-                            if (
-                                isinstance(floating_network["floating_ip"], str)
-                                and floating_network["floating_ip"].lower() != "true"
-                            ):
-                                pool_id = floating_network["floating_ip"]
-                            else:
-                                # Find the external network
-                                external_nets = list()
-
-                                for net in self.neutron.list_networks()["networks"]:
-                                    if net["router:external"]:
-                                        external_nets.append(net)
-
-                                if len(external_nets) == 0:
-                                    raise vimconn.VimConnException(
-                                        "Cannot create floating_ip automatically since "
-                                        "no external network is present",
-                                        http_code=vimconn.HTTP_Conflict,
-                                    )
-
-                                if len(external_nets) > 1:
-                                    raise vimconn.VimConnException(
-                                        "Cannot create floating_ip automatically since "
-                                        "multiple external networks are present",
-                                        http_code=vimconn.HTTP_Conflict,
-                                    )
-
-                                pool_id = external_nets[0].get("id")
-
-                            param = {
-                                "floatingip": {
-                                    "floating_network_id": pool_id,
-                                    "tenant_id": server.tenant_id,
-                                }
-                            }
-
-                            try:
-                                # self.logger.debug("Creating floating IP")
-                                new_floating_ip = self.neutron.create_floatingip(param)
-                                free_floating_ip = new_floating_ip["floatingip"]["id"]
-                                created_items[
-                                    "floating_ip:" + str(free_floating_ip)
-                                ] = True
-                            except Exception as e:
-                                raise vimconn.VimConnException(
-                                    type(e).__name__
-                                    + ": Cannot create new floating_ip "
-                                    + str(e),
-                                    http_code=vimconn.HTTP_Conflict,
-                                )
-
-                        try:
-                            # for race condition ensure not already assigned
-                            fip = self.neutron.show_floatingip(free_floating_ip)
-
-                            if fip["floatingip"]["port_id"]:
-                                continue
-
-                            # the vim_id key contains the neutron.port_id
-                            self.neutron.update_floatingip(
-                                free_floating_ip,
-                                {"floatingip": {"port_id": floating_network["vim_id"]}},
-                            )
-                            # for race condition ensure not re-assigned to other VM after 5 seconds
-                            time.sleep(5)
-                            fip = self.neutron.show_floatingip(free_floating_ip)
-
-                            if (
-                                fip["floatingip"]["port_id"]
-                                != floating_network["vim_id"]
-                            ):
-                                self.logger.error(
-                                    "floating_ip {} re-assigned to other port".format(
-                                        free_floating_ip
-                                    )
-                                )
-                                continue
-
-                            self.logger.debug(
-                                "Assigned floating_ip {} to VM {}".format(
-                                    free_floating_ip, server.id
-                                )
-                            )
-                            assigned = True
-                        except Exception as e:
-                            # openstack need some time after VM creation to assign an IP. So retry if fails
-                            vm_status = self.nova.servers.get(server.id).status
-
-                            if vm_status not in ("ACTIVE", "ERROR"):
-                                if time.time() - vm_start_time < server_timeout:
-                                    time.sleep(5)
-                                    continue
-                            elif floating_ip_retries > 0:
-                                floating_ip_retries -= 1
-                                continue
-
-                            raise vimconn.VimConnException(
-                                "Cannot create floating_ip: {} {}".format(
-                                    type(e).__name__, e
-                                ),
-                                http_code=vimconn.HTTP_Conflict,
-                            )
-
-                except Exception as e:
-                    if not floating_network["exit_on_floating_ip_error"]:
-                        self.logger.error("Cannot create floating_ip. %s", str(e))
-                        continue
-
-                    raise
+            self._prepare_external_network_for_vminstance(
+                external_network=external_network,
+                server=server,
+                created_items=created_items,
+                vm_start_time=vm_start_time,
+            )
 
             return server.id, created_items
-        # except nvExceptions.NotFound as e:
-        #     error_value=-vimconn.HTTP_Not_Found
-        #     error_text= "vm instance %s not found" % vm_id
-        # except TypeError as e:
-        #     raise vimconn.VimConnException(type(e).__name__ + ": "+  str(e), http_code=vimconn.HTTP_Bad_Request)
 
         except Exception as e:
             server_id = None
@@ -2208,12 +2630,32 @@
                 server_id = server.id
 
             try:
+                created_items = self.remove_keep_tag_from_persistent_volumes(
+                    created_items
+                )
+
                 self.delete_vminstance(server_id, created_items)
+
             except Exception as e2:
                 self.logger.error("new_vminstance rollback fail {}".format(e2))
 
             self._format_exception(e)
 
+    @staticmethod
+    def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
+        """Removes the keep flag from persistent volumes. So, those volumes could be removed.
+
+        Args:
+            created_items (dict):       All created items belongs to VM
+
+        Returns:
+            updated_created_items   (dict):     Dict which does not include keep flag for volumes.
+
+        """
+        return {
+            key.replace(":keep", ""): value for (key, value) in created_items.items()
+        }
+
     def get_vminstance(self, vm_id):
         """Returns the VM instance information from VIM"""
         # self.logger.debug("Getting VM from VIM")
@@ -2307,76 +2749,180 @@
         ) as e:
             self._format_exception(e)
 
-    def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
-        """Removes a VM instance from VIM. Returns the old identifier"""
-        # print "osconnector: Getting VM from VIM"
-        if created_items is None:
-            created_items = {}
-
+    def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
+        """Neutron delete ports by id.
+        Args:
+            k_id    (str):      Port id in the VIM
+        """
         try:
-            self._reload_connection()
-            # delete VM ports attached to this networks before the virtual machine
-            for k, v in created_items.items():
-                if not v:  # skip already deleted
-                    continue
 
-                try:
-                    k_item, _, k_id = k.partition(":")
-                    if k_item == "port":
-                        port_dict = self.neutron.list_ports()
-                        existing_ports = [
-                            port["id"] for port in port_dict["ports"] if port_dict
-                        ]
-                        if k_id in existing_ports:
-                            self.neutron.delete_port(k_id)
-                except Exception as e:
-                    self.logger.error(
-                        "Error deleting port: {}: {}".format(type(e).__name__, e)
+            port_dict = self.neutron.list_ports()
+            existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
+
+            if k_id in existing_ports:
+                self.neutron.delete_port(k_id)
+
+        except Exception as e:
+
+            self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+
+    def _delete_volumes_by_id_wth_cinder(
+        self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
+    ) -> bool:
+        """Cinder delete volume by id.
+        Args:
+            k   (str):                      Full item name in created_items
+            k_id    (str):                  ID of the volume in VIM
+            volumes_to_hold (list):          Volumes not to delete
+            created_items   (dict):         All created items belongs to VM
+        """
+        try:
+            if k_id in volumes_to_hold:
+                return
+
+            if self.cinder.volumes.get(k_id).status != "available":
+                return True
+
+            else:
+                self.cinder.volumes.delete(k_id)
+                created_items[k] = None
+
+        except Exception as e:
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
+            )
+
+    def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
+        """Neutron delete floating ip by id.
+        Args:
+            k   (str):                      Full item name in created_items
+            k_id    (str):                  ID of floating ip in VIM
+            created_items   (dict):         All created items belongs to VM
+        """
+        try:
+            self.neutron.delete_floatingip(k_id)
+            created_items[k] = None
+
+        except Exception as e:
+            self.logger.error(
+                "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
+            )
+
+    @staticmethod
+    def _get_item_name_id(k: str) -> Tuple[str, str]:
+        k_item, _, k_id = k.partition(":")
+        return k_item, k_id
+
+    def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
+        """Delete VM ports attached to the networks before deleting virtual machine.
+        Args:
+            created_items   (dict):     All created items belongs to VM
+        """
+
+        for k, v in created_items.items():
+            if not v:  # skip already deleted
+                continue
+
+            try:
+                k_item, k_id = self._get_item_name_id(k)
+                if k_item == "port":
+                    self._delete_ports_by_id_wth_neutron(k_id)
+
+            except Exception as e:
+                self.logger.error(
+                    "Error deleting port: {}: {}".format(type(e).__name__, e)
+                )
+
+    def _delete_created_items(
+        self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
+    ) -> bool:
+        """Delete Volumes and floating ip if they exist in created_items."""
+        for k, v in created_items.items():
+            if not v:  # skip already deleted
+                continue
+
+            try:
+                k_item, k_id = self._get_item_name_id(k)
+
+                if k_item == "volume":
+
+                    unavailable_vol = self._delete_volumes_by_id_wth_cinder(
+                        k, k_id, volumes_to_hold, created_items
                     )
 
-            # #commented because detaching the volumes makes the servers.delete not work properly ?!?
-            # #dettach volumes attached
-            # server = self.nova.servers.get(vm_id)
-            # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"]   #volume["id"]
-            # #for volume in volumes_attached_dict:
-            # #    self.cinder.volumes.detach(volume["id"])
+                    if unavailable_vol:
+                        keep_waiting = True
+
+                elif k_item == "floating_ip":
+
+                    self._delete_floating_ip_by_id(k, k_id, created_items)
+
+            except Exception as e:
+                self.logger.error("Error deleting {}: {}".format(k, e))
+
+        return keep_waiting
+
+    @staticmethod
+    def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
+        """Remove from created_items the volumes that have the keep flag.
+
+        Args:
+            created_items   (dict):         All created items belongs to VM
+
+        Returns:
+            created_items   (dict):         created_items without the keep-flagged persistent volumes
+        """
+        return {
+            key: value
+            for (key, value) in created_items.items()
+            if len(key.split(":")) == 2
+        }
+
+    def delete_vminstance(
+        self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
+    ) -> None:
+        """Removes a VM instance from VIM. Returns the old identifier.
+        Args:
+            vm_id   (str):              Identifier of VM instance
+            created_items   (dict):     All created items belongs to VM
+            volumes_to_hold (list):     Volumes_to_hold
+        """
+        if created_items is None:
+            created_items = {}
+        if volumes_to_hold is None:
+            volumes_to_hold = []
+
+        try:
+            created_items = self._extract_items_wth_keep_flag_from_created_items(
+                created_items
+            )
+
+            self._reload_connection()
+
+            # Delete VM ports attached to the networks before the virtual machine
+            if created_items:
+                self._delete_vm_ports_attached_to_network(created_items)
 
             if vm_id:
                 self.nova.servers.delete(vm_id)
 
-            # delete volumes. Although having detached, they should have in active status before deleting
-            # we ensure in this loop
+            # Although having detached, volumes should have in active status before deleting.
+            # We ensure in this loop
             keep_waiting = True
             elapsed_time = 0
 
             while keep_waiting and elapsed_time < volume_timeout:
                 keep_waiting = False
 
-                for k, v in created_items.items():
-                    if not v:  # skip already deleted
-                        continue
-
-                    try:
-                        k_item, _, k_id = k.partition(":")
-                        if k_item == "volume":
-                            if self.cinder.volumes.get(k_id).status != "available":
-                                keep_waiting = True
-                            else:
-                                if k_id not in volumes_to_hold:
-                                    self.cinder.volumes.delete(k_id)
-                                    created_items[k] = None
-                        elif k_item == "floating_ip":  # floating ip
-                            self.neutron.delete_floatingip(k_id)
-                            created_items[k] = None
-
-                    except Exception as e:
-                        self.logger.error("Error deleting {}: {}".format(k, e))
+                # Delete volumes and floating IP.
+                keep_waiting = self._delete_created_items(
+                    created_items, volumes_to_hold, keep_waiting
+                )
 
                 if keep_waiting:
                     time.sleep(1)
                     elapsed_time += 1
 
-            return None
         except (
             nvExceptions.NotFound,
             ksExceptions.ClientException,
diff --git a/releasenotes/notes/Refactor_openstack_new_vminstance-18ca76a74fd351cb.yaml b/releasenotes/notes/Refactor_openstack_new_vminstance-18ca76a74fd351cb.yaml
new file mode 100644
index 0000000..dbc0320
--- /dev/null
+++ b/releasenotes/notes/Refactor_openstack_new_vminstance-18ca76a74fd351cb.yaml
@@ -0,0 +1,20 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+  - |
+    Refactor openstack new_vminstance method
diff --git a/releasenotes/notes/binding_flake8-6d57c768bc7f88c2.yaml b/releasenotes/notes/binding_flake8-6d57c768bc7f88c2.yaml
new file mode 100644
index 0000000..20ee4ba
--- /dev/null
+++ b/releasenotes/notes/binding_flake8-6d57c768bc7f88c2.yaml
@@ -0,0 +1,23 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+  - |
+    Pin flake8 to version 5.0.4 in the tox flake8 environment so that lint
+    results stay reproducible and are not broken by incompatible new flake8
+    releases.
diff --git a/releasenotes/notes/change_log_level-d841584449c863fa.yaml b/releasenotes/notes/change_log_level-d841584449c863fa.yaml
new file mode 100644
index 0000000..ce22c0e
--- /dev/null
+++ b/releasenotes/notes/change_log_level-d841584449c863fa.yaml
@@ -0,0 +1,22 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+  - |
+    Fixes Bug 2172: FileNotFoundError during unloading of the VIM. The log level in the NG-RO __init__ file
+    is changed from exception to warning, because an exception was logged whenever osm_ng_ro is not
+    installed, which always happens while running the tests.
diff --git a/releasenotes/notes/feauture_10936-d0301da2e7d933de.yaml b/releasenotes/notes/feauture_10936-d0301da2e7d933de.yaml
new file mode 100644
index 0000000..1910c5d
--- /dev/null
+++ b/releasenotes/notes/feauture_10936-d0301da2e7d933de.yaml
@@ -0,0 +1,25 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+  - |
+    Feature 10936 - Keeping Persistent Volume of VNF
+    Keeping created persistent volumes when the NS terminated according to
+    vdu-storage-requirements configuration in VNFD.
+
+
+
diff --git a/releasenotes/notes/fix_bug_2159-0f354a90f97425bb.yaml b/releasenotes/notes/fix_bug_2159-0f354a90f97425bb.yaml
new file mode 100644
index 0000000..22d4053
--- /dev/null
+++ b/releasenotes/notes/fix_bug_2159-0f354a90f97425bb.yaml
@@ -0,0 +1,21 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+  - |
+    This fixes the bug 2159. RO will create the network with the provided provider-network 
+    profile when specified, even if it's an overlay network or a management network.
diff --git a/releasenotes/notes/fix_bug_2180-dd1ab93148aa4eb2.yaml b/releasenotes/notes/fix_bug_2180-dd1ab93148aa4eb2.yaml
new file mode 100644
index 0000000..2388637
--- /dev/null
+++ b/releasenotes/notes/fix_bug_2180-dd1ab93148aa4eb2.yaml
@@ -0,0 +1,22 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+  - |
+    This fixes the bug 2180. EPA will set only the parameters that are explicitly
+    defined in the vnfd descriptor. Numa policy supports multiple nodes with numa
+    cpus.
diff --git a/releasenotes/notes/fixing_block_device_mapping_variable-0512331f482fe8bc.yaml b/releasenotes/notes/fixing_block_device_mapping_variable-0512331f482fe8bc.yaml
new file mode 100644
index 0000000..d26c59e
--- /dev/null
+++ b/releasenotes/notes/fixing_block_device_mapping_variable-0512331f482fe8bc.yaml
@@ -0,0 +1,20 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+  - |
+    Adding missing variable "block_device_mapping" to prepare_disk_for_vminstance method.
diff --git a/releasenotes/notes/fixing_method_signature-d09f0b0320474a79.yaml b/releasenotes/notes/fixing_method_signature-d09f0b0320474a79.yaml
new file mode 100644
index 0000000..69896ab
--- /dev/null
+++ b/releasenotes/notes/fixing_method_signature-d09f0b0320474a79.yaml
@@ -0,0 +1,21 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+  - |
+    Fixing the signature of method _get_free_floating_ip in vimconn_openstack.py.
+
diff --git a/releasenotes/notes/refactoring_process_vdu_params-45301da2e7d933de.yaml b/releasenotes/notes/refactoring_process_vdu_params-45301da2e7d933de.yaml
new file mode 100644
index 0000000..6f5bead
--- /dev/null
+++ b/releasenotes/notes/refactoring_process_vdu_params-45301da2e7d933de.yaml
@@ -0,0 +1,24 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+  - |
+     Refactoring NG-RO/ns.py _process_vdu_params
+     and vimconn_openstack.py delete_vminstance methods, and adding unit tests.
+
+
+
diff --git a/tox.ini b/tox.ini
index 567cefd..04a5ad5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -138,7 +138,7 @@
 #######################################################################################
 [testenv:flake8]
 deps =
-        flake8
+        flake8==5.0.4
         flake8-import-order
 skip_install = true
 commands =