Disable the check of the release notes
[osm/RO.git] / NG-RO / osm_ng_ro / ns.py
index 0a40367..fc2b9a7 100644 (file)
 # limitations under the License.
 ##
 
+from copy import deepcopy
 from http import HTTPStatus
+from itertools import product
 import logging
 from random import choice as random_choice
 from threading import Lock
 from time import time
 from traceback import format_exc as traceback_format_exc
-from typing import Any, Dict, Tuple, Type
+from typing import Any, Dict, List, Optional, Tuple, Type
 from uuid import uuid4
 
 from cryptography.hazmat.backends import default_backend as crypto_default_backend
@@ -30,6 +32,7 @@ from cryptography.hazmat.primitives import serialization as crypto_serialization
 from cryptography.hazmat.primitives.asymmetric import rsa
 from jinja2 import (
     Environment,
+    select_autoescape,
     StrictUndefined,
     TemplateError,
     TemplateNotFound,
@@ -76,8 +79,8 @@ def get_process_id():
 
             if text_id:
                 return text_id
-    except Exception:
-        pass
+    except Exception as error:
+        # Log with traceback instead of silently swallowing the failure;
+        # the caller still falls through to the random-id fallback below.
+        logging.exception(f"{error} occurred while getting process id")
 
     # Return a random id
     return "".join(random_choice("0123456789abcdef") for _ in range(12))
@@ -114,14 +117,24 @@ class Ns(object):
             "image": Ns._process_image_params,
             "flavor": Ns._process_flavor_params,
             "vdu": Ns._process_vdu_params,
+            "classification": Ns._process_classification_params,
+            "sfi": Ns._process_sfi_params,
+            "sf": Ns._process_sf_params,
+            "sfp": Ns._process_sfp_params,
             "affinity-or-anti-affinity-group": Ns._process_affinity_group_params,
+            "shared-volumes": Ns._process_shared_volumes_params,
         }
         self.db_path_map = {
             "net": "vld",
             "image": "image",
             "flavor": "flavor",
             "vdu": "vdur",
+            "classification": "classification",
+            "sfi": "sfi",
+            "sf": "sf",
+            "sfp": "sfp",
             "affinity-or-anti-affinity-group": "affinity-or-anti-affinity-group",
+            "shared-volumes": "shared-volumes",
         }
 
     def init_db(self, target_version):
@@ -379,7 +392,10 @@ class Ns(object):
             str: [description]
         """
         try:
-            env = Environment(undefined=StrictUndefined)
+            env = Environment(
+                undefined=StrictUndefined,
+                autoescape=select_autoescape(default_for_string=True, default=True),
+            )
             template = env.from_string(cloud_init_content)
 
             return template.render(params or {})
@@ -642,44 +658,57 @@ class Ns(object):
             Tuple[Dict[str, Any], bool]: [description]
         """
         numa = {}
+        numa_list = []
         epa_vcpu_set = False
 
         if guest_epa_quota.get("numa-node-policy"):
             numa_node_policy = guest_epa_quota.get("numa-node-policy")
 
             if numa_node_policy.get("node"):
-                numa_node = numa_node_policy["node"][0]
-
-                if numa_node.get("num-cores"):
-                    numa["cores"] = numa_node["num-cores"]
-                    epa_vcpu_set = True
-
-                paired_threads = numa_node.get("paired-threads", {})
-                if paired_threads.get("num-paired-threads"):
-                    numa["paired-threads"] = int(
-                        numa_node["paired-threads"]["num-paired-threads"]
-                    )
-                    epa_vcpu_set = True
+                for numa_node in numa_node_policy["node"]:
+                    vcpu_list = []
+                    if numa_node.get("id"):
+                        numa["id"] = int(numa_node["id"])
+
+                    if numa_node.get("vcpu"):
+                        for vcpu in numa_node.get("vcpu"):
+                            vcpu_id = int(vcpu.get("id"))
+                            vcpu_list.append(vcpu_id)
+                        numa["vcpu"] = vcpu_list
+
+                    if numa_node.get("num-cores"):
+                        numa["cores"] = numa_node["num-cores"]
+                        epa_vcpu_set = True
+
+                    paired_threads = numa_node.get("paired-threads", {})
+                    if paired_threads.get("num-paired-threads"):
+                        numa["paired_threads"] = int(
+                            numa_node["paired-threads"]["num-paired-threads"]
+                        )
+                        epa_vcpu_set = True
 
-                if paired_threads.get("paired-thread-ids"):
-                    numa["paired-threads-id"] = []
+                    if paired_threads.get("paired-thread-ids"):
+                        numa["paired-threads-id"] = []
 
-                    for pair in paired_threads["paired-thread-ids"]:
-                        numa["paired-threads-id"].append(
-                            (
-                                str(pair["thread-a"]),
-                                str(pair["thread-b"]),
+                        for pair in paired_threads["paired-thread-ids"]:
+                            numa["paired-threads-id"].append(
+                                (
+                                    str(pair["thread-a"]),
+                                    str(pair["thread-b"]),
+                                )
                             )
-                        )
 
-                if numa_node.get("num-threads"):
-                    numa["threads"] = int(numa_node["num-threads"])
-                    epa_vcpu_set = True
+                    if numa_node.get("num-threads"):
+                        numa["threads"] = int(numa_node["num-threads"])
+                        epa_vcpu_set = True
+
+                    if numa_node.get("memory-mb"):
+                        numa["memory"] = max(int(int(numa_node["memory-mb"]) / 1024), 1)
 
-                if numa_node.get("memory-mb"):
-                    numa["memory"] = max(int(int(numa_node["memory-mb"]) / 1024), 1)
+                    numa_list.append(numa)
+                    numa = {}
 
-        return numa, epa_vcpu_set
+        return numa_list, epa_vcpu_set
 
     @staticmethod
     def _process_guest_epa_cpu_pinning_params(
@@ -704,10 +733,15 @@ class Ns(object):
             guest_epa_quota.get("cpu-pinning-policy") == "DEDICATED"
             and not epa_vcpu_set
         ):
+            # Pinning policy "REQUIRE" uses threads as host should support SMT architecture
+            # Pinning policy "ISOLATE" uses cores as host should not support SMT architecture
+            # Pinning policy "PREFER" uses threads in case host supports SMT architecture
             numa[
-                "cores"
-                if guest_epa_quota.get("cpu-thread-pinning-policy") != "PREFER"
-                else "threads"
+                (
+                    "cores"
+                    if guest_epa_quota.get("cpu-thread-pinning-policy") == "ISOLATE"
+                    else "threads"
+                )
             ] = max(vcpu_count, 1)
             local_epa_vcpu_set = True
 
@@ -727,23 +761,39 @@ class Ns(object):
         """
         extended = {}
         numa = {}
+        numa_list = []
 
         if target_flavor.get("guest-epa"):
             guest_epa = target_flavor["guest-epa"]
 
-            numa, epa_vcpu_set = Ns._process_guest_epa_numa_params(
+            numa_list, epa_vcpu_set = Ns._process_guest_epa_numa_params(
                 guest_epa_quota=guest_epa
             )
 
             if guest_epa.get("mempage-size"):
                 extended["mempage-size"] = guest_epa.get("mempage-size")
 
+            if guest_epa.get("cpu-pinning-policy"):
+                extended["cpu-pinning-policy"] = guest_epa.get("cpu-pinning-policy")
+
+            if guest_epa.get("cpu-thread-pinning-policy"):
+                extended["cpu-thread-pinning-policy"] = guest_epa.get(
+                    "cpu-thread-pinning-policy"
+                )
+
+            if guest_epa.get("numa-node-policy"):
+                if guest_epa.get("numa-node-policy").get("mem-policy"):
+                    extended["mem-policy"] = guest_epa.get("numa-node-policy").get(
+                        "mem-policy"
+                    )
+
             tmp_numa, epa_vcpu_set = Ns._process_guest_epa_cpu_pinning_params(
                 guest_epa_quota=guest_epa,
                 vcpu_count=int(target_flavor.get("vcpu-count", 1)),
                 epa_vcpu_set=epa_vcpu_set,
             )
-            numa.update(tmp_numa)
+            for numa in numa_list:
+                numa.update(tmp_numa)
 
             extended.update(
                 Ns._process_guest_epa_quota_params(
@@ -753,7 +803,7 @@ class Ns(object):
             )
 
         if numa:
-            extended["numas"] = [numa]
+            extended["numas"] = numa_list
 
         return extended
 
@@ -776,17 +826,40 @@ class Ns(object):
         Returns:
             Dict[str, Any]: [description]
         """
+        db = kwargs.get("db")
+        target_vdur = {}
+
+        for vnf in indata.get("vnf", []):
+            for vdur in vnf.get("vdur", []):
+                if vdur.get("ns-flavor-id") == target_flavor.get("id"):
+                    target_vdur = vdur
+
+        vim_flavor_id = (
+            target_vdur.get("additionalParams", {}).get("OSM", {}).get("vim_flavor_id")
+        )
+        if vim_flavor_id:  # vim-flavor-id was passed so flavor won't be created
+            return {"find_params": {"vim_flavor_id": vim_flavor_id}}
+
         flavor_data = {
             "disk": int(target_flavor["storage-gb"]),
             "ram": int(target_flavor["memory-mb"]),
             "vcpus": int(target_flavor["vcpu-count"]),
         }
 
-        target_vdur = {}
-        for vnf in indata.get("vnf", []):
-            for vdur in vnf.get("vdur", []):
-                if vdur.get("ns-flavor-id") == target_flavor["id"]:
-                    target_vdur = vdur
+        if db and isinstance(indata.get("vnf"), list):
+            vnfd_id = indata.get("vnf")[0].get("vnfd-id")
+            vnfd = db.get_one("vnfds", {"_id": vnfd_id})
+            # check if there is persistent root disk
+            for vdu in vnfd.get("vdu", ()):
+                if vdu["name"] == target_vdur.get("vdu-name"):
+                    for vsd in vnfd.get("virtual-storage-desc", ()):
+                        if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
+                            root_disk = vsd
+                            if (
+                                root_disk.get("type-of-storage")
+                                == "persistent-storage:persistent-storage"
+                            ):
+                                flavor_data["disk"] = 0
 
         for storage in target_vdur.get("virtual-storages", []):
             if (
@@ -805,46 +878,222 @@ class Ns(object):
         flavor_data_name = flavor_data.copy()
         flavor_data_name["name"] = target_flavor["name"]
         extra_dict["params"] = {"flavor_data": flavor_data_name}
+        return extra_dict
+
+    @staticmethod
+    def _prefix_ip_address(ip_address):
+        """Return *ip_address* with a /32 host mask appended when it has no prefix."""
+        return ip_address if "/" in ip_address else ip_address + "/32"
+
+    @staticmethod
+    def _process_ip_proto(ip_proto):
+        """Map an IANA protocol number (1/6/17) to its name; pass anything else through."""
+        return {1: "icmp", 6: "tcp", 17: "udp"}.get(ip_proto, ip_proto)
+
+    @staticmethod
+    def _process_classification_params(
+        target_classification: Dict[str, Any],
+        indata: Dict[str, Any],
+        vim_info: Dict[str, Any],
+        target_record_id: str,
+        **kwargs: Dict[str, Any],
+    ) -> Dict[str, Any]:
+        """Build the VIM creation parameters for a flow classification.
+
+        Args:
+            target_classification (Dict[str, Any]): Classification dictionary parameters that needs to be processed to create resource on VIM
+            indata (Dict[str, Any]): Deployment info
+            vim_info (Dict[str, Any]):To add items created by OSM on the VIM.
+            target_record_id (str): Task record ID.
+            **kwargs (Dict[str, Any]): Used to send additional information to the task (expects "db").
+
+        Returns:
+            Dict[str, Any]: Return parameters required to create classification and Items on which classification is dependent.
+        """
+        vnfr_id = target_classification["vnfr_id"]
+        vdur_id = target_classification["vdur_id"]
+        port_index = target_classification["ingress_port_index"]
+
+        classification_data = {
+            "name": target_classification["id"],
+            "source_port_range_min": target_classification["source-port"],
+            "source_port_range_max": target_classification["source-port"],
+            "destination_port_range_min": target_classification["destination-port"],
+            "destination_port_range_max": target_classification["destination-port"],
+        }
+
+        # Bare addresses are normalized to host routes (/32).
+        classification_data["source_ip_prefix"] = Ns._prefix_ip_address(
+            target_classification["source-ip-address"]
+        )
+
+        classification_data["destination_ip_prefix"] = Ns._prefix_ip_address(
+            target_classification["destination-ip-address"]
+        )
+
+        classification_data["protocol"] = Ns._process_ip_proto(
+            int(target_classification["ip-proto"])
+        )
+
+        db = kwargs.get("db")
+        vdu_text = Ns._get_vnfr_vdur_text(db, vnfr_id, vdur_id)
+
+        # The classification can only be created after the VDU it points at.
+        extra_dict = {"depends_on": [vdu_text]}
+        classification_data["logical_source_port"] = "TASK-" + vdu_text
+        classification_data["logical_source_port_index"] = port_index
+
+        extra_dict["params"] = classification_data
 
         return extra_dict
 
     @staticmethod
-    def _ip_profile_to_ro(
-        ip_profile: Dict[str, Any],
+    def _process_sfi_params(
+        target_sfi: Dict[str, Any],
+        indata: Dict[str, Any],
+        vim_info: Dict[str, Any],
+        target_record_id: str,
+        **kwargs: Dict[str, Any],
     ) -> Dict[str, Any]:
         """[summary]
 
         Args:
-            ip_profile (Dict[str, Any]): [description]
+            target_sfi (Dict[str, Any]): SFI dictionary parameters that needs to be processed to create resource on VIM
+            indata (Dict[str, Any]): deployment info
+            vim_info (Dict[str, Any]): To add items created by OSM on the VIM.
+            target_record_id (str): Task record ID.
+            **kwargs (Dict[str, Any]): Used to send additional information to the task.
 
         Returns:
-            Dict[str, Any]: [description]
+            Dict[str, Any]: Return parameters required to create SFI and Items on which SFI is dependent.
         """
-        if not ip_profile:
-            return None
-
-        ro_ip_profile = {
-            "ip_version": "IPv4"
-            if "v4" in ip_profile.get("ip-version", "ipv4")
-            else "IPv6",
-            "subnet_address": ip_profile.get("subnet-address"),
-            "gateway_address": ip_profile.get("gateway-address"),
-            "dhcp_enabled": ip_profile.get("dhcp-params", {}).get("enabled", False),
-            "dhcp_start_address": ip_profile.get("dhcp-params", {}).get(
-                "start-address", None
-            ),
-            "dhcp_count": ip_profile.get("dhcp-params", {}).get("count", None),
+
+        vnfr_id = target_sfi["vnfr_id"]
+        vdur_id = target_sfi["vdur_id"]
+
+        sfi_data = {
+            "name": target_sfi["id"],
+            "ingress_port_index": target_sfi["ingress_port_index"],
+            "egress_port_index": target_sfi["egress_port_index"],
         }
 
-        if ip_profile.get("dns-server"):
-            ro_ip_profile["dns_address"] = ";".join(
-                [v["address"] for v in ip_profile["dns-server"] if v.get("address")]
-            )
+        db = kwargs.get("db")
+        vdu_text = Ns._get_vnfr_vdur_text(db, vnfr_id, vdur_id)
+
+        extra_dict = {"depends_on": [vdu_text]}
+        sfi_data["ingress_port"] = "TASK-" + vdu_text
+        sfi_data["egress_port"] = "TASK-" + vdu_text
+
+        extra_dict["params"] = sfi_data
 
-        if ip_profile.get("security-group"):
-            ro_ip_profile["security_group"] = ip_profile["security-group"]
+        return extra_dict
+
+    @staticmethod
+    def _get_vnfr_vdur_text(db, vnfr_id, vdur_id):
+        """Build the task text "vnfrs:<vnfr_id>:vdur.<instance-id>" for a VDU.
+
+        Reads the VNFR from the DB and picks the first VDUR whose
+        "vdu-id-ref" equals *vdur_id*; returns "" when no match is found.
+        """
+        vnf_preffix = "vnfrs:{}".format(vnfr_id)
+        db_vnfr = db.get_one("vnfrs", {"_id": vnfr_id})
+        vdur_list = []
+        vdu_text = ""
+
-        return ro_ip_profile
+        if db_vnfr:
+            # All VDUR instance ids that reference the requested VDU descriptor.
+            vdur_list = [
+                vdur["id"] for vdur in db_vnfr["vdur"] if vdur["vdu-id-ref"] == vdur_id
+            ]
+
+        if vdur_list:
+            vdu_text = vnf_preffix + ":vdur." + vdur_list[0]
+
+        return vdu_text
+
+    @staticmethod
+    def _process_sf_params(
+        target_sf: Dict[str, Any],
+        indata: Dict[str, Any],
+        vim_info: Dict[str, Any],
+        target_record_id: str,
+        **kwargs: Dict[str, Any],
+    ) -> Dict[str, Any]:
+        """Build the VIM creation parameters for a Service Function (SF).
+
+        Args:
+            target_sf (Dict[str, Any]): SF dictionary parameters that needs to be processed to create resource on VIM
+            indata (Dict[str, Any]): Deployment info.
+            vim_info (Dict[str, Any]):To add items created by OSM on the VIM.
+            target_record_id (str): Task record ID.
+            **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+        Returns:
+            Dict[str, Any]: Return parameters required to create SF and Items on which SF is dependent.
+        """
+        ns_preffix = "nsrs:{}".format(kwargs.get("nsr_id", ""))
+        sfis = target_sf["sfis"]
+        depends_on = []
+
+        # Each SFI entry is rewritten in place to its TASK reference and recorded
+        # as a dependency, so the SF is created only after all of its SFIs.
+        for position, sfi_id in enumerate(sfis):
+            sfi_text = ns_preffix + ":sfi." + sfi_id
+            depends_on.append(sfi_text)
+            sfis[position] = "TASK-" + sfi_text
+
+        return {
+            "depends_on": depends_on,
+            "params": {"name": target_sf["id"], "sfis": sfis},
+        }
+
+    @staticmethod
+    def _process_sfp_params(
+        target_sfp: Dict[str, Any],
+        indata: Dict[str, Any],
+        vim_info: Dict[str, Any],
+        target_record_id: str,
+        **kwargs: Dict[str, Any],
+    ) -> Dict[str, Any]:
+        """Build the VIM creation parameters for a Service Function Path (SFP).
+
+        Args:
+            target_sfp (Dict[str, Any]): SFP dictionary parameters that needs to be processed to create resource on VIM.
+            indata (Dict[str, Any]): Deployment info
+            vim_info (Dict[str, Any]):To add items created by OSM on the VIM.
+            target_record_id (str): Task record ID.
+            **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+        Returns:
+            Dict[str, Any]: Return parameters required to create SFP and Items on which SFP is dependent.
+        """
+        ns_preffix = "nsrs:{}".format(kwargs.get("nsr_id"))
+        sfs = target_sfp["sfs"]
+        classifications = target_sfp["classifications"]
+        depends_on = []
+
+        # Rewrite every SF and classification entry in place to its TASK reference
+        # and record it as a dependency of the SFP creation task.
+        for position, sf_id in enumerate(sfs):
+            sf_text = ns_preffix + ":sf." + sf_id
+            depends_on.append(sf_text)
+            sfs[position] = "TASK-" + sf_text
+
+        for position, classification_id in enumerate(classifications):
+            classification_text = ns_preffix + ":classification." + classification_id
+            depends_on.append(classification_text)
+            classifications[position] = "TASK-" + classification_text
+
+        return {
+            "depends_on": depends_on,
+            "params": {
+                "name": target_sfp["id"],
+                "sfs": sfs,
+                "classifications": classifications,
+            },
+        }
 
     @staticmethod
     def _process_net_params(
@@ -898,7 +1147,7 @@ class Ns(object):
                     "id": vim_info.get("vim_network_id"),
                 },
             }
-        elif target_vld.get("mgmt-network"):
+        elif target_vld.get("mgmt-network") and not vim_info.get("provider_network"):
             extra_dict["find_params"] = {
                 "mgmt": True,
                 "name": target_vld["id"],
@@ -909,7 +1158,7 @@ class Ns(object):
                 "net_name": (
                     f"{indata.get('name')[:16]}-{target_vld.get('name', target_vld.get('id'))[:16]}"
                 ),
-                "ip_profile": Ns._ip_profile_to_ro(vim_info.get("ip_profile")),
+                "ip_profile": vim_info.get("ip_profile"),
                 "provider_network_profile": vim_info.get("provider_network"),
             }
 
@@ -923,122 +1172,220 @@ class Ns(object):
         return extra_dict
 
     @staticmethod
-    def _process_vdu_params(
-        target_vdu: Dict[str, Any],
-        indata: Dict[str, Any],
-        vim_info: Dict[str, Any],
-        target_record_id: str,
-        **kwargs: Dict[str, Any],
-    ) -> Dict[str, Any]:
-        """Function to process VDU parameters.
+    def find_persistent_root_volumes(
+        vnfd: dict,
+        target_vdu: dict,
+        vdu_instantiation_volumes_list: list,
+        disk_list: list,
+    ) -> Dict[str, any]:
+        """Find the persistent root volumes and add them to the disk_list
+        by parsing the instantiation parameters.
 
         Args:
-            target_vdu (Dict[str, Any]): [description]
-            indata (Dict[str, Any]): [description]
-            vim_info (Dict[str, Any]): [description]
-            target_record_id (str): [description]
+            vnfd    (dict):                                 VNF descriptor
+            target_vdu      (dict):                         processed VDU
+            vdu_instantiation_volumes_list  (list):         instantiation parameters for the each VDU as a list
+            disk_list   (list):                             to be filled up
 
         Returns:
-            Dict[str, Any]: [description]
+            persistent_root_disk    (dict):                 Details of persistent root disk
+
         """
-        vnfr_id = kwargs.get("vnfr_id")
-        nsr_id = kwargs.get("nsr_id")
-        vnfr = kwargs.get("vnfr")
-        vdu2cloud_init = kwargs.get("vdu2cloud_init")
-        tasks_by_target_record_id = kwargs.get("tasks_by_target_record_id")
-        logger = kwargs.get("logger")
-        db = kwargs.get("db")
-        fs = kwargs.get("fs")
-        ro_nsr_public_key = kwargs.get("ro_nsr_public_key")
+        persistent_root_disk = {}
+        # There can be only one root disk, when we find it, it will return the result
 
-        vnf_preffix = "vnfrs:{}".format(vnfr_id)
-        ns_preffix = "nsrs:{}".format(nsr_id)
-        image_text = ns_preffix + ":image." + target_vdu["ns-image-id"]
-        flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"]
-        extra_dict = {"depends_on": [image_text, flavor_text]}
-        net_list = []
+        for vdu, vsd in product(
+            vnfd.get("vdu", ()), vnfd.get("virtual-storage-desc", ())
+        ):
+            if (
+                vdu["name"] == target_vdu["vdu-name"]
+                and vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]
+            ):
+                root_disk = vsd
+                if (
+                    root_disk.get("type-of-storage")
+                    == "persistent-storage:persistent-storage"
+                ):
+                    for vdu_volume in vdu_instantiation_volumes_list:
+                        if (
+                            vdu_volume["vim-volume-id"]
+                            and root_disk["id"] == vdu_volume["name"]
+                        ):
+                            persistent_root_disk[vsd["id"]] = {
+                                "vim_volume_id": vdu_volume["vim-volume-id"],
+                                "image_id": vdu.get("sw-image-desc"),
+                            }
 
-        for iface_index, interface in enumerate(target_vdu["interfaces"]):
-            if interface.get("ns-vld-id"):
-                net_text = ns_preffix + ":vld." + interface["ns-vld-id"]
-            elif interface.get("vnf-vld-id"):
-                net_text = vnf_preffix + ":vld." + interface["vnf-vld-id"]
-            else:
-                logger.error(
-                    "Interface {} from vdu {} not connected to any vld".format(
-                        iface_index, target_vdu["vdu-name"]
-                    )
-                )
+                            disk_list.append(persistent_root_disk[vsd["id"]])
 
-                continue  # interface not connected to any vld
+                            return persistent_root_disk
 
-            extra_dict["depends_on"].append(net_text)
+                    else:
+                        if root_disk.get("size-of-storage"):
+                            persistent_root_disk[vsd["id"]] = {
+                                "image_id": vdu.get("sw-image-desc"),
+                                "size": root_disk.get("size-of-storage"),
+                                "keep": Ns.is_volume_keeping_required(root_disk),
+                            }
 
-            if "port-security-enabled" in interface:
-                interface["port_security"] = interface.pop("port-security-enabled")
+                            disk_list.append(persistent_root_disk[vsd["id"]])
 
-            if "port-security-disable-strategy" in interface:
-                interface["port_security_disable_strategy"] = interface.pop(
-                    "port-security-disable-strategy"
-                )
+                            return persistent_root_disk
+                return persistent_root_disk
 
-            net_item = {
-                x: v
-                for x, v in interface.items()
-                if x
-                in (
-                    "name",
-                    "vpci",
-                    "port_security",
-                    "port_security_disable_strategy",
-                    "floating_ip",
-                )
-            }
-            net_item["net_id"] = "TASK-" + net_text
-            net_item["type"] = "virtual"
+    @staticmethod
+    def find_persistent_volumes(
+        persistent_root_disk: dict,
+        target_vdu: dict,
+        vdu_instantiation_volumes_list: list,
+        disk_list: list,
+    ) -> None:
+        """Find the ordinary persistent volumes and add them to the disk_list
+        by parsing the instantiation parameters.
 
-            # TODO mac_address: used for  SR-IOV ifaces #TODO for other types
-            # TODO floating_ip: True/False (or it can be None)
-            if interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
-                # mark the net create task as type data
-                if deep_get(
-                    tasks_by_target_record_id,
-                    net_text,
-                    "extra_dict",
-                    "params",
-                    "net_type",
-                ):
-                    tasks_by_target_record_id[net_text]["extra_dict"]["params"][
-                        "net_type"
-                    ] = "data"
+        Args:
+            persistent_root_disk:   persistent root disk dictionary
+            target_vdu: processed VDU
+            vdu_instantiation_volumes_list: instantiation parameters for the each VDU as a list
+            disk_list:  to be filled up
 
-                net_item["use"] = "data"
-                net_item["model"] = interface["type"]
-                net_item["type"] = interface["type"]
-            elif (
-                interface.get("type") == "OM-MGMT"
-                or interface.get("mgmt-interface")
-                or interface.get("mgmt-vnf")
+        """
+        # Find the ordinary volumes which are not added to the persistent_root_disk
+        persistent_disk = {}
+        for disk in target_vdu.get("virtual-storages", {}):
+            if (
+                disk.get("type-of-storage") == "persistent-storage:persistent-storage"
+                and disk["id"] not in persistent_root_disk.keys()
             ):
-                net_item["use"] = "mgmt"
-            else:
-                # if interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
-                net_item["use"] = "bridge"
-                net_item["model"] = interface.get("type")
+                for vdu_volume in vdu_instantiation_volumes_list:
+                    if vdu_volume["vim-volume-id"] and disk["id"] == vdu_volume["name"]:
+                        persistent_disk[disk["id"]] = {
+                            "vim_volume_id": vdu_volume["vim-volume-id"],
+                        }
+                        disk_list.append(persistent_disk[disk["id"]])
 
-            if interface.get("ip-address"):
-                net_item["ip_address"] = interface["ip-address"]
+                else:
+                    if disk["id"] not in persistent_disk.keys():
+                        persistent_disk[disk["id"]] = {
+                            "size": disk.get("size-of-storage"),
+                            "keep": Ns.is_volume_keeping_required(disk),
+                        }
+                        disk_list.append(persistent_disk[disk["id"]])
 
-            if interface.get("mac-address"):
-                net_item["mac_address"] = interface["mac-address"]
+    @staticmethod
+    def is_volume_keeping_required(virtual_storage_desc: Dict[str, Any]) -> bool:
+        """Function to decide keeping persistent volume
+        upon VDU deletion.
 
-            net_list.append(net_item)
+        Args:
+            virtual_storage_desc (Dict[str, Any]): virtual storage description dictionary
 
-            if interface.get("mgmt-vnf"):
-                extra_dict["mgmt_vnf_interface"] = iface_index
-            elif interface.get("mgmt-interface"):
-                extra_dict["mgmt_vdu_interface"] = iface_index
+        Returns:
+            bool (True/False)
+        """
+
+        if not virtual_storage_desc.get("vdu-storage-requirements"):
+            return False
+        for item in virtual_storage_desc.get("vdu-storage-requirements", {}):
+            if item.get("key") == "keep-volume" and item.get("value").lower() == "true":
+                return True
+        return False
 
+    @staticmethod
+    def is_shared_volume(
+        virtual_storage_desc: Dict[str, Any], vnfd_id: str
+    ) -> (str, bool):
+        """Decide whether a virtual storage descriptor is a multi-attach (shared) volume.
+
+        Args:
+            virtual_storage_desc (Dict[str, Any]): virtual storage description dictionary
+            vnfd_id (str): vnfd id
+
+        Returns:
+            name (str): new name if it is a multiattach disk, otherwise the descriptor id
+            bool (True/False): whether the volume is shared
+        """
+        requirements = virtual_storage_desc.get("vdu-storage-requirements") or {}
+
+        for requirement in requirements:
+            is_multiattach = (
+                requirement.get("key") == "multiattach"
+                and requirement.get("value").lower() == "true"
+            )
+            if is_multiattach:
+                # Shared volumes get a deterministic name so VDUs can find them.
+                return f"shared-{virtual_storage_desc['id']}-{vnfd_id}", True
+
+        return virtual_storage_desc["id"], False
+
+    @staticmethod
+    def _sort_vdu_interfaces(target_vdu: dict) -> None:
+        """Order the VDU interfaces in place by their "position" value.
+
+        Interfaces carrying a position are sorted ascending by it; interfaces
+        without one are pushed to the end in their original relative order.
+
+        Args:
+            target_vdu  (dict):     Details of VDU to be created
+
+        """
+        def position_key(interface):
+            position = interface.get("position")
+            return (position is None, position)
+
+        target_vdu["interfaces"] = sorted(target_vdu["interfaces"], key=position_key)
+
+    @staticmethod
+    def _partially_locate_vdu_interfaces(target_vdu: dict) -> None:
+        """Only place the interfaces which has specific position.
+
+        Args:
+            target_vdu  (dict):     Details of VDU to be created
+
+        """
+        # If the position info is provided for some interfaces but not all of them, the interfaces
+        # which has specific position numbers will be placed and others' positions will not be taken care.
+        # NOTE(review): "position + 1" is truthy for every position except -1, so this guard is
+        # effectively always true when at least one interface has a position — confirm intent.
+        if any(
+            i.get("position") + 1
+            for i in target_vdu["interfaces"]
+            if i.get("position") is not None
+        ):
+            n = len(target_vdu["interfaces"])
+            # Placeholder list; -1 marks a slot not yet assigned an interface.
+            sorted_interfaces = [-1] * n
+            k, m = 0, 0
+
+            # First pass: drop each positioned interface into its slot. Positions are
+            # treated as 0-based when any interface has position 0, otherwise 1-based;
+            # either way the target slot is sorted_interfaces[idx - 1].
+            while k < n:
+                if target_vdu["interfaces"][k].get("position") is not None:
+                    if any(i.get("position") == 0 for i in target_vdu["interfaces"]):
+                        idx = target_vdu["interfaces"][k]["position"] + 1
+                    else:
+                        idx = target_vdu["interfaces"][k]["position"]
+                    sorted_interfaces[idx - 1] = target_vdu["interfaces"][k]
+                k += 1
+
+            # Second pass: fill the remaining -1 slots with the unpositioned
+            # interfaces, preserving their original relative order.
+            while m < n:
+                if target_vdu["interfaces"][m].get("position") is None:
+                    idy = sorted_interfaces.index(-1)
+                    sorted_interfaces[idy] = target_vdu["interfaces"][m]
+                m += 1
+
+            target_vdu["interfaces"] = sorted_interfaces
+
+    @staticmethod
+    def _prepare_vdu_cloud_init(
+        target_vdu: dict, vdu2cloud_init: dict, db: object, fs: object
+    ) -> Dict:
+        """Fill cloud_config dict with cloud init details.
+
+        Args:
+            target_vdu  (dict):         Details of VDU to be created
+            vdu2cloud_init  (dict):     Cloud init dict
+            db  (object):               DB object
+            fs  (object):               FS object
+
+        Returns:
+            cloud_config (dict):        Cloud config details of VDU
+
+        """
         # cloud config
         cloud_config = {}
 
@@ -1060,6 +1407,190 @@ class Ns(object):
         if target_vdu.get("boot-data-drive"):
             cloud_config["boot-data-drive"] = target_vdu.get("boot-data-drive")
 
+        return cloud_config
+
+    @staticmethod
+    def _check_vld_information_of_interfaces(
+        interface: dict, ns_preffix: str, vnf_preffix: str
+    ) -> Optional[str]:
+        """Prepare the net_text by the virtual link information for vnf and ns level.
+        Args:
+            interface   (dict):         Interface details
+            ns_preffix  (str):          Prefix of NS
+            vnf_preffix (str):          Prefix of VNF
+
+        Returns:
+            net_text    (str):          information of net
+
+        """
+        net_text = ""
+        if interface.get("ns-vld-id"):
+            net_text = ns_preffix + ":vld." + interface["ns-vld-id"]
+        elif interface.get("vnf-vld-id"):
+            net_text = vnf_preffix + ":vld." + interface["vnf-vld-id"]
+
+        return net_text
+
+    @staticmethod
+    def _prepare_interface_port_security(interface: dict) -> None:
+        """
+
+        Args:
+            interface   (dict):     Interface details
+
+        """
+        if "port-security-enabled" in interface:
+            interface["port_security"] = interface.pop("port-security-enabled")
+
+        if "port-security-disable-strategy" in interface:
+            interface["port_security_disable_strategy"] = interface.pop(
+                "port-security-disable-strategy"
+            )
+
+    @staticmethod
+    def _create_net_item_of_interface(interface: dict, net_text: str) -> dict:
+        """Prepare net item including name, port security, floating ip etc.
+
+        Args:
+            interface   (dict):         Interface details
+            net_text    (str):          information of net
+
+        Returns:
+            net_item    (dict):         Dict including net details
+
+        """
+
+        net_item = {
+            x: v
+            for x, v in interface.items()
+            if x
+            in (
+                "name",
+                "vpci",
+                "port_security",
+                "port_security_disable_strategy",
+                "floating_ip",
+            )
+        }
+        net_item["net_id"] = "TASK-" + net_text
+        net_item["type"] = "virtual"
+
+        return net_item
+
+    @staticmethod
+    def _prepare_type_of_interface(
+        interface: dict, tasks_by_target_record_id: dict, net_text: str, net_item: dict
+    ) -> None:
+        """Fill the net item type by interface type such as SR-IOV, OM-MGMT, bridge etc.
+
+        Args:
+            interface   (dict):                     Interface details
+            tasks_by_target_record_id   (dict):     Task details
+            net_text    (str):                      information of net
+            net_item    (dict):                     Dict including net details
+
+        """
+        # TODO mac_address: used for  SR-IOV ifaces #TODO for other types
+        # TODO floating_ip: True/False (or it can be None)
+
+        if interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
+            # Mark the net create task as type data
+            if deep_get(
+                tasks_by_target_record_id,
+                net_text,
+                "extra_dict",
+                "params",
+                "net_type",
+            ):
+                tasks_by_target_record_id[net_text]["extra_dict"]["params"][
+                    "net_type"
+                ] = "data"
+
+            net_item["use"] = "data"
+            net_item["model"] = interface["type"]
+            net_item["type"] = interface["type"]
+
+        elif (
+            interface.get("type") == "OM-MGMT"
+            or interface.get("mgmt-interface")
+            or interface.get("mgmt-vnf")
+        ):
+            net_item["use"] = "mgmt"
+
+        else:
+            # If interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
+            net_item["use"] = "bridge"
+            net_item["model"] = interface.get("type")
+
+    @staticmethod
+    def _prepare_vdu_interfaces(
+        target_vdu: dict,
+        extra_dict: dict,
+        ns_preffix: str,
+        vnf_preffix: str,
+        logger: object,
+        tasks_by_target_record_id: dict,
+        net_list: list,
+    ) -> None:
+        """Prepare the net_item and add net_list, add mgmt interface to extra_dict.
+
+        Args:
+            target_vdu  (dict):                             VDU to be created
+            extra_dict  (dict):                             Dictionary to be filled
+            ns_preffix  (str):                              NS prefix as string
+            vnf_preffix (str):                              VNF prefix as string
+            logger  (object):                               Logger Object
+            tasks_by_target_record_id  (dict):              Task details
+            net_list    (list):                             Net list of VDU
+        """
+        for iface_index, interface in enumerate(target_vdu["interfaces"]):
+            net_text = Ns._check_vld_information_of_interfaces(
+                interface, ns_preffix, vnf_preffix
+            )
+            if not net_text:
+                # Interface not connected to any vld
+                logger.error(
+                    "Interface {} from vdu {} not connected to any vld".format(
+                        iface_index, target_vdu["vdu-name"]
+                    )
+                )
+                continue
+
+            extra_dict["depends_on"].append(net_text)
+
+            Ns._prepare_interface_port_security(interface)
+
+            net_item = Ns._create_net_item_of_interface(interface, net_text)
+
+            Ns._prepare_type_of_interface(
+                interface, tasks_by_target_record_id, net_text, net_item
+            )
+
+            if interface.get("ip-address"):
+                net_item["ip_address"] = interface["ip-address"]
+
+            if interface.get("mac-address"):
+                net_item["mac_address"] = interface["mac-address"]
+
+            net_list.append(net_item)
+
+            if interface.get("mgmt-vnf"):
+                extra_dict["mgmt_vnf_interface"] = iface_index
+            elif interface.get("mgmt-interface"):
+                extra_dict["mgmt_vdu_interface"] = iface_index
+
+    @staticmethod
+    def _prepare_vdu_ssh_keys(
+        target_vdu: dict, ro_nsr_public_key: dict, cloud_config: dict
+    ) -> None:
+        """Add ssh keys to cloud config.
+
+        Args:
+           target_vdu  (dict):                 Details of VDU to be created
+           ro_nsr_public_key   (dict):          RO NSR public Key
+           cloud_config  (dict):               Cloud config details
+
+        """
         ssh_keys = []
 
         if target_vdu.get("ssh-keys"):
@@ -1071,26 +1602,72 @@ class Ns(object):
         if ssh_keys:
             cloud_config["key-pairs"] = ssh_keys
 
-        persistent_root_disk = {}
-        disk_list = []
-        vnfd_id = vnfr["vnfd-id"]
-        vnfd = db.get_one("vnfds", {"_id": vnfd_id})
+    @staticmethod
+    def _select_persistent_root_disk(vsd: dict, vdu: dict) -> dict:
+        """Selects the persistent root disk if exists.
+        Args:
+            vsd (dict):             Virtual storage descriptors in VNFD
+            vdu (dict):             VNF descriptor
+
+        Returns:
+            root_disk   (dict):     Selected persistent root disk
+        """
+        if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
+            root_disk = vsd
+            if root_disk.get(
+                "type-of-storage"
+            ) == "persistent-storage:persistent-storage" and root_disk.get(
+                "size-of-storage"
+            ):
+                return root_disk
+
+    @staticmethod
+    def _add_persistent_root_disk_to_disk_list(
+        vnfd: dict, target_vdu: dict, persistent_root_disk: dict, disk_list: list
+    ) -> None:
+        """Find the persistent root disk and add to disk list.
+
+        Args:
+            vnfd  (dict):                           VNF descriptor
+            target_vdu  (dict):                     Details of VDU to be created
+            persistent_root_disk    (dict):         Details of persistent root disk
+            disk_list   (list):                     Disks of VDU
+
+        """
         for vdu in vnfd.get("vdu", ()):
             if vdu["name"] == target_vdu["vdu-name"]:
                 for vsd in vnfd.get("virtual-storage-desc", ()):
-                    if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
-                        root_disk = vsd
-                        if root_disk.get(
-                            "type-of-storage"
-                        ) == "persistent-storage:persistent-storage" and root_disk.get(
-                            "size-of-storage"
-                        ):
-                            persistent_root_disk[vsd["id"]] = {
-                                "image_id": vdu.get("sw-image-desc"),
-                                "size": root_disk["size-of-storage"],
-                            }
-                            disk_list.append(persistent_root_disk[vsd["id"]])
+                    root_disk = Ns._select_persistent_root_disk(vsd, vdu)
+                    if not root_disk:
+                        continue
+
+                    persistent_root_disk[vsd["id"]] = {
+                        "image_id": vdu.get("sw-image-desc"),
+                        "size": root_disk["size-of-storage"],
+                        "keep": Ns.is_volume_keeping_required(root_disk),
+                    }
+                    disk_list.append(persistent_root_disk[vsd["id"]])
+                    break
+
+    @staticmethod
+    def _add_persistent_ordinary_disks_to_disk_list(
+        target_vdu: dict,
+        persistent_root_disk: dict,
+        persistent_ordinary_disk: dict,
+        disk_list: list,
+        extra_dict: dict,
+        vnf_id: str = None,
+        nsr_id: str = None,
+    ) -> None:
+        """Fill the disk list by adding persistent ordinary disks.
 
+        Args:
+            target_vdu  (dict):                     Details of VDU to be created
+            persistent_root_disk    (dict):         Details of persistent root disk
+            persistent_ordinary_disk    (dict):     Details of persistent ordinary disk
+            disk_list   (list):                     Disks of VDU
+
+        """
         if target_vdu.get("virtual-storages"):
             for disk in target_vdu["virtual-storages"]:
                 if (
@@ -1098,21 +1675,166 @@ class Ns(object):
                     == "persistent-storage:persistent-storage"
                     and disk["id"] not in persistent_root_disk.keys()
                 ):
-                    disk_list.append({"size": disk["size-of-storage"]})
+                    name, multiattach = Ns.is_shared_volume(disk, vnf_id)
+                    persistent_ordinary_disk[disk["id"]] = {
+                        "name": name,
+                        "size": disk["size-of-storage"],
+                        "keep": Ns.is_volume_keeping_required(disk),
+                        "multiattach": multiattach,
+                    }
+                    disk_list.append(persistent_ordinary_disk[disk["id"]])
+                    if multiattach:  # VDU creation has to wait for shared volumes
+                        extra_dict["depends_on"].append(
+                            f"nsrs:{nsr_id}:shared-volumes.{name}"
+                        )
+
+    @staticmethod
+    def _prepare_vdu_affinity_group_list(
+        target_vdu: dict, extra_dict: dict, ns_preffix: str
+    ) -> List[Dict[str, any]]:
+        """Process affinity group details to prepare affinity group list.
 
+        Args:
+            target_vdu  (dict):     Details of VDU to be created
+            extra_dict  (dict):     Dictionary to be filled
+            ns_preffix  (str):      Prefix as string
+
+        Returns:
+
+            affinity_group_list (list):     Affinity group details
+
+        """
         affinity_group_list = []
 
         if target_vdu.get("affinity-or-anti-affinity-group-id"):
-            affinity_group = {}
             for affinity_group_id in target_vdu["affinity-or-anti-affinity-group-id"]:
+                affinity_group = {}
                 affinity_group_text = (
                     ns_preffix + ":affinity-or-anti-affinity-group." + affinity_group_id
                 )
 
+                if not isinstance(extra_dict.get("depends_on"), list):
+                    raise NsException("Invalid extra_dict format.")
+
                 extra_dict["depends_on"].append(affinity_group_text)
                 affinity_group["affinity_group_id"] = "TASK-" + affinity_group_text
                 affinity_group_list.append(affinity_group)
 
+        return affinity_group_list
+
+    @staticmethod
+    def _process_vdu_params(
+        target_vdu: Dict[str, Any],
+        indata: Dict[str, Any],
+        vim_info: Dict[str, Any],
+        target_record_id: str,
+        **kwargs: Dict[str, Any],
+    ) -> Dict[str, Any]:
+        """Function to process VDU parameters.
+
+        Args:
+            target_vdu (Dict[str, Any]): [description]
+            indata (Dict[str, Any]): [description]
+            vim_info (Dict[str, Any]): [description]
+            target_record_id (str): [description]
+
+        Returns:
+            Dict[str, Any]: [description]
+        """
+        vnfr_id = kwargs.get("vnfr_id")
+        nsr_id = kwargs.get("nsr_id")
+        vnfr = kwargs.get("vnfr")
+        vdu2cloud_init = kwargs.get("vdu2cloud_init")
+        tasks_by_target_record_id = kwargs.get("tasks_by_target_record_id")
+        logger = kwargs.get("logger")
+        db = kwargs.get("db")
+        fs = kwargs.get("fs")
+        ro_nsr_public_key = kwargs.get("ro_nsr_public_key")
+
+        vnf_preffix = "vnfrs:{}".format(vnfr_id)
+        ns_preffix = "nsrs:{}".format(nsr_id)
+        image_text = ns_preffix + ":image." + target_vdu["ns-image-id"]
+        flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"]
+        extra_dict = {"depends_on": [image_text, flavor_text]}
+        net_list = []
+        persistent_root_disk = {}
+        persistent_ordinary_disk = {}
+        vdu_instantiation_volumes_list = []
+        disk_list = []
+        vnfd_id = vnfr["vnfd-id"]
+        vnfd = db.get_one("vnfds", {"_id": vnfd_id})
+        # If the position info is provided for all the interfaces, it will be sorted
+        # according to position number ascendingly.
+        if all(
+            True if i.get("position") is not None else False
+            for i in target_vdu["interfaces"]
+        ):
+            Ns._sort_vdu_interfaces(target_vdu)
+
+        # If the position info is provided for some interfaces but not all of them, the interfaces
+        # which has specific position numbers will be placed and others' positions will not be taken care.
+        else:
+            Ns._partially_locate_vdu_interfaces(target_vdu)
+
+        # If the position info is not provided for the interfaces, interfaces will be attached
+        # according to the order in the VNFD.
+        Ns._prepare_vdu_interfaces(
+            target_vdu,
+            extra_dict,
+            ns_preffix,
+            vnf_preffix,
+            logger,
+            tasks_by_target_record_id,
+            net_list,
+        )
+
+        # cloud config
+        cloud_config = Ns._prepare_vdu_cloud_init(target_vdu, vdu2cloud_init, db, fs)
+
+        # Prepare VDU ssh keys
+        Ns._prepare_vdu_ssh_keys(target_vdu, ro_nsr_public_key, cloud_config)
+
+        if target_vdu.get("additionalParams"):
+            vdu_instantiation_volumes_list = (
+                target_vdu.get("additionalParams").get("OSM", {}).get("vdu_volumes")
+            )
+
+        if vdu_instantiation_volumes_list:
+            # Find the root volumes and add to the disk_list
+            persistent_root_disk = Ns.find_persistent_root_volumes(
+                vnfd, target_vdu, vdu_instantiation_volumes_list, disk_list
+            )
+
+            # Find the ordinary volumes which are not added to the persistent_root_disk
+            # and put them to the disk list
+            Ns.find_persistent_volumes(
+                persistent_root_disk,
+                target_vdu,
+                vdu_instantiation_volumes_list,
+                disk_list,
+            )
+
+        else:
+            # Vdu_instantiation_volumes_list is empty
+            # First get add the persistent root disks to disk_list
+            Ns._add_persistent_root_disk_to_disk_list(
+                vnfd, target_vdu, persistent_root_disk, disk_list
+            )
+            # Add the persistent non-root disks to disk_list
+            Ns._add_persistent_ordinary_disks_to_disk_list(
+                target_vdu,
+                persistent_root_disk,
+                persistent_ordinary_disk,
+                disk_list,
+                extra_dict,
+                vnfd["id"],
+                nsr_id,
+            )
+
+        affinity_group_list = Ns._prepare_vdu_affinity_group_list(
+            target_vdu, extra_dict, ns_preffix
+        )
+
         extra_dict["params"] = {
             "name": "{}-{}-{}-{}".format(
                 indata["name"][:16],
@@ -1131,7 +1853,24 @@ class Ns(object):
             "availability_zone_index": None,  # TODO
             "availability_zone_list": None,  # TODO
         }
+        return extra_dict
 
+    @staticmethod
+    def _process_shared_volumes_params(
+        target_shared_volume: Dict[str, Any],
+        indata: Dict[str, Any],
+        vim_info: Dict[str, Any],
+        target_record_id: str,
+        **kwargs: Dict[str, Any],
+    ) -> Dict[str, Any]:
+        extra_dict = {}
+        shared_volume_data = {
+            "size": target_shared_volume["size-of-storage"],
+            "name": target_shared_volume["id"],
+            "type": target_shared_volume["type-of-storage"],
+            "keep": Ns.is_volume_keeping_required(target_shared_volume),
+        }
+        extra_dict["params"] = shared_volume_data
         return extra_dict
 
     @staticmethod
@@ -1169,7 +1908,6 @@ class Ns(object):
         extra_dict["params"] = {
             "affinity_group_data": affinity_group_data,
         }
-
         return extra_dict
 
     @staticmethod
@@ -1205,11 +1943,11 @@ class Ns(object):
 
         vim_details = {}
         vim_details_text = existing_vdu["vim_info"][target_id].get("vim_details", None)
+
         if vim_details_text:
             vim_details = yaml.safe_load(f"{vim_details_text}")
 
         for iface_index, interface in enumerate(existing_vdu["interfaces"]):
-
             if "port-security-enabled" in interface:
                 interface["port_security"] = interface.pop("port-security-enabled")
 
@@ -1230,7 +1968,9 @@ class Ns(object):
                     "floating_ip",
                 )
             }
-            existing_ifaces = existing_vdu["vim_info"][target_id].get("interfaces", [])
+            existing_ifaces = existing_vdu["vim_info"][target_id].get(
+                "interfaces_backup", []
+            )
             net_id = next(
                 (
                     i["vim_net_id"]
@@ -1397,12 +2137,17 @@ class Ns(object):
         process_params = None
         vdu2cloud_init = indata.get("cloud_init_content") or {}
         ro_nsr_public_key = db_ro_nsr["public_key"]
-
         # According to the type of item, the path, the target_list,
         # the existing_list and the method to process params are set
         db_path = self.db_path_map[item]
         process_params = self.process_params_function_map[item]
-        if item in ("net", "vdu"):
+
+        if item in ("sfp", "classification", "sf", "sfi"):
+            db_record = "nsrs:{}:{}".format(nsr_id, db_path)
+            target_vnffg = indata.get("vnffg", [])[0]
+            target_list = target_vnffg[item]
+            existing_list = db_nsr.get(item, [])
+        elif item in ("net", "vdu"):
             # This case is specific for the NS VLD (not applied to VDU)
             if vnfr is None:
                 db_record = "nsrs:{}:{}".format(nsr_id, db_path)
@@ -1417,27 +2162,29 @@ class Ns(object):
                 )
                 target_list = target_vnf.get(db_path, []) if target_vnf else []
                 existing_list = vnfr.get(db_path, [])
-        elif item in ("image", "flavor", "affinity-or-anti-affinity-group"):
+        elif item in (
+            "image",
+            "flavor",
+            "affinity-or-anti-affinity-group",
+            "shared-volumes",
+        ):
             db_record = "nsrs:{}:{}".format(nsr_id, db_path)
             target_list = indata.get(item, [])
             existing_list = db_nsr.get(item, [])
         else:
             raise NsException("Item not supported: {}", item)
-
         # ensure all the target_list elements has an "id". If not assign the index as id
         if target_list is None:
             target_list = []
         for target_index, tl in enumerate(target_list):
             if tl and not tl.get("id"):
                 tl["id"] = str(target_index)
-
         # step 1 items (networks,vdus,...) to be deleted/updated
         for item_index, existing_item in enumerate(existing_list):
             target_item = next(
                 (t for t in target_list if t["id"] == existing_item["id"]),
                 None,
             )
-
             for target_vim, existing_viminfo in existing_item.get(
                 "vim_info", {}
             ).items():
@@ -1455,7 +2202,7 @@ class Ns(object):
                     target_record_id = "{}.{}".format(db_record, existing_item["id"])
                     item_ = item
 
-                    if target_vim.startswith("sdn"):
+                    if target_vim.startswith("sdn") or target_vim.startswith("wim"):
                         # item must be sdn-net instead of net if target_vim is a sdn
                         item_ = "sdn_net"
                         target_record_id += ".sdn"
@@ -1481,7 +2228,6 @@ class Ns(object):
         # step 2 items (networks,vdus,...) to be created
         for target_item in target_list:
             item_index = -1
-
             for item_index, existing_item in enumerate(existing_list):
                 if existing_item["id"] == target_item["id"]:
                     break
@@ -1503,19 +2249,27 @@ class Ns(object):
                 target_record_id = "{}.{}".format(db_record, target_item["id"])
                 item_ = item
 
-                if target_vim.startswith("sdn"):
+                if target_vim.startswith("sdn") or target_vim.startswith("wim"):
                     # item must be sdn-net instead of net if target_vim is a sdn
                     item_ = "sdn_net"
                     target_record_id += ".sdn"
 
                 kwargs = {}
-                self.logger.warning(
+                self.logger.debug(
                     "ns.calculate_diff_items target_item={}".format(target_item)
                 )
-                if process_params == Ns._process_vdu_params:
-                    self.logger.warning(
-                        "calculate_diff_items self.fs={}".format(self.fs)
+                if process_params == Ns._process_flavor_params:
+                    kwargs.update(
+                        {
+                            "db": self.db,
+                        }
+                    )
+                    self.logger.debug(
+                        "calculate_diff_items for flavor kwargs={}".format(kwargs)
                     )
+
+                if process_params == Ns._process_vdu_params:
+                    self.logger.debug("calculate_diff_items self.fs={}".format(self.fs))
                     kwargs.update(
                         {
                             "vnfr_id": vnfr_id,
@@ -1529,7 +2283,16 @@ class Ns(object):
                             "ro_nsr_public_key": ro_nsr_public_key,
                         }
                     )
-                    self.logger.warning("calculate_diff_items kwargs={}".format(kwargs))
+                    self.logger.debug("calculate_diff_items kwargs={}".format(kwargs))
+                if (
+                    process_params == Ns._process_sfi_params
+                    or Ns._process_sf_params
+                    or Ns._process_classification_params
+                    or Ns._process_sfp_params
+                ):
+                    kwargs.update({"nsr_id": nsr_id, "db": self.db})
+
+                    self.logger.debug("calculate_diff_items kwargs={}".format(kwargs))
 
                 extra_dict = process_params(
                     target_item,
@@ -1564,6 +2327,253 @@ class Ns(object):
 
         return diff_items, task_index
 
+    def _process_vnfgd_sfp(self, sfp):
+        processed_sfp = {}
+        # getting sfp name, sfs and classifications in sfp to store it in processed_sfp
+        processed_sfp["id"] = sfp["id"]
+        sfs_in_sfp = [
+            sf["id"] for sf in sfp.get("position-desc-id", [])[0].get("cp-profile-id")
+        ]
+        classifications_in_sfp = [
+            classi["id"]
+            for classi in sfp.get("position-desc-id", [])[0].get("match-attributes")
+        ]
+
+        # creating a list of sfp with sfs and classifications
+        processed_sfp["sfs"] = sfs_in_sfp
+        processed_sfp["classifications"] = classifications_in_sfp
+
+        return processed_sfp
+
+    def _process_vnfgd_sf(self, sf):
+        processed_sf = {}
+        # getting name of sf
+        processed_sf["id"] = sf["id"]
+        # getting sfis in sf
+        sfis_in_sf = sf.get("constituent-profile-elements")
+        sorted_sfis = sorted(sfis_in_sf, key=lambda i: i["order"])
+        # getting sfis names
+        processed_sf["sfis"] = [sfi["id"] for sfi in sorted_sfis]
+
+        return processed_sf
+
+    def _process_vnfgd_sfi(self, sfi, db_vnfrs):
+        processed_sfi = {}
+        # getting name of sfi
+        processed_sfi["id"] = sfi["id"]
+
+        # getting ports in sfi
+        ingress_port = sfi["ingress-constituent-cpd-id"]
+        egress_port = sfi["egress-constituent-cpd-id"]
+        sfi_vnf_member_index = sfi["constituent-base-element-id"]
+
+        processed_sfi["ingress_port"] = ingress_port
+        processed_sfi["egress_port"] = egress_port
+
+        all_vnfrs = db_vnfrs.values()
+
+        sfi_vnfr = [
+            element
+            for element in all_vnfrs
+            if element["member-vnf-index-ref"] == sfi_vnf_member_index
+        ]
+        processed_sfi["vnfr_id"] = sfi_vnfr[0]["id"]
+
+        sfi_vnfr_cp = sfi_vnfr[0]["connection-point"]
+
+        ingress_port_index = [
+            c for c, element in enumerate(sfi_vnfr_cp) if element["id"] == ingress_port
+        ]
+        ingress_port_index = ingress_port_index[0]
+
+        processed_sfi["vdur_id"] = sfi_vnfr_cp[ingress_port_index][
+            "connection-point-vdu-id"
+        ]
+        processed_sfi["ingress_port_index"] = ingress_port_index
+        processed_sfi["egress_port_index"] = ingress_port_index
+
+        if egress_port != ingress_port:
+            egress_port_index = [
+                c
+                for c, element in enumerate(sfi_vnfr_cp)
+                if element["id"] == egress_port
+            ]
+            processed_sfi["egress_port_index"] = egress_port_index
+
+        return processed_sfi
+
+    def _process_vnfgd_classification(self, classification, db_vnfrs):
+        processed_classification = {}
+
+        processed_classification = deepcopy(classification)
+        classi_vnf_member_index = processed_classification[
+            "constituent-base-element-id"
+        ]
+        logical_source_port = processed_classification["constituent-cpd-id"]
+
+        all_vnfrs = db_vnfrs.values()
+
+        classi_vnfr = [
+            element
+            for element in all_vnfrs
+            if element["member-vnf-index-ref"] == classi_vnf_member_index
+        ]
+        processed_classification["vnfr_id"] = classi_vnfr[0]["id"]
+
+        classi_vnfr_cp = classi_vnfr[0]["connection-point"]
+
+        ingress_port_index = [
+            c
+            for c, element in enumerate(classi_vnfr_cp)
+            if element["id"] == logical_source_port
+        ]
+        ingress_port_index = ingress_port_index[0]
+
+        processed_classification["ingress_port_index"] = ingress_port_index
+        processed_classification["vdur_id"] = classi_vnfr_cp[ingress_port_index][
+            "connection-point-vdu-id"
+        ]
+
+        return processed_classification
+
+    def _update_db_nsr_with_vnffg(self, processed_vnffg, vim_info, nsr_id):
+        """This method used to add viminfo dict to sfi, sf sfp and classification in indata and count info in db_nsr.
+
+        Args:
+            processed_vnffg (Dict[str, Any]): deployment info
+            vim_info (Dict): dictionary to store VIM resource information
+            nsr_id (str): NSR id
+
+        Returns: None
+        """
+
+        nsr_sfi = {}
+        nsr_sf = {}
+        nsr_sfp = {}
+        nsr_classification = {}
+        db_nsr_vnffg = deepcopy(processed_vnffg)
+
+        for count, sfi in enumerate(processed_vnffg["sfi"]):
+            sfi["vim_info"] = vim_info
+            sfi_count = "sfi.{}".format(count)
+            nsr_sfi[sfi_count] = db_nsr_vnffg["sfi"][count]
+
+        self.db.set_list("nsrs", {"_id": nsr_id}, nsr_sfi)
+
+        for count, sf in enumerate(processed_vnffg["sf"]):
+            sf["vim_info"] = vim_info
+            sf_count = "sf.{}".format(count)
+            nsr_sf[sf_count] = db_nsr_vnffg["sf"][count]
+
+        self.db.set_list("nsrs", {"_id": nsr_id}, nsr_sf)
+
+        for count, sfp in enumerate(processed_vnffg["sfp"]):
+            sfp["vim_info"] = vim_info
+            sfp_count = "sfp.{}".format(count)
+            nsr_sfp[sfp_count] = db_nsr_vnffg["sfp"][count]
+
+        self.db.set_list("nsrs", {"_id": nsr_id}, nsr_sfp)
+
+        for count, classi in enumerate(processed_vnffg["classification"]):
+            classi["vim_info"] = vim_info
+            classification_count = "classification.{}".format(count)
+            nsr_classification[classification_count] = db_nsr_vnffg["classification"][
+                count
+            ]
+
+            self.db.set_list("nsrs", {"_id": nsr_id}, nsr_classification)
+
+    def process_vnffgd_descriptor(
+        self,
+        indata: dict,
+        nsr_id: str,
+        db_nsr: dict,
+        db_vnfrs: dict,
+    ) -> dict:
+        """This method used to process vnffgd parameters from descriptor.
+
+        Args:
+            indata (Dict[str, Any]): deployment info
+            nsr_id (str): NSR id
+            db_nsr: NSR record from DB
+            db_vnfrs: VNFRS record from DB
+
+        Returns:
+            Dict: Processed vnffg parameters.
+        """
+
+        processed_vnffg = {}
+        vnffgd = db_nsr.get("nsd", {}).get("vnffgd")
+        vnf_list = indata.get("vnf", [])
+        vim_text = ""
+
+        if vnf_list:
+            vim_text = "vim:" + vnf_list[0].get("vim-account-id", "")
+
+        vim_info = {}
+        vim_info[vim_text] = {}
+        processed_sfps = []
+        processed_classifications = []
+        processed_sfs = []
+        processed_sfis = []
+
+        # setting up intial empty entries for vnffg items in mongodb.
+        self.db.set_list(
+            "nsrs",
+            {"_id": nsr_id},
+            {
+                "sfi": [],
+                "sf": [],
+                "sfp": [],
+                "classification": [],
+            },
+        )
+
+        vnffg = vnffgd[0]
+        # getting sfps
+        sfps = vnffg.get("nfpd")
+        for sfp in sfps:
+            processed_sfp = self._process_vnfgd_sfp(sfp)
+            # appending the list of processed sfps
+            processed_sfps.append(processed_sfp)
+
+            # getting sfs in sfp
+            sfs = sfp.get("position-desc-id")[0].get("cp-profile-id")
+            for sf in sfs:
+                processed_sf = self._process_vnfgd_sf(sf)
+
+                # appending the list of processed sfs
+                processed_sfs.append(processed_sf)
+
+                # getting sfis in sf
+                sfis_in_sf = sf.get("constituent-profile-elements")
+                sorted_sfis = sorted(sfis_in_sf, key=lambda i: i["order"])
+
+                for sfi in sorted_sfis:
+                    processed_sfi = self._process_vnfgd_sfi(sfi, db_vnfrs)
+
+                    processed_sfis.append(processed_sfi)
+
+            classifications = sfp.get("position-desc-id")[0].get("match-attributes")
+            # getting classifications from sfp
+            for classification in classifications:
+                processed_classification = self._process_vnfgd_classification(
+                    classification, db_vnfrs
+                )
+
+                processed_classifications.append(processed_classification)
+
+        processed_vnffg["sfi"] = processed_sfis
+        processed_vnffg["sf"] = processed_sfs
+        processed_vnffg["classification"] = processed_classifications
+        processed_vnffg["sfp"] = processed_sfps
+
+        # adding viminfo dict to sfi, sf sfp and classification
+        self._update_db_nsr_with_vnffg(processed_vnffg, vim_info, nsr_id)
+
+        # updating indata with vnffg porcessed parameters
+        indata["vnffg"].append(processed_vnffg)
+
     def calculate_all_differences_to_deploy(
         self,
         indata,
@@ -1599,8 +2609,62 @@ class Ns(object):
         # set list with diffs:
         changes_list = []
 
+        # processing vnffg from descriptor parameter
+        vnffgd = db_nsr.get("nsd").get("vnffgd")
+        if vnffgd is not None:
+            indata["vnffg"] = []
+            vnf_list = indata["vnf"]
+            processed_vnffg = {}
+
+            # in case of ns-delete
+            if not vnf_list:
+                processed_vnffg["sfi"] = []
+                processed_vnffg["sf"] = []
+                processed_vnffg["classification"] = []
+                processed_vnffg["sfp"] = []
+
+                indata["vnffg"].append(processed_vnffg)
+
+            else:
+                self.process_vnffgd_descriptor(
+                    indata=indata,
+                    nsr_id=nsr_id,
+                    db_nsr=db_nsr,
+                    db_vnfrs=db_vnfrs,
+                )
+
+                # getting updated db_nsr having vnffg parameters
+                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+
+                self.logger.debug(
+                    "After processing vnffd parameters indata={} nsr={}".format(
+                        indata, db_nsr
+                    )
+                )
+
+            for item in ["sfp", "classification", "sf", "sfi"]:
+                self.logger.debug("process NS={} {}".format(nsr_id, item))
+                diff_items, task_index = self.calculate_diff_items(
+                    indata=indata,
+                    db_nsr=db_nsr,
+                    db_ro_nsr=db_ro_nsr,
+                    db_nsr_update=db_nsr_update,
+                    item=item,
+                    tasks_by_target_record_id=tasks_by_target_record_id,
+                    action_id=action_id,
+                    nsr_id=nsr_id,
+                    task_index=task_index,
+                    vnfr_id=None,
+                )
+                changes_list += diff_items
+
         # NS vld, image and flavor
-        for item in ["net", "image", "flavor", "affinity-or-anti-affinity-group"]:
+        for item in [
+            "net",
+            "image",
+            "flavor",
+            "affinity-or-anti-affinity-group",
+        ]:
             self.logger.debug("process NS={} {}".format(nsr_id, item))
             diff_items, task_index = self.calculate_diff_items(
                 indata=indata,
@@ -1619,7 +2683,7 @@ class Ns(object):
         # VNF vlds and vdus
         for vnfr_id, vnfr in db_vnfrs.items():
             # vnfr_id need to be set as global variable for among others nested method _process_vdu_params
-            for item in ["net", "vdu"]:
+            for item in ["net", "vdu", "shared-volumes"]:
                 self.logger.debug("process VNF={} {}".format(vnfr_id, item))
                 diff_items, task_index = self.calculate_diff_items(
                     indata=indata,
@@ -1667,7 +2731,7 @@ class Ns(object):
                 extra_dict=change.get("extra_dict", None),
             )
 
-            self.logger.warning("ns.define_all_tasks task={}".format(task))
+            self.logger.debug("ns.define_all_tasks task={}".format(task))
             tasks_by_target_record_id[change["target_record_id"]] = task
             db_new_tasks.append(task)
 
@@ -1751,7 +2815,7 @@ class Ns(object):
 
         for db_task in db_new_tasks:
             target_id = db_task.pop("target_id")
-            self.logger.warning("target_id={} db_task={}".format(target_id, db_task))
+            self.logger.debug("target_id={} db_task={}".format(target_id, db_task))
 
             action = db_task.get("action", None)
 
@@ -1772,7 +2836,7 @@ class Ns(object):
                 db_ro_task["vim_info"]["vim_id"] = db_task.get("vim_id", None)
 
             nb_ro_tasks += 1
-            self.logger.warning("upload_all_tasks db_ro_task={}".format(db_ro_task))
+            self.logger.debug("upload_all_tasks db_ro_task={}".format(db_ro_task))
             self.db.create("ro_tasks", db_ro_task)
 
         self.logger.debug(
@@ -1846,30 +2910,39 @@ class Ns(object):
 
         # Check each VNF of the target
         for target_vnf in target_list:
-            # Find this VNF in the list from DB
-            vnfr_id = target_vnf.get("vnfInstanceId", None)
-            if vnfr_id:
-                existing_vnf = db_vnfrs.get(vnfr_id)
-                db_record = "vnfrs:{}:{}".format(vnfr_id, db_path)
-                # vim_account_id = existing_vnf.get("vim-account-id", "")
+            # Find this VNF in the list from DB, raise exception if vnfInstanceId is not found
+            vnfr_id = target_vnf["vnfInstanceId"]
+            existing_vnf = db_vnfrs.get(vnfr_id, {})
+            db_record = "vnfrs:{}:{}".format(vnfr_id, db_path)
+            # vim_account_id = existing_vnf.get("vim-account-id", "")
 
+            target_vdus = target_vnf.get("additionalParams", {}).get("vdu", [])
             # Check each VDU of this VNF
-            for target_vdu in target_vnf["additionalParams"].get("vdu", None):
+            if not target_vdus:
+                # Create target_vdu_list from DB, if VDUs are not specified
+                target_vdus = []
+                for existing_vdu in existing_vnf.get("vdur"):
+                    vdu_name = existing_vdu.get("vdu-name", None)
+                    vdu_index = existing_vdu.get("count-index", 0)
+                    vdu_to_be_healed = {"vdu-id": vdu_name, "count-index": vdu_index}
+                    target_vdus.append(vdu_to_be_healed)
+            for target_vdu in target_vdus:
                 vdu_name = target_vdu.get("vdu-id", None)
                 # For multi instance VDU count-index is mandatory
                 # For single session VDU count-indes is 0
                 count_index = target_vdu.get("count-index", 0)
                 item_index = 0
-                existing_instance = None
-                for instance in existing_vnf.get("vdur", None):
-                    if (
-                        instance["vdu-name"] == vdu_name
-                        and instance["count-index"] == count_index
-                    ):
-                        existing_instance = instance
-                        break
-                    else:
-                        item_index += 1
+                existing_instance = {}
+                if existing_vnf:
+                    for instance in existing_vnf.get("vdur", {}):
+                        if (
+                            instance["vdu-name"] == vdu_name
+                            and instance["count-index"] == count_index
+                        ):
+                            existing_instance = instance
+                            break
+                        else:
+                            item_index += 1
 
                 target_record_id = "{}.{}".format(db_record, existing_instance["id"])
 
@@ -2322,9 +3395,9 @@ class Ns(object):
 
         return_data = {
             "status": global_status,
-            "details": ". ".join(details)
-            if details
-            else "progress {}/{}".format(done, total),
+            "details": (
+                ". ".join(details) if details else "progress {}/{}".format(done, total)
+            ),
             "nsr_id": nsr_id,
             "action_id": action_id,
             "tasks": task_list,
@@ -2346,6 +3419,101 @@ class Ns(object):
 
         return None, None, True
 
+    def rebuild_start_stop_task(
+        self,
+        vdu_id,
+        vnf_id,
+        vdu_index,
+        action_id,
+        nsr_id,
+        task_index,
+        target_vim,
+        extra_dict,
+    ):
+        self._assign_vim(target_vim)
+        target_record = "vnfrs:{}:vdur.{}.vim_info.{}".format(
+            vnf_id, vdu_index, target_vim
+        )
+        target_record_id = "vnfrs:{}:vdur.{}".format(vnf_id, vdu_id)
+        deployment_info = {
+            "action_id": action_id,
+            "nsr_id": nsr_id,
+            "task_index": task_index,
+        }
+
+        task = Ns._create_task(
+            deployment_info=deployment_info,
+            target_id=target_vim,
+            item="update",
+            action="EXEC",
+            target_record=target_record,
+            target_record_id=target_record_id,
+            extra_dict=extra_dict,
+        )
+        return task
+
+    def rebuild_start_stop(
+        self, session, action_dict, version, nsr_id, *args, **kwargs
+    ):
+        task_index = 0
+        extra_dict = {}
+        now = time()
+        action_id = action_dict.get("action_id", str(uuid4()))
+        step = ""
+        logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id)
+        self.logger.debug(logging_text + "Enter")
+
+        action = list(action_dict.keys())[0]
+        task_dict = action_dict.get(action)
+        vim_vm_id = action_dict.get(action).get("vim_vm_id")
+
+        if action_dict.get("stop"):
+            action = "shutoff"
+        db_new_tasks = []
+        try:
+            step = "lock the operation & do task creation"
+            with self.write_lock:
+                extra_dict["params"] = {
+                    "vim_vm_id": vim_vm_id,
+                    "action": action,
+                }
+                task = self.rebuild_start_stop_task(
+                    task_dict["vdu_id"],
+                    task_dict["vnf_id"],
+                    task_dict["vdu_index"],
+                    action_id,
+                    nsr_id,
+                    task_index,
+                    task_dict["target_vim"],
+                    extra_dict,
+                )
+                db_new_tasks.append(task)
+                step = "upload Task to db"
+                self.upload_all_tasks(
+                    db_new_tasks=db_new_tasks,
+                    now=now,
+                )
+                self.logger.debug(
+                    logging_text + "Exit. Created {} tasks".format(len(db_new_tasks))
+                )
+                return (
+                    {"status": "ok", "nsr_id": nsr_id, "action_id": action_id},
+                    action_id,
+                    True,
+                )
+        except Exception as e:
+            if isinstance(e, (DbException, NsException)):
+                self.logger.error(
+                    logging_text + "Exit Exception while '{}': {}".format(step, e)
+                )
+            else:
+                e = traceback_format_exc()
+                self.logger.critical(
+                    logging_text + "Exit Exception while '{}': {}".format(step, e),
+                    exc_info=True,
+                )
+            raise NsException(e)
+
     def get_deploy(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
         nsrs = self.db.get_list("nsrs", {})
         return_data = []
@@ -2371,7 +3539,9 @@ class Ns(object):
     ):
         target_vim, vim_info = next(k_v for k_v in vdu["vim_info"].items())
         self._assign_vim(target_vim)
-        target_record = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu_index)
+        target_record = "vnfrs:{}:vdur.{}.vim_info.{}".format(
+            vnf["_id"], vdu_index, target_vim
+        )
         target_record_id = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu["id"])
         deployment_info = {
             "action_id": action_id,
@@ -2435,7 +3605,6 @@ class Ns(object):
                             task_index += 1
                             break
                 else:
-
                     for vdu_index, vdu in enumerate(db_vnfr["vdur"]):
                         extra_dict["params"] = {
                             "vim_vm_id": vdu["vim-id"],
@@ -2480,3 +3649,154 @@ class Ns(object):
                     exc_info=True,
                 )
             raise NsException(e)
+
+    def verticalscale_task(
+        self, vdu, vnf, vdu_index, action_id, nsr_id, task_index, extra_dict
+    ):
+        target_vim, vim_info = next(k_v for k_v in vdu["vim_info"].items())
+        self._assign_vim(target_vim)
+        ns_preffix = "nsrs:{}".format(nsr_id)
+        flavor_text = ns_preffix + ":flavor." + vdu["ns-flavor-id"]
+        extra_dict["depends_on"] = [flavor_text]
+        extra_dict["params"].update({"flavor_id": "TASK-" + flavor_text})
+        target_record = "vnfrs:{}:vdur.{}.vim_info.{}".format(
+            vnf["_id"], vdu_index, target_vim
+        )
+        target_record_id = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu["id"])
+        deployment_info = {
+            "action_id": action_id,
+            "nsr_id": nsr_id,
+            "task_index": task_index,
+        }
+
+        task = Ns._create_task(
+            deployment_info=deployment_info,
+            target_id=target_vim,
+            item="verticalscale",
+            action="EXEC",
+            target_record=target_record,
+            target_record_id=target_record_id,
+            extra_dict=extra_dict,
+        )
+        return task
+
+    def verticalscale_flavor_task(
+        self, vdu, vnf, vdu_index, action_id, nsr_id, task_index, extra_dict
+    ):
+        target_vim, vim_info = next(k_v for k_v in vdu["vim_info"].items())
+        self._assign_vim(target_vim)
+        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+        target_record = "nsrs:{}:flavor.{}.vim_info.{}".format(
+            nsr_id, len(db_nsr["flavor"]) - 1, target_vim
+        )
+        target_record_id = "nsrs:{}:flavor.{}".format(nsr_id, len(db_nsr["flavor"]) - 1)
+        deployment_info = {
+            "action_id": action_id,
+            "nsr_id": nsr_id,
+            "task_index": task_index,
+        }
+        task = Ns._create_task(
+            deployment_info=deployment_info,
+            target_id=target_vim,
+            item="flavor",
+            action="CREATE",
+            target_record=target_record,
+            target_record_id=target_record_id,
+            extra_dict=extra_dict,
+        )
+        return task
+
+    def verticalscale(self, session, indata, version, nsr_id, *args, **kwargs):
+        task_index = 0
+        extra_dict = {}
+        flavor_extra_dict = {}
+        now = time()
+        action_id = indata.get("action_id", str(uuid4()))
+        step = ""
+        logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id)
+        self.logger.debug(logging_text + "Enter")
+        try:
+            VnfFlavorData = indata.get("changeVnfFlavorData")
+            vnf_instance_id = VnfFlavorData["vnfInstanceId"]
+            step = "Getting vnfrs from db"
+            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
+            vduid = VnfFlavorData["additionalParams"]["vduid"]
+            vduCountIndex = VnfFlavorData["additionalParams"]["vduCountIndex"]
+            virtualMemory = VnfFlavorData["additionalParams"]["virtualMemory"]
+            numVirtualCpu = VnfFlavorData["additionalParams"]["numVirtualCpu"]
+            sizeOfStorage = VnfFlavorData["additionalParams"]["sizeOfStorage"]
+            flavor_dict = {
+                "name": vduid + "-flv",
+                "ram": virtualMemory,
+                "vcpus": numVirtualCpu,
+                "disk": sizeOfStorage,
+            }
+            flavor_data = {
+                "ram": virtualMemory,
+                "vcpus": numVirtualCpu,
+                "disk": sizeOfStorage,
+            }
+            flavor_extra_dict["find_params"] = {"flavor_data": flavor_data}
+            flavor_extra_dict["params"] = {"flavor_data": flavor_dict}
+            db_new_tasks = []
+            step = "Creating Tasks for vertical scaling"
+            with self.write_lock:
+                for vdu_index, vdu in enumerate(db_vnfr["vdur"]):
+                    if (
+                        vdu["vdu-id-ref"] == vduid
+                        and vdu["count-index"] == vduCountIndex
+                    ):
+                        extra_dict["params"] = {
+                            "vim_vm_id": vdu["vim-id"],
+                            "flavor_dict": flavor_dict,
+                            "vdu-id-ref": vdu["vdu-id-ref"],
+                            "count-index": vdu["count-index"],
+                            "vnf_instance_id": vnf_instance_id,
+                        }
+                        task = self.verticalscale_flavor_task(
+                            vdu,
+                            db_vnfr,
+                            vdu_index,
+                            action_id,
+                            nsr_id,
+                            task_index,
+                            flavor_extra_dict,
+                        )
+                        db_new_tasks.append(task)
+                        task_index += 1
+                        task = self.verticalscale_task(
+                            vdu,
+                            db_vnfr,
+                            vdu_index,
+                            action_id,
+                            nsr_id,
+                            task_index,
+                            extra_dict,
+                        )
+                        db_new_tasks.append(task)
+                        task_index += 1
+                        break
+                self.upload_all_tasks(
+                    db_new_tasks=db_new_tasks,
+                    now=now,
+                )
+            self.logger.debug(
+                logging_text + "Exit. Created {} tasks".format(len(db_new_tasks))
+            )
+            return (
+                {"status": "ok", "nsr_id": nsr_id, "action_id": action_id},
+                action_id,
+                True,
+            )
+        except Exception as e:
+            if isinstance(e, (DbException, NsException)):
+                self.logger.error(
+                    logging_text + "Exit Exception while '{}': {}".format(step, e)
+                )
+            else:
+                e = traceback_format_exc()
+                self.logger.critical(
+                    logging_text + "Exit Exception while '{}': {}".format(step, e),
+                    exc_info=True,
+                )
+            raise NsException(e)