Bug Fix - 2305: Automated scaling of Vnf is not happening through metrics collected...
[osm/LCM.git] / osm_lcm / ns.py
index 9940a8f..76f0bf5 100644 (file)
@@ -30,9 +30,11 @@ from jinja2 import (
     TemplateNotFound,
     StrictUndefined,
     UndefinedError,
     TemplateNotFound,
     StrictUndefined,
     UndefinedError,
+    select_autoescape,
 )
 
 from osm_lcm import ROclient
 )
 
 from osm_lcm import ROclient
+from osm_lcm.data_utils.lcm_config import LcmCfg
 from osm_lcm.data_utils.nsr import (
     get_deployed_kdu,
     get_deployed_vca,
 from osm_lcm.data_utils.nsr import (
     get_deployed_kdu,
     get_deployed_vca,
@@ -51,13 +53,14 @@ from osm_lcm.data_utils.vca import (
 from osm_lcm.ng_ro import NgRoClient, NgRoException
 from osm_lcm.lcm_utils import (
     LcmException,
 from osm_lcm.ng_ro import NgRoClient, NgRoException
 from osm_lcm.lcm_utils import (
     LcmException,
-    LcmExceptionNoMgmtIP,
     LcmBase,
     deep_get,
     get_iterable,
     populate_dict,
     check_juju_bundle_existence,
     get_charm_artifact_path,
     LcmBase,
     deep_get,
     get_iterable,
     populate_dict,
     check_juju_bundle_existence,
     get_charm_artifact_path,
+    get_ee_id_parts,
+    vld_to_ro_ip_profile,
 )
 from osm_lcm.data_utils.nsd import (
     get_ns_configuration_relation_list,
 )
 from osm_lcm.data_utils.nsd import (
     get_ns_configuration_relation_list,
@@ -82,13 +85,18 @@ from osm_lcm.data_utils.vnfd import (
     get_juju_ee_ref,
     get_kdu_resource_profile,
     find_software_version,
     get_juju_ee_ref,
     get_kdu_resource_profile,
     find_software_version,
+    check_helm_ee_in_ns,
 )
 from osm_lcm.data_utils.list_utils import find_in_list
 )
 from osm_lcm.data_utils.list_utils import find_in_list
-from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
+from osm_lcm.data_utils.vnfr import (
+    get_osm_params,
+    get_vdur_index,
+    get_kdur,
+    get_volumes_from_instantiation_params,
+)
 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
 from osm_lcm.data_utils.database.vim_account import VimAccountDB
 from n2vc.definitions import RelationEndpoint
 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
 from osm_lcm.data_utils.database.vim_account import VimAccountDB
 from n2vc.definitions import RelationEndpoint
-from n2vc.k8s_helm_conn import K8sHelmConnector
 from n2vc.k8s_helm3_conn import K8sHelm3Connector
 from n2vc.k8s_juju_conn import K8sJujuConnector
 
 from n2vc.k8s_helm3_conn import K8sHelm3Connector
 from n2vc.k8s_juju_conn import K8sJujuConnector
 
@@ -97,6 +105,11 @@ from osm_common.fsbase import FsException
 
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
 
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
+from osm_lcm.data_utils.wim import (
+    get_sdn_ports,
+    get_target_wim_attrs,
+    select_feasible_wim_account,
+)
 
 from n2vc.n2vc_juju_conn import N2VCJujuConnector
 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
 
 from n2vc.n2vc_juju_conn import N2VCJujuConnector
 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
@@ -109,31 +122,27 @@ from copy import copy, deepcopy
 from time import time
 from uuid import uuid4
 
 from time import time
 from uuid import uuid4
 
-from random import randint
+from random import SystemRandom
 
 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
 
 
 class NsLcm(LcmBase):
 
 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
 
 
 class NsLcm(LcmBase):
-    timeout_vca_on_error = (
-        5 * 60
-    )  # Time for charm from first time at blocked,error status to mark as failed
-    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
-    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
-    timeout_charm_delete = 10 * 60
-    timeout_primitive = 30 * 60  # timeout for primitive execution
-    timeout_ns_update = 30 * 60  # timeout for ns update
-    timeout_progress_primitive = (
-        10 * 60
-    )  # timeout for some progress in a primitive execution
-    timeout_migrate = 1800  # default global timeout for migrating vnfs
-
     SUBOPERATION_STATUS_NOT_FOUND = -1
     SUBOPERATION_STATUS_NEW = -2
     SUBOPERATION_STATUS_SKIP = -3
     SUBOPERATION_STATUS_NOT_FOUND = -1
     SUBOPERATION_STATUS_NEW = -2
     SUBOPERATION_STATUS_SKIP = -3
+    EE_TLS_NAME = "ee-tls"
     task_name_deploy_vca = "Deploying VCA"
     task_name_deploy_vca = "Deploying VCA"
-
-    def __init__(self, msg, lcm_tasks, config, loop):
+    rel_operation_types = {
+        "GE": ">=",
+        "LE": "<=",
+        "GT": ">",
+        "LT": "<",
+        "EQ": "==",
+        "NE": "!=",
+    }
+
+    def __init__(self, msg, lcm_tasks, config: LcmCfg):
         """
         Init, Connect to database, filesystem storage, and messaging
         :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
         """
         Init, Connect to database, filesystem storage, and messaging
         :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
@@ -143,17 +152,14 @@ class NsLcm(LcmBase):
 
         self.db = Database().instance.db
         self.fs = Filesystem().instance.fs
 
         self.db = Database().instance.db
         self.fs = Filesystem().instance.fs
-        self.loop = loop
         self.lcm_tasks = lcm_tasks
         self.lcm_tasks = lcm_tasks
-        self.timeout = config["timeout"]
-        self.ro_config = config["ro_config"]
-        self.ng_ro = config["ro_config"].get("ng")
-        self.vca_config = config["VCA"].copy()
+        self.timeout = config.timeout
+        self.ro_config = config.RO
+        self.vca_config = config.VCA
 
         # create N2VC connector
         self.n2vc = N2VCJujuConnector(
             log=self.logger,
 
         # create N2VC connector
         self.n2vc = N2VCJujuConnector(
             log=self.logger,
-            loop=self.loop,
             on_update_db=self._on_update_n2vc_db,
             fs=self.fs,
             db=self.db,
             on_update_db=self._on_update_n2vc_db,
             fs=self.fs,
             db=self.db,
@@ -161,23 +167,13 @@ class NsLcm(LcmBase):
 
         self.conn_helm_ee = LCMHelmConn(
             log=self.logger,
 
         self.conn_helm_ee = LCMHelmConn(
             log=self.logger,
-            loop=self.loop,
             vca_config=self.vca_config,
             on_update_db=self._on_update_n2vc_db,
         )
 
             vca_config=self.vca_config,
             on_update_db=self._on_update_n2vc_db,
         )
 
-        self.k8sclusterhelm2 = K8sHelmConnector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            helm_command=self.vca_config.get("helmpath"),
-            log=self.logger,
-            on_update_db=None,
-            fs=self.fs,
-            db=self.db,
-        )
-
         self.k8sclusterhelm3 = K8sHelm3Connector(
         self.k8sclusterhelm3 = K8sHelm3Connector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            helm_command=self.vca_config.get("helm3path"),
+            kubectl_command=self.vca_config.kubectlpath,
+            helm_command=self.vca_config.helm3path,
             fs=self.fs,
             log=self.logger,
             db=self.db,
             fs=self.fs,
             log=self.logger,
             db=self.db,
@@ -185,17 +181,15 @@ class NsLcm(LcmBase):
         )
 
         self.k8sclusterjuju = K8sJujuConnector(
         )
 
         self.k8sclusterjuju = K8sJujuConnector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            juju_command=self.vca_config.get("jujupath"),
+            kubectl_command=self.vca_config.kubectlpath,
+            juju_command=self.vca_config.jujupath,
             log=self.logger,
             log=self.logger,
-            loop=self.loop,
             on_update_db=self._on_update_k8s_db,
             fs=self.fs,
             db=self.db,
         )
 
         self.k8scluster_map = {
             on_update_db=self._on_update_k8s_db,
             fs=self.fs,
             db=self.db,
         )
 
         self.k8scluster_map = {
-            "helm-chart": self.k8sclusterhelm2,
             "helm-chart-v3": self.k8sclusterhelm3,
             "chart": self.k8sclusterhelm3,
             "juju-bundle": self.k8sclusterjuju,
             "helm-chart-v3": self.k8sclusterhelm3,
             "chart": self.k8sclusterhelm3,
             "juju-bundle": self.k8sclusterjuju,
@@ -211,7 +205,16 @@ class NsLcm(LcmBase):
         }
 
         # create RO client
         }
 
         # create RO client
-        self.RO = NgRoClient(self.loop, **self.ro_config)
+        self.RO = NgRoClient(**self.ro_config.to_dict())
+
+        self.op_status_map = {
+            "instantiation": self.RO.status,
+            "termination": self.RO.status,
+            "migrate": self.RO.status,
+            "healing": self.RO.recreate_status,
+            "verticalscale": self.RO.status,
+            "start_stop_rebuild": self.RO.status,
+        }
 
     @staticmethod
     def increment_ip_mac(ip_mac, vm_index=1):
 
     @staticmethod
     def increment_ip_mac(ip_mac, vm_index=1):
@@ -235,26 +238,7 @@ class NsLcm(LcmBase):
             pass
         return None
 
             pass
         return None
 
-    def _on_update_ro_db(self, nsrs_id, ro_descriptor):
-
-        # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
-
-        try:
-            # TODO filter RO descriptor fields...
-
-            # write to database
-            db_dict = dict()
-            # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
-            db_dict["deploymentStatus"] = ro_descriptor
-            self.update_db_2("nsrs", nsrs_id, db_dict)
-
-        except Exception as e:
-            self.logger.warn(
-                "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
-            )
-
     async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
     async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
-
         # remove last dot from path (if exists)
         if path.endswith("."):
             path = path[:-1]
         # remove last dot from path (if exists)
         if path.endswith("."):
             path = path[:-1]
@@ -262,7 +246,6 @@ class NsLcm(LcmBase):
         # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
         #                   .format(table, filter, path, updated_data))
         try:
         # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
         #                   .format(table, filter, path, updated_data))
         try:
-
             nsr_id = filter.get("_id")
 
             # read ns record from database
             nsr_id = filter.get("_id")
 
             # read ns record from database
@@ -277,7 +260,6 @@ class NsLcm(LcmBase):
             # vcaStatus
             db_dict = dict()
             db_dict["vcaStatus"] = status_dict
             # vcaStatus
             db_dict = dict()
             db_dict["vcaStatus"] = status_dict
-            await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)
 
             # update configurationStatus for this VCA
             try:
 
             # update configurationStatus for this VCA
             try:
@@ -385,15 +367,6 @@ class NsLcm(LcmBase):
             db_dict = dict()
             db_dict["vcaStatus"] = {nsr_id: vca_status}
 
             db_dict = dict()
             db_dict["vcaStatus"] = {nsr_id: vca_status}
 
-            if cluster_type in ("juju-bundle", "juju"):
-                # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
-                #  status in a similar way between Juju Bundles and Helm Charts on this side
-                await self.k8sclusterjuju.update_vca_status(
-                    db_dict["vcaStatus"],
-                    kdu_instance,
-                    vca_id=vca_id,
-                )
-
             self.logger.debug(
                 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
             )
             self.logger.debug(
                 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
             )
@@ -408,7 +381,10 @@ class NsLcm(LcmBase):
     @staticmethod
     def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
         try:
     @staticmethod
     def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
         try:
-            env = Environment(undefined=StrictUndefined)
+            env = Environment(
+                undefined=StrictUndefined,
+                autoescape=select_autoescape(default_for_string=True, default=True),
+            )
             template = env.from_string(cloud_init_text)
             return template.render(additional_params or {})
         except UndefinedError as e:
             template = env.from_string(cloud_init_text)
             return template.render(additional_params or {})
         except UndefinedError as e:
@@ -455,38 +431,11 @@ class NsLcm(LcmBase):
 
     def _get_vdu_additional_params(self, db_vnfr, vdu_id):
         vdur = next(
 
     def _get_vdu_additional_params(self, db_vnfr, vdu_id):
         vdur = next(
-            (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]),
-            {}
+            (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
         )
         additional_params = vdur.get("additionalParams")
         return parse_yaml_strings(additional_params)
 
         )
         additional_params = vdur.get("additionalParams")
         return parse_yaml_strings(additional_params)
 
-    def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
-        """
-        Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
-        :param vnfd: input vnfd
-        :param new_id: overrides vnf id if provided
-        :param additionalParams: Instantiation params for VNFs provided
-        :param nsrId: Id of the NSR
-        :return: copy of vnfd
-        """
-        vnfd_RO = deepcopy(vnfd)
-        # remove unused by RO configuration, monitoring, scaling and internal keys
-        vnfd_RO.pop("_id", None)
-        vnfd_RO.pop("_admin", None)
-        vnfd_RO.pop("monitoring-param", None)
-        vnfd_RO.pop("scaling-group-descriptor", None)
-        vnfd_RO.pop("kdu", None)
-        vnfd_RO.pop("k8s-cluster", None)
-        if new_id:
-            vnfd_RO["id"] = new_id
-
-        # parse cloud-init or cloud-init-file with the provided variables using Jinja2
-        for vdu in get_iterable(vnfd_RO, "vdu"):
-            vdu.pop("cloud-init-file", None)
-            vdu.pop("cloud-init", None)
-        return vnfd_RO
-
     @staticmethod
     def ip_profile_2_RO(ip_profile):
         RO_ip_profile = deepcopy(ip_profile)
     @staticmethod
     def ip_profile_2_RO(ip_profile):
         RO_ip_profile = deepcopy(ip_profile)
@@ -505,33 +454,7 @@ class NsLcm(LcmBase):
             RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
         return RO_ip_profile
 
             RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
         return RO_ip_profile
 
-    def _get_ro_vim_id_for_vim_account(self, vim_account):
-        db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
-        if db_vim["_admin"]["operationalState"] != "ENABLED":
-            raise LcmException(
-                "VIM={} is not available. operationalState={}".format(
-                    vim_account, db_vim["_admin"]["operationalState"]
-                )
-            )
-        RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
-        return RO_vim_id
-
-    def get_ro_wim_id_for_wim_account(self, wim_account):
-        if isinstance(wim_account, str):
-            db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
-            if db_wim["_admin"]["operationalState"] != "ENABLED":
-                raise LcmException(
-                    "WIM={} is not available. operationalState={}".format(
-                        wim_account, db_wim["_admin"]["operationalState"]
-                    )
-                )
-            RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
-            return RO_wim_id
-        else:
-            return wim_account
-
     def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
     def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
-
         db_vdu_push_list = []
         template_vdur = []
         db_update = {"_admin.modified": time()}
         db_vdu_push_list = []
         template_vdur = []
         db_update = {"_admin.modified": time()}
@@ -547,21 +470,24 @@ class NsLcm(LcmBase):
                 )
                 if not vdur:
                     # Read the template saved in the db:
                 )
                 if not vdur:
                     # Read the template saved in the db:
-                    self.logger.debug(f"No vdur in the database. Using the vdur-template to scale")
+                    self.logger.debug(
+                        "No vdur in the database. Using the vdur-template to scale"
+                    )
                     vdur_template = db_vnfr.get("vdur-template")
                     if not vdur_template:
                         raise LcmException(
                     vdur_template = db_vnfr.get("vdur-template")
                     if not vdur_template:
                         raise LcmException(
-                           "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
-                            vdu_id
+                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
+                                vdu_id
                             )
                         )
                     vdur = vdur_template[0]
                             )
                         )
                     vdur = vdur_template[0]
-                    #Delete a template from the database after using it
-                    self.db.set_one("vnfrs",
-                                {"_id": db_vnfr["_id"]},
-                                None,
-                                pull={"vdur-template": {"_id": vdur['_id']}}
-                            )
+                    # Delete a template from the database after using it
+                    self.db.set_one(
+                        "vnfrs",
+                        {"_id": db_vnfr["_id"]},
+                        None,
+                        pull={"vdur-template": {"_id": vdur["_id"]}},
+                    )
                 for count in range(vdu_count):
                     vdur_copy = deepcopy(vdur)
                     vdur_copy["status"] = "BUILD"
                 for count in range(vdu_count):
                     vdur_copy = deepcopy(vdur)
                     vdur_copy["status"] = "BUILD"
@@ -595,7 +521,9 @@ class NsLcm(LcmBase):
         if vdu_delete:
             if len(db_vnfr["vdur"]) == 1:
                 # The scale will move to 0 instances
         if vdu_delete:
             if len(db_vnfr["vdur"]) == 1:
                 # The scale will move to 0 instances
-                self.logger.debug(f"Scaling to 0 !, creating the template with the last vdur")
+                self.logger.debug(
+                    "Scaling to 0 !, creating the template with the last vdur"
+                )
                 template_vdur = [db_vnfr["vdur"][0]]
             for vdu_id, vdu_count in vdu_delete.items():
                 if mark_delete:
                 template_vdur = [db_vnfr["vdur"][0]]
             for vdu_id, vdu_count in vdu_delete.items():
                 if mark_delete:
@@ -678,103 +606,6 @@ class NsLcm(LcmBase):
         except DbException as e:
             self.logger.error("Cannot update vnf. {}".format(e))
 
         except DbException as e:
             self.logger.error("Cannot update vnf. {}".format(e))
 
-    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
-        """
-        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
-        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
-        :param nsr_desc_RO: nsr descriptor from RO
-        :return: Nothing, LcmException is raised on errors
-        """
-        for vnf_index, db_vnfr in db_vnfrs.items():
-            for vnf_RO in nsr_desc_RO["vnfs"]:
-                if vnf_RO["member_vnf_index"] != vnf_index:
-                    continue
-                vnfr_update = {}
-                if vnf_RO.get("ip_address"):
-                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
-                        "ip_address"
-                    ].split(";")[0]
-                elif not db_vnfr.get("ip-address"):
-                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
-                        raise LcmExceptionNoMgmtIP(
-                            "ns member_vnf_index '{}' has no IP address".format(
-                                vnf_index
-                            )
-                        )
-
-                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
-                    vdur_RO_count_index = 0
-                    if vdur.get("pdu-type"):
-                        continue
-                    for vdur_RO in get_iterable(vnf_RO, "vms"):
-                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
-                            continue
-                        if vdur["count-index"] != vdur_RO_count_index:
-                            vdur_RO_count_index += 1
-                            continue
-                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
-                        if vdur_RO.get("ip_address"):
-                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
-                        else:
-                            vdur["ip-address"] = None
-                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
-                        vdur["name"] = vdur_RO.get("vim_name")
-                        vdur["status"] = vdur_RO.get("status")
-                        vdur["status-detailed"] = vdur_RO.get("error_msg")
-                        for ifacer in get_iterable(vdur, "interfaces"):
-                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
-                                if ifacer["name"] == interface_RO.get("internal_name"):
-                                    ifacer["ip-address"] = interface_RO.get(
-                                        "ip_address"
-                                    )
-                                    ifacer["mac-address"] = interface_RO.get(
-                                        "mac_address"
-                                    )
-                                    break
-                            else:
-                                raise LcmException(
-                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
-                                    "from VIM info".format(
-                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
-                                    )
-                                )
-                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
-                        break
-                    else:
-                        raise LcmException(
-                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
-                            "VIM info".format(
-                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
-                            )
-                        )
-
-                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
-                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
-                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
-                            continue
-                        vld["vim-id"] = net_RO.get("vim_net_id")
-                        vld["name"] = net_RO.get("vim_name")
-                        vld["status"] = net_RO.get("status")
-                        vld["status-detailed"] = net_RO.get("error_msg")
-                        vnfr_update["vld.{}".format(vld_index)] = vld
-                        break
-                    else:
-                        raise LcmException(
-                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
-                                vnf_index, vld["id"]
-                            )
-                        )
-
-                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
-                break
-
-            else:
-                raise LcmException(
-                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
-                        vnf_index
-                    )
-                )
-
     def _get_ns_config_info(self, nsr_id):
         """
         Generates a mapping between vnf,vdu elements and the N2VC id
     def _get_ns_config_info(self, nsr_id):
         """
         Generates a mapping between vnf,vdu elements and the N2VC id
@@ -814,7 +645,6 @@ class NsLcm(LcmBase):
         start_deploy,
         timeout_ns_deploy,
     ):
         start_deploy,
         timeout_ns_deploy,
     ):
-
         db_vims = {}
 
         def get_vim_account(vim_account_id):
         db_vims = {}
 
         def get_vim_account(vim_account_id):
@@ -830,9 +660,9 @@ class NsLcm(LcmBase):
             target_vim, target_vld, vld_params, target_sdn
         ):
             if vld_params.get("ip-profile"):
             target_vim, target_vld, vld_params, target_sdn
         ):
             if vld_params.get("ip-profile"):
-                target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
-                    "ip-profile"
-                ]
+                target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
+                    vld_params["ip-profile"]
+                )
             if vld_params.get("provider-network"):
                 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
                     "provider-network"
             if vld_params.get("provider-network"):
                 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
                     "provider-network"
@@ -841,9 +671,30 @@ class NsLcm(LcmBase):
                     target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
                         "provider-network"
                     ]["sdn-ports"]
                     target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
                         "provider-network"
                     ]["sdn-ports"]
-            if vld_params.get("wimAccountId"):
-                target_wim = "wim:{}".format(vld_params["wimAccountId"])
-                target_vld["vim_info"][target_wim] = {}
+
+            # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
+            # if wim_account_id is specified in vld_params, validate if it is feasible.
+            wim_account_id, db_wim = select_feasible_wim_account(
+                db_nsr, db_vnfrs, target_vld, vld_params, self.logger
+            )
+
+            if wim_account_id:
+                # WIM is needed and a feasible one was found, populate WIM target and SDN ports
+                self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
+                # update vld_params with correct WIM account Id
+                vld_params["wimAccountId"] = wim_account_id
+
+                target_wim = "wim:{}".format(wim_account_id)
+                target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
+                sdn_ports = get_sdn_ports(vld_params, db_wim)
+                if len(sdn_ports) > 0:
+                    target_vld["vim_info"][target_wim] = target_wim_attrs
+                    target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
+
+                self.logger.debug(
+                    "Target VLD with WIM data: {:s}".format(str(target_vld))
+                )
+
             for param in ("vim-network-name", "vim-network-id"):
                 if vld_params.get(param):
                     if isinstance(vld_params[param], dict):
             for param in ("vim-network-name", "vim-network-id"):
                 if vld_params.get(param):
                     if isinstance(vld_params[param], dict):
@@ -875,23 +726,61 @@ class NsLcm(LcmBase):
                         None,
                     )
                     vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
                         None,
                     )
                     vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
+                    if not vdur:
+                        continue
                     for a_index, a_vld in enumerate(target["ns"]["vld"]):
                         target_vld = find_in_list(
                             get_iterable(vdur, "interfaces"),
                             lambda iface: iface.get("ns-vld-id") == a_vld["name"],
                         )
                     for a_index, a_vld in enumerate(target["ns"]["vld"]):
                         target_vld = find_in_list(
                             get_iterable(vdur, "interfaces"),
                             lambda iface: iface.get("ns-vld-id") == a_vld["name"],
                         )
+
+                        vld_params = find_in_list(
+                            get_iterable(ns_params, "vld"),
+                            lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
+                        )
                         if target_vld:
                             if vnf_params.get("vimAccountId") not in a_vld.get(
                                 "vim_info", {}
                             ):
                         if target_vld:
                             if vnf_params.get("vimAccountId") not in a_vld.get(
                                 "vim_info", {}
                             ):
+                                target_vim_network_list = [
+                                    v for _, v in a_vld.get("vim_info").items()
+                                ]
+                                target_vim_network_name = next(
+                                    (
+                                        item.get("vim_network_name", "")
+                                        for item in target_vim_network_list
+                                    ),
+                                    "",
+                                )
+
                                 target["ns"]["vld"][a_index].get("vim_info").update(
                                     {
                                         "vim:{}".format(vnf_params["vimAccountId"]): {
                                 target["ns"]["vld"][a_index].get("vim_info").update(
                                     {
                                         "vim:{}".format(vnf_params["vimAccountId"]): {
-                                            "vim_network_name": ""
+                                            "vim_network_name": target_vim_network_name,
                                         }
                                     }
                                 )
 
                                         }
                                     }
                                 )
 
+                                if vld_params:
+                                    for param in ("vim-network-name", "vim-network-id"):
+                                        if vld_params.get(param) and isinstance(
+                                            vld_params[param], dict
+                                        ):
+                                            for vim, vim_net in vld_params[
+                                                param
+                                            ].items():
+                                                other_target_vim = "vim:" + vim
+                                                populate_dict(
+                                                    target["ns"]["vld"][a_index].get(
+                                                        "vim_info"
+                                                    ),
+                                                    (
+                                                        other_target_vim,
+                                                        param.replace("-", "_"),
+                                                    ),
+                                                    vim_net,
+                                                )
+
         nslcmop_id = db_nslcmop["_id"]
         target = {
             "name": db_nsr["name"],
         nslcmop_id = db_nslcmop["_id"]
         target = {
             "name": db_nsr["name"],
@@ -906,6 +795,10 @@ class NsLcm(LcmBase):
             image["vim_info"] = {}
         for flavor in target["flavor"]:
             flavor["vim_info"] = {}
             image["vim_info"] = {}
         for flavor in target["flavor"]:
             flavor["vim_info"] = {}
+        if db_nsr.get("shared-volumes"):
+            target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
+            for shared_volumes in target["shared-volumes"]:
+                shared_volumes["vim_info"] = {}
         if db_nsr.get("affinity-or-anti-affinity-group"):
             target["affinity-or-anti-affinity-group"] = deepcopy(
                 db_nsr["affinity-or-anti-affinity-group"]
         if db_nsr.get("affinity-or-anti-affinity-group"):
             target["affinity-or-anti-affinity-group"] = deepcopy(
                 db_nsr["affinity-or-anti-affinity-group"]
@@ -948,16 +841,16 @@ class NsLcm(LcmBase):
             # check if this network needs SDN assist
             if vld.get("pci-interfaces"):
                 db_vim = get_vim_account(ns_params["vimAccountId"])
             # check if this network needs SDN assist
             if vld.get("pci-interfaces"):
                 db_vim = get_vim_account(ns_params["vimAccountId"])
-                sdnc_id = db_vim["config"].get("sdn-controller")
-                if sdnc_id:
-                    sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
-                    target_sdn = "sdn:{}".format(sdnc_id)
-                    target_vld["vim_info"][target_sdn] = {
-                        "sdn": True,
-                        "target_vim": target_vim,
-                        "vlds": [sdn_vld],
-                        "type": vld.get("type"),
-                    }
+                if vim_config := db_vim.get("config"):
+                    if sdnc_id := vim_config.get("sdn-controller"):
+                        sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
+                        target_sdn = "sdn:{}".format(sdnc_id)
+                        target_vld["vim_info"][target_sdn] = {
+                            "sdn": True,
+                            "target_vim": target_vim,
+                            "vlds": [sdn_vld],
+                            "type": vld.get("type"),
+                        }
 
             nsd_vnf_profiles = get_vnf_profiles(nsd)
             for nsd_vnf_profile in nsd_vnf_profiles:
 
             nsd_vnf_profiles = get_vnf_profiles(nsd)
             for nsd_vnf_profile in nsd_vnf_profiles:
@@ -984,27 +877,9 @@ class NsLcm(LcmBase):
                 and nsd_vlp.get("virtual-link-protocol-data")
                 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
             ):
                 and nsd_vlp.get("virtual-link-protocol-data")
                 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
             ):
-                ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
+                vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
                     "l3-protocol-data"
                 ]
                     "l3-protocol-data"
                 ]
-                ip_profile_dest_data = {}
-                if "ip-version" in ip_profile_source_data:
-                    ip_profile_dest_data["ip-version"] = ip_profile_source_data[
-                        "ip-version"
-                    ]
-                if "cidr" in ip_profile_source_data:
-                    ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
-                        "cidr"
-                    ]
-                if "gateway-ip" in ip_profile_source_data:
-                    ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
-                        "gateway-ip"
-                    ]
-                if "dhcp-enabled" in ip_profile_source_data:
-                    ip_profile_dest_data["dhcp-params"] = {
-                        "enabled": ip_profile_source_data["dhcp-enabled"]
-                    }
-                vld_params["ip-profile"] = ip_profile_dest_data
 
             # update vld_params with instantiation params
             vld_instantiation_params = find_in_list(
 
             # update vld_params with instantiation params
             vld_instantiation_params = find_in_list(
@@ -1070,28 +945,9 @@ class NsLcm(LcmBase):
                     and vnfd_vlp.get("virtual-link-protocol-data")
                     and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
                 ):
                     and vnfd_vlp.get("virtual-link-protocol-data")
                     and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
                 ):
-                    ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
+                    vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
                         "l3-protocol-data"
                     ]
                         "l3-protocol-data"
                     ]
-                    ip_profile_dest_data = {}
-                    if "ip-version" in ip_profile_source_data:
-                        ip_profile_dest_data["ip-version"] = ip_profile_source_data[
-                            "ip-version"
-                        ]
-                    if "cidr" in ip_profile_source_data:
-                        ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
-                            "cidr"
-                        ]
-                    if "gateway-ip" in ip_profile_source_data:
-                        ip_profile_dest_data[
-                            "gateway-address"
-                        ] = ip_profile_source_data["gateway-ip"]
-                    if "dhcp-enabled" in ip_profile_source_data:
-                        ip_profile_dest_data["dhcp-params"] = {
-                            "enabled": ip_profile_source_data["dhcp-enabled"]
-                        }
-
-                    vld_params["ip-profile"] = ip_profile_dest_data
                 # update vld_params with instantiation params
                 if vnf_params:
                     vld_instantiation_params = find_in_list(
                 # update vld_params with instantiation params
                 if vnf_params:
                     vld_instantiation_params = find_in_list(
@@ -1213,20 +1069,47 @@ class NsLcm(LcmBase):
                         if target_vim not in ns_ags["vim_info"]:
                             ns_ags["vim_info"][target_vim] = {}
 
                         if target_vim not in ns_ags["vim_info"]:
                             ns_ags["vim_info"][target_vim] = {}
 
+                # shared-volumes
+                if vdur.get("shared-volumes-id"):
+                    for sv_id in vdur["shared-volumes-id"]:
+                        ns_sv = find_in_list(
+                            target["shared-volumes"], lambda sv: sv_id in sv["id"]
+                        )
+                        if ns_sv:
+                            ns_sv["vim_info"][target_vim] = {}
+
                 vdur["vim_info"] = {target_vim: {}}
                 # instantiation parameters
                 vdur["vim_info"] = {target_vim: {}}
                 # instantiation parameters
-                # if vnf_params:
-                #     vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
-                #     vdud["id"]), None)
+                if vnf_params:
+                    vdu_instantiation_params = find_in_list(
+                        get_iterable(vnf_params, "vdu"),
+                        lambda i_vdu: i_vdu["id"] == vdud["id"],
+                    )
+                    if vdu_instantiation_params:
+                        # Parse the vdu_volumes from the instantiation params
+                        vdu_volumes = get_volumes_from_instantiation_params(
+                            vdu_instantiation_params, vdud
+                        )
+                        vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
+                        vdur["additionalParams"]["OSM"][
+                            "vim_flavor_id"
+                        ] = vdu_instantiation_params.get("vim-flavor-id")
                 vdur_list.append(vdur)
             target_vnf["vdur"] = vdur_list
             target["vnf"].append(target_vnf)
 
                 vdur_list.append(vdur)
             target_vnf["vdur"] = vdur_list
             target["vnf"].append(target_vnf)
 
+        self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
         desc = await self.RO.deploy(nsr_id, target)
         self.logger.debug("RO return > {}".format(desc))
         action_id = desc["action_id"]
         await self._wait_ng_ro(
         desc = await self.RO.deploy(nsr_id, target)
         self.logger.debug("RO return > {}".format(desc))
         action_id = desc["action_id"]
         await self._wait_ng_ro(
-            nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage
+            nsr_id,
+            action_id,
+            nslcmop_id,
+            start_deploy,
+            timeout_ns_deploy,
+            stage,
+            operation="instantiation",
         )
 
         # Updating NSR
         )
 
         # Updating NSR
@@ -1250,12 +1133,13 @@ class NsLcm(LcmBase):
         start_time=None,
         timeout=600,
         stage=None,
         start_time=None,
         timeout=600,
         stage=None,
+        operation=None,
     ):
         detailed_status_old = None
         db_nsr_update = {}
         start_time = start_time or time()
         while time() <= start_time + timeout:
     ):
         detailed_status_old = None
         db_nsr_update = {}
         start_time = start_time or time()
         while time() <= start_time + timeout:
-            desc_status = await self.RO.status(nsr_id, action_id)
+            desc_status = await self.op_status_map[operation](nsr_id, action_id)
             self.logger.debug("Wait NG RO > {}".format(desc_status))
             if desc_status["status"] == "FAILED":
                 raise NgRoException(desc_status["details"])
             self.logger.debug("Wait NG RO > {}".format(desc_status))
             if desc_status["status"] == "FAILED":
                 raise NgRoException(desc_status["details"])
@@ -1275,7 +1159,7 @@ class NsLcm(LcmBase):
                 db_nsr_update["detailed-status"] = " ".join(stage)
                 self.update_db_2("nsrs", nsr_id, db_nsr_update)
                 self._write_op_status(nslcmop_id, stage)
                 db_nsr_update["detailed-status"] = " ".join(stage)
                 self.update_db_2("nsrs", nsr_id, db_nsr_update)
                 self._write_op_status(nslcmop_id, stage)
-            await asyncio.sleep(15, loop=self.loop)
+            await asyncio.sleep(15)
         else:  # timeout_ns_deploy
             raise NgRoException("Timeout waiting ns to deploy")
 
         else:  # timeout_ns_deploy
             raise NgRoException("Timeout waiting ns to deploy")
 
@@ -1296,7 +1180,6 @@ class NsLcm(LcmBase):
             }
             desc = await self.RO.deploy(nsr_id, target)
             action_id = desc["action_id"]
             }
             desc = await self.RO.deploy(nsr_id, target)
             action_id = desc["action_id"]
-            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
             db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
             self.logger.debug(
                 logging_text
             db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
             self.logger.debug(
                 logging_text
@@ -1306,22 +1189,25 @@ class NsLcm(LcmBase):
             # wait until done
             delete_timeout = 20 * 60  # 20 minutes
             await self._wait_ng_ro(
             # wait until done
             delete_timeout = 20 * 60  # 20 minutes
             await self._wait_ng_ro(
-                nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage
+                nsr_id,
+                action_id,
+                nslcmop_id,
+                start_deploy,
+                delete_timeout,
+                stage,
+                operation="termination",
             )
             )
-
-            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
             db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
             # delete all nsr
             await self.RO.delete(nsr_id)
             db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
             # delete all nsr
             await self.RO.delete(nsr_id)
-        except Exception as e:
-            if isinstance(e, NgRoException) and e.http_code == 404:  # not found
+        except NgRoException as e:
+            if e.http_code == 404:  # not found
                 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
-                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                 self.logger.debug(
                     logging_text + "RO_action_id={} already deleted".format(action_id)
                 )
                 self.logger.debug(
                     logging_text + "RO_action_id={} already deleted".format(action_id)
                 )
-            elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
+            elif e.http_code == 409:  # conflict
                 failed_detail.append("delete conflict: {}".format(e))
                 self.logger.debug(
                     logging_text
                 failed_detail.append("delete conflict: {}".format(e))
                 self.logger.debug(
                     logging_text
@@ -1333,6 +1219,11 @@ class NsLcm(LcmBase):
                     logging_text
                     + "RO_action_id={} delete error: {}".format(action_id, e)
                 )
                     logging_text
                     + "RO_action_id={} delete error: {}".format(action_id, e)
                 )
+        except Exception as e:
+            failed_detail.append("delete error: {}".format(e))
+            self.logger.error(
+                logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
+            )
 
         if failed_detail:
             stage[2] = "Error deleting from VIM"
 
         if failed_detail:
             stage[2] = "Error deleting from VIM"
@@ -1377,9 +1268,7 @@ class NsLcm(LcmBase):
             if ns_params and ns_params.get("timeout_ns_deploy"):
                 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
             else:
             if ns_params and ns_params.get("timeout_ns_deploy"):
                 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
             else:
-                timeout_ns_deploy = self.timeout.get(
-                    "ns_deploy", self.timeout_ns_deploy
-                )
+                timeout_ns_deploy = self.timeout.ns_deploy
 
             # Check for and optionally request placement optimization. Database will be updated if placement activated
             stage[2] = "Waiting for Placement."
 
             # Check for and optionally request placement optimization. Database will be updated if placement activated
             stage[2] = "Waiting for Placement."
@@ -1456,7 +1345,7 @@ class NsLcm(LcmBase):
                         "target KDU={} is in error state".format(kdu_name)
                     )
 
                         "target KDU={} is in error state".format(kdu_name)
                     )
 
-            await asyncio.sleep(10, loop=self.loop)
+            await asyncio.sleep(10)
             nb_tries += 1
         raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
 
             nb_tries += 1
         raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
 
@@ -1476,21 +1365,18 @@ class NsLcm(LcmBase):
         """
 
         self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
         """
 
         self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
-        ro_nsr_id = None
         ip_address = None
         ip_address = None
-        nb_tries = 0
         target_vdu_id = None
         ro_retries = 0
 
         while True:
         target_vdu_id = None
         ro_retries = 0
 
         while True:
-
             ro_retries += 1
             if ro_retries >= 360:  # 1 hour
                 raise LcmException(
                     "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                 )
 
             ro_retries += 1
             if ro_retries >= 360:  # 1 hour
                 raise LcmException(
                     "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                 )
 
-            await asyncio.sleep(10, loop=self.loop)
+            await asyncio.sleep(10)
 
             # get ip address
             if not target_vdu_id:
 
             # get ip address
             if not target_vdu_id:
@@ -1566,72 +1452,24 @@ class NsLcm(LcmBase):
                     self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                     return ip_address
                 try:
                     self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                     return ip_address
                 try:
-                    ro_vm_id = "{}-{}".format(
-                        db_vnfr["member-vnf-index-ref"], target_vdu_id
-                    )  # TODO add vdu_index
-                    if self.ng_ro:
-                        target = {
-                            "action": {
-                                "action": "inject_ssh_key",
-                                "key": pub_key,
-                                "user": user,
-                            },
-                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
-                        }
-                        desc = await self.RO.deploy(nsr_id, target)
-                        action_id = desc["action_id"]
-                        await self._wait_ng_ro(nsr_id, action_id, timeout=600)
-                        break
-                    else:
-                        # wait until NS is deployed at RO
-                        if not ro_nsr_id:
-                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
-                            ro_nsr_id = deep_get(
-                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
-                            )
-                        if not ro_nsr_id:
-                            continue
-                        result_dict = await self.RO.create_action(
-                            item="ns",
-                            item_id_name=ro_nsr_id,
-                            descriptor={
-                                "add_public_key": pub_key,
-                                "vms": [ro_vm_id],
-                                "user": user,
-                            },
-                        )
-                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
-                        if not result_dict or not isinstance(result_dict, dict):
-                            raise LcmException(
-                                "Unknown response from RO when injecting key"
-                            )
-                        for result in result_dict.values():
-                            if result.get("vim_result") == 200:
-                                break
-                            else:
-                                raise ROclient.ROClientException(
-                                    "error injecting key: {}".format(
-                                        result.get("description")
-                                    )
-                                )
-                        break
+                    target = {
+                        "action": {
+                            "action": "inject_ssh_key",
+                            "key": pub_key,
+                            "user": user,
+                        },
+                        "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
+                    }
+                    desc = await self.RO.deploy(nsr_id, target)
+                    action_id = desc["action_id"]
+                    await self._wait_ng_ro(
+                        nsr_id, action_id, timeout=600, operation="instantiation"
+                    )
+                    break
                 except NgRoException as e:
                     raise LcmException(
                         "Reaching max tries injecting key. Error: {}".format(e)
                     )
                 except NgRoException as e:
                     raise LcmException(
                         "Reaching max tries injecting key. Error: {}".format(e)
                     )
-                except ROclient.ROClientException as e:
-                    if not nb_tries:
-                        self.logger.debug(
-                            logging_text
-                            + "error injecting key: {}. Retrying until {} seconds".format(
-                                e, 20 * 10
-                            )
-                        )
-                    nb_tries += 1
-                    if nb_tries >= 20:
-                        raise LcmException(
-                            "Reaching max tries injecting key. Error: {}".format(e)
-                        )
             else:
                 break
 
             else:
                 break
 
@@ -1694,6 +1532,7 @@ class NsLcm(LcmBase):
         vdu_id,
         kdu_name,
         vdu_index,
         vdu_id,
         kdu_name,
         vdu_index,
+        kdu_index,
         config_descriptor,
         deploy_params,
         base_folder,
         config_descriptor,
         deploy_params,
         base_folder,
@@ -1715,7 +1554,6 @@ class NsLcm(LcmBase):
         }
         step = ""
         try:
         }
         step = ""
         try:
-
             element_type = "NS"
             element_under_configuration = nsr_id
 
             element_type = "NS"
             element_under_configuration = nsr_id
 
@@ -1798,8 +1636,7 @@ class NsLcm(LcmBase):
 
             vca_id = self.get_vca_id(db_vnfr, db_nsr)
             # create or register execution environment in VCA
 
             vca_id = self.get_vca_id(db_vnfr, db_nsr)
             # create or register execution environment in VCA
-            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
-
+            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
                 self._write_configuration_status(
                     nsr_id=nsr_id,
                     vca_index=vca_index,
                 self._write_configuration_status(
                     nsr_id=nsr_id,
                     vca_index=vca_index,
@@ -1821,15 +1658,16 @@ class NsLcm(LcmBase):
                         db_dict=db_dict,
                         vca_id=vca_id,
                     )
                         db_dict=db_dict,
                         vca_id=vca_id,
                     )
-                elif vca_type == "helm" or vca_type == "helm-v3":
+                elif vca_type == "helm-v3":
                     ee_id, credentials = await self.vca_map[
                         vca_type
                     ].create_execution_environment(
                     ee_id, credentials = await self.vca_map[
                         vca_type
                     ].create_execution_environment(
-                        namespace=namespace,
+                        namespace=nsr_id,
                         reuse_ee_id=ee_id,
                         db_dict=db_dict,
                         config=osm_config,
                         artifact_path=artifact_path,
                         reuse_ee_id=ee_id,
                         db_dict=db_dict,
                         config=osm_config,
                         artifact_path=artifact_path,
+                        chart_model=vca_name,
                         vca_type=vca_type,
                     )
                 else:
                         vca_type=vca_type,
                     )
                 else:
@@ -1954,16 +1792,19 @@ class NsLcm(LcmBase):
             )
 
             # add relations for this VCA (wait for other peers related with this VCA)
             )
 
             # add relations for this VCA (wait for other peers related with this VCA)
-            await self._add_vca_relations(
+            is_relation_added = await self._add_vca_relations(
                 logging_text=logging_text,
                 nsr_id=nsr_id,
                 vca_type=vca_type,
                 vca_index=vca_index,
             )
 
                 logging_text=logging_text,
                 nsr_id=nsr_id,
                 vca_type=vca_type,
                 vca_index=vca_index,
             )
 
+            if not is_relation_added:
+                raise LcmException("Relations could not be added to VCA.")
+
             # if SSH access is required, then get execution environment SSH public
             # if native charm we have waited already to VM be UP
             # if SSH access is required, then get execution environment SSH public
             # if native charm we have waited already to VM be UP
-            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
+            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
                 pub_key = None
                 user = None
                 # self.logger.debug("get ssh key block")
                 pub_key = None
                 user = None
                 # self.logger.debug("get ssh key block")
@@ -1989,6 +1830,9 @@ class NsLcm(LcmBase):
                     step = "Waiting to VM being up and getting IP address"
                 self.logger.debug(logging_text + step)
 
                     step = "Waiting to VM being up and getting IP address"
                 self.logger.debug(logging_text + step)
 
+                # default rw_mgmt_ip to None, avoiding the non definition of the variable
+                rw_mgmt_ip = None
+
                 # n2vc_redesign STEP 5.1
                 # wait for RO (ip-address) Insert pub_key into VM
                 if vnfr_id:
                 # n2vc_redesign STEP 5.1
                 # wait for RO (ip-address) Insert pub_key into VM
                 if vnfr_id:
@@ -2020,7 +1864,13 @@ class NsLcm(LcmBase):
                             },
                             vca_id=vca_id,
                         )
                             },
                             vca_id=vca_id,
                         )
-                    else:
+
+                    # This verification is needed in order to avoid trying to add a public key
+                    # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
+                    # for a KNF and not for its KDUs, the previous verification gives False, and the code
+                    # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
+                    # or it is a KNF)
+                    elif db_vnfr.get("vdur"):
                         rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                             logging_text,
                             nsr_id,
                         rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                             logging_text,
                             nsr_id,
@@ -2031,9 +1881,6 @@ class NsLcm(LcmBase):
                             pub_key=pub_key,
                         )
 
                             pub_key=pub_key,
                         )
 
-                else:
-                    rw_mgmt_ip = None  # This is for a NS configuration
-
                 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
 
             # store rw_mgmt_ip in deploy params for later replacement
                 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
 
             # store rw_mgmt_ip in deploy params for later replacement
@@ -2099,7 +1946,9 @@ class NsLcm(LcmBase):
                 # TODO register in database that primitive is done
 
             # STEP 7 Configure metrics
                 # TODO register in database that primitive is done
 
             # STEP 7 Configure metrics
-            if vca_type == "helm" or vca_type == "helm-v3":
+            if vca_type == "helm-v3":
+                # TODO: review for those cases where the helm chart is a reference and
+                # is not part of the NF package
                 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                     ee_id=ee_id,
                     artifact_path=artifact_path,
                 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                     ee_id=ee_id,
                     artifact_path=artifact_path,
@@ -2107,6 +1956,12 @@ class NsLcm(LcmBase):
                     vnfr_id=vnfr_id,
                     nsr_id=nsr_id,
                     target_ip=rw_mgmt_ip,
                     vnfr_id=vnfr_id,
                     nsr_id=nsr_id,
                     target_ip=rw_mgmt_ip,
+                    element_type=element_type,
+                    vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
+                    vdu_id=vdu_id,
+                    vdu_index=vdu_index,
+                    kdu_name=kdu_name,
+                    kdu_index=kdu_index,
                 )
                 if prometheus_jobs:
                     self.update_db_2(
                 )
                 if prometheus_jobs:
                     self.update_db_2(
@@ -2142,7 +1997,7 @@ class NsLcm(LcmBase):
             self._write_configuration_status(
                 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
             )
             self._write_configuration_status(
                 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
             )
-            raise LcmException("{} {}".format(step, e)) from e
+            raise LcmException("{}. {}".format(step, e)) from e
 
     def _write_ns_status(
         self,
 
     def _write_ns_status(
         self,
@@ -2242,7 +2097,6 @@ class NsLcm(LcmBase):
         element_type: str = None,
         other_update: dict = None,
     ):
         element_type: str = None,
         other_update: dict = None,
     ):
-
         # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
         #                   .format(vca_index, status))
 
         # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
         #                   .format(vca_index, status))
 
@@ -2283,9 +2137,7 @@ class NsLcm(LcmBase):
             self.logger.debug(
                 logging_text + "Invoke and wait for placement optimization"
             )
             self.logger.debug(
                 logging_text + "Invoke and wait for placement optimization"
             )
-            await self.msg.aiowrite(
-                "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
-            )
+            await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
             db_poll_interval = 5
             wait = db_poll_interval * 10
             pla_result = None
             db_poll_interval = 5
             wait = db_poll_interval * 10
             pla_result = None
@@ -2314,6 +2166,299 @@ class NsLcm(LcmBase):
                 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
         return modified
 
                 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
         return modified
 
+    def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
+        alerts = []
+        nsr_id = vnfr["nsr-id-ref"]
+        df = vnfd.get("df", [{}])[0]
+        # Checking for auto-healing configuration
+        if "healing-aspect" in df:
+            healing_aspects = df["healing-aspect"]
+            for healing in healing_aspects:
+                for healing_policy in healing.get("healing-policy", ()):
+                    vdu_id = healing_policy["vdu-id"]
+                    vdur = next(
+                        (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
+                        {},
+                    )
+                    if not vdur:
+                        continue
+                    metric_name = "vm_status"
+                    vdu_name = vdur.get("name")
+                    vnf_member_index = vnfr["member-vnf-index-ref"]
+                    uuid = str(uuid4())
+                    name = f"healing_{uuid}"
+                    action = healing_policy
+                    # action_on_recovery = healing.get("action-on-recovery")
+                    # cooldown_time = healing.get("cooldown-time")
+                    # day1 = healing.get("day1")
+                    alert = {
+                        "uuid": uuid,
+                        "name": name,
+                        "metric": metric_name,
+                        "tags": {
+                            "ns_id": nsr_id,
+                            "vnf_member_index": vnf_member_index,
+                            "vdu_name": vdu_name,
+                        },
+                        "alarm_status": "ok",
+                        "action_type": "healing",
+                        "action": action,
+                    }
+                    alerts.append(alert)
+        return alerts
+
+    def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
+        alerts = []
+        nsr_id = vnfr["nsr-id-ref"]
+        df = vnfd.get("df", [{}])[0]
+        # Checking for auto-scaling configuration
+        if "scaling-aspect" in df:
+            scaling_aspects = df["scaling-aspect"]
+            all_vnfd_monitoring_params = {}
+            for ivld in vnfd.get("int-virtual-link-desc", ()):
+                for mp in ivld.get("monitoring-parameters", ()):
+                    all_vnfd_monitoring_params[mp.get("id")] = mp
+            for vdu in vnfd.get("vdu", ()):
+                for mp in vdu.get("monitoring-parameter", ()):
+                    all_vnfd_monitoring_params[mp.get("id")] = mp
+            for df in vnfd.get("df", ()):
+                for mp in df.get("monitoring-parameter", ()):
+                    all_vnfd_monitoring_params[mp.get("id")] = mp
+            for scaling_aspect in scaling_aspects:
+                scaling_group_name = scaling_aspect.get("name", "")
+                # Get monitored VDUs
+                all_monitored_vdus = set()
+                for delta in scaling_aspect.get("aspect-delta-details", {}).get(
+                    "deltas", ()
+                ):
+                    for vdu_delta in delta.get("vdu-delta", ()):
+                        all_monitored_vdus.add(vdu_delta.get("id"))
+                monitored_vdurs = list(
+                    filter(
+                        lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
+                        vnfr["vdur"],
+                    )
+                )
+                if not monitored_vdurs:
+                    self.logger.error(
+                        "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
+                    )
+                    continue
+                for scaling_policy in scaling_aspect.get("scaling-policy", ()):
+                    if scaling_policy["scaling-type"] != "automatic":
+                        continue
+                    threshold_time = scaling_policy.get("threshold-time", "1")
+                    cooldown_time = scaling_policy.get("cooldown-time", "0")
+                    for scaling_criteria in scaling_policy["scaling-criteria"]:
+                        monitoring_param_ref = scaling_criteria.get(
+                            "vnf-monitoring-param-ref"
+                        )
+                        vnf_monitoring_param = all_vnfd_monitoring_params[
+                            monitoring_param_ref
+                        ]
+                        for vdur in monitored_vdurs:
+                            vdu_id = vdur["vdu-id-ref"]
+                            metric_name = vnf_monitoring_param.get("performance-metric")
+                            metric_name = f"osm_{metric_name}"
+                            vnf_member_index = vnfr["member-vnf-index-ref"]
+                            scalein_threshold = scaling_criteria.get(
+                                "scale-in-threshold"
+                            )
+                            scaleout_threshold = scaling_criteria.get(
+                                "scale-out-threshold"
+                            )
+                            # Looking for min/max-number-of-instances
+                            instances_min_number = 1
+                            instances_max_number = 1
+                            vdu_profile = df["vdu-profile"]
+                            if vdu_profile:
+                                profile = next(
+                                    item for item in vdu_profile if item["id"] == vdu_id
+                                )
+                                instances_min_number = profile.get(
+                                    "min-number-of-instances", 1
+                                )
+                                instances_max_number = profile.get(
+                                    "max-number-of-instances", 1
+                                )
+
+                            if scalein_threshold:
+                                uuid = str(uuid4())
+                                name = f"scalein_{uuid}"
+                                operation = scaling_criteria[
+                                    "scale-in-relational-operation"
+                                ]
+                                rel_operator = self.rel_operation_types.get(
+                                    operation, "<="
+                                )
+                                metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
+                                expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
+                                labels = {
+                                    "ns_id": nsr_id,
+                                    "vnf_member_index": vnf_member_index,
+                                    "vdu_id": vdu_id,
+                                }
+                                prom_cfg = {
+                                    "alert": name,
+                                    "expr": expression,
+                                    "for": str(threshold_time) + "m",
+                                    "labels": labels,
+                                }
+                                action = scaling_policy
+                                action = {
+                                    "scaling-group": scaling_group_name,
+                                    "cooldown-time": cooldown_time,
+                                }
+                                alert = {
+                                    "uuid": uuid,
+                                    "name": name,
+                                    "metric": metric_name,
+                                    "tags": {
+                                        "ns_id": nsr_id,
+                                        "vnf_member_index": vnf_member_index,
+                                        "vdu_id": vdu_id,
+                                    },
+                                    "alarm_status": "ok",
+                                    "action_type": "scale_in",
+                                    "action": action,
+                                    "prometheus_config": prom_cfg,
+                                }
+                                alerts.append(alert)
+
+                            if scaleout_threshold:
+                                uuid = str(uuid4())
+                                name = f"scaleout_{uuid}"
+                                operation = scaling_criteria[
+                                    "scale-out-relational-operation"
+                                ]
+                                rel_operator = self.rel_operation_types.get(
+                                    operation, "<="
+                                )
+                                metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
+                                expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
+                                labels = {
+                                    "ns_id": nsr_id,
+                                    "vnf_member_index": vnf_member_index,
+                                    "vdu_id": vdu_id,
+                                }
+                                prom_cfg = {
+                                    "alert": name,
+                                    "expr": expression,
+                                    "for": str(threshold_time) + "m",
+                                    "labels": labels,
+                                }
+                                action = scaling_policy
+                                action = {
+                                    "scaling-group": scaling_group_name,
+                                    "cooldown-time": cooldown_time,
+                                }
+                                alert = {
+                                    "uuid": uuid,
+                                    "name": name,
+                                    "metric": metric_name,
+                                    "tags": {
+                                        "ns_id": nsr_id,
+                                        "vnf_member_index": vnf_member_index,
+                                        "vdu_id": vdu_id,
+                                    },
+                                    "alarm_status": "ok",
+                                    "action_type": "scale_out",
+                                    "action": action,
+                                    "prometheus_config": prom_cfg,
+                                }
+                                alerts.append(alert)
+        return alerts
+
+    def _gather_vnfr_alarm_alerts(self, vnfr, vnfd):
+        alerts = []
+        nsr_id = vnfr["nsr-id-ref"]
+        vnf_member_index = vnfr["member-vnf-index-ref"]
+
+        # Checking for VNF alarm configuration
+        for vdur in vnfr["vdur"]:
+            vdu_id = vdur["vdu-id-ref"]
+            vdu = next(filter(lambda vdu: vdu["id"] == vdu_id, vnfd["vdu"]))
+            if "alarm" in vdu:
+                # Get VDU monitoring params, since alerts are based on them
+                vdu_monitoring_params = {}
+                for mp in vdu.get("monitoring-parameter", []):
+                    vdu_monitoring_params[mp.get("id")] = mp
+                if not vdu_monitoring_params:
+                    self.logger.error(
+                        "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
+                    )
+                    continue
+                # Get alarms in the VDU
+                alarm_descriptors = vdu["alarm"]
+                # Create VDU alarms for each alarm in the VDU
+                for alarm_descriptor in alarm_descriptors:
+                    # Check that the VDU alarm refers to a proper monitoring param
+                    alarm_monitoring_param = alarm_descriptor.get(
+                        "vnf-monitoring-param-ref", ""
+                    )
+                    vdu_specific_monitoring_param = vdu_monitoring_params.get(
+                        alarm_monitoring_param, {}
+                    )
+                    if not vdu_specific_monitoring_param:
+                        self.logger.error(
+                            "VDU alarm refers to a VDU monitoring param not present in the VDU"
+                        )
+                        continue
+                    metric_name = vdu_specific_monitoring_param.get(
+                        "performance-metric"
+                    )
+                    if not metric_name:
+                        self.logger.error(
+                            "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
+                        )
+                        continue
+                    # Set params of the alarm to be created in Prometheus
+                    metric_name = f"osm_{metric_name}"
+                    metric_threshold = alarm_descriptor.get("value")
+                    uuid = str(uuid4())
+                    alert_name = f"vdu_alarm_{uuid}"
+                    operation = alarm_descriptor["operation"]
+                    rel_operator = self.rel_operation_types.get(operation, "<=")
+                    metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
+                    expression = f"{metric_selector} {rel_operator} {metric_threshold}"
+                    labels = {
+                        "ns_id": nsr_id,
+                        "vnf_member_index": vnf_member_index,
+                        "vdu_id": vdu_id,
+                        "vdu_name": "{{ $labels.vdu_name }}",
+                    }
+                    prom_cfg = {
+                        "alert": alert_name,
+                        "expr": expression,
+                        "for": "1m",  # default value. Ideally, this should be related to an IM param, but there is not such param
+                        "labels": labels,
+                    }
+                    alarm_action = dict()
+                    for action_type in ["ok", "insufficient-data", "alarm"]:
+                        if (
+                            "actions" in alarm_descriptor
+                            and action_type in alarm_descriptor["actions"]
+                        ):
+                            alarm_action[action_type] = alarm_descriptor["actions"][
+                                action_type
+                            ]
+                    alert = {
+                        "uuid": uuid,
+                        "name": alert_name,
+                        "metric": metric_name,
+                        "tags": {
+                            "ns_id": nsr_id,
+                            "vnf_member_index": vnf_member_index,
+                            "vdu_id": vdu_id,
+                        },
+                        "alarm_status": "ok",
+                        "action_type": "vdu_alarm",
+                        "action": alarm_action,
+                        "prometheus_config": prom_cfg,
+                    }
+                    alerts.append(alert)
+        return alerts
+
     def update_nsrs_with_pla_result(self, params):
         try:
             nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
     def update_nsrs_with_pla_result(self, params):
         try:
             nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
@@ -2355,6 +2500,8 @@ class NsLcm(LcmBase):
         # update operation on nslcmops
         db_nslcmop_update = {}
 
         # update operation on nslcmops
         db_nslcmop_update = {}
 
+        timeout_ns_deploy = self.timeout.ns_deploy
+
         nslcmop_operation_state = None
         db_vnfrs = {}  # vnf's info indexed by member-index
         # n2vc_info = {}
         nslcmop_operation_state = None
         db_vnfrs = {}  # vnf's info indexed by member-index
         # n2vc_info = {}
@@ -2395,10 +2542,6 @@ class NsLcm(LcmBase):
             ns_params = db_nslcmop.get("operationParams")
             if ns_params and ns_params.get("timeout_ns_deploy"):
                 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
             ns_params = db_nslcmop.get("operationParams")
             if ns_params and ns_params.get("timeout_ns_deploy"):
                 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
-            else:
-                timeout_ns_deploy = self.timeout.get(
-                    "ns_deploy", self.timeout_ns_deploy
-                )
 
             # read from db: ns
             stage[1] = "Getting nsr={} from db.".format(nsr_id)
 
             # read from db: ns
             stage[1] = "Getting nsr={} from db.".format(nsr_id)
@@ -2499,8 +2642,8 @@ class NsLcm(LcmBase):
             # feature 1429. Add n2vc public key to needed VMs
             n2vc_key = self.n2vc.get_public_key()
             n2vc_key_list = [n2vc_key]
             # feature 1429. Add n2vc public key to needed VMs
             n2vc_key = self.n2vc.get_public_key()
             n2vc_key_list = [n2vc_key]
-            if self.vca_config.get("public_key"):
-                n2vc_key_list.append(self.vca_config["public_key"])
+            if self.vca_config.public_key:
+                n2vc_key_list.append(self.vca_config.public_key)
 
             stage[1] = "Deploying NS at VIM."
             task_ro = asyncio.ensure_future(
 
             stage[1] = "Deploying NS at VIM."
             task_ro = asyncio.ensure_future(
@@ -2523,6 +2666,20 @@ class NsLcm(LcmBase):
             stage[1] = "Deploying Execution Environments."
             self.logger.debug(logging_text + stage[1])
 
             stage[1] = "Deploying Execution Environments."
             self.logger.debug(logging_text + stage[1])
 
+            # create namespace and certificate if any helm based EE is present in the NS
+            if check_helm_ee_in_ns(db_vnfds):
+                await self.vca_map["helm-v3"].setup_ns_namespace(
+                    name=nsr_id,
+                )
+                # create TLS certificates
+                await self.vca_map["helm-v3"].create_tls_certificate(
+                    secret_name=self.EE_TLS_NAME,
+                    dns_prefix="*",
+                    nsr_id=nsr_id,
+                    usage="server auth",
+                    namespace=nsr_id,
+                )
+
             nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
             for vnf_profile in get_vnf_profiles(nsd):
                 vnfd_id = vnf_profile["vnfd-id"]
             nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
             for vnf_profile in get_vnf_profiles(nsd):
                 vnfd_id = vnf_profile["vnfd-id"]
@@ -2534,6 +2691,7 @@ class NsLcm(LcmBase):
                 vdu_index = 0
                 vdu_name = None
                 kdu_name = None
                 vdu_index = 0
                 vdu_name = None
                 kdu_name = None
+                kdu_index = None
 
                 # Get additional parameters
                 deploy_params = {"OSM": get_osm_params(db_vnfr)}
 
                 # Get additional parameters
                 deploy_params = {"OSM": get_osm_params(db_vnfr)}
@@ -2557,6 +2715,7 @@ class NsLcm(LcmBase):
                         kdu_name=kdu_name,
                         member_vnf_index=member_vnf_index,
                         vdu_index=vdu_index,
                         kdu_name=kdu_name,
                         member_vnf_index=member_vnf_index,
                         vdu_index=vdu_index,
+                        kdu_index=kdu_index,
                         vdu_name=vdu_name,
                         deploy_params=deploy_params,
                         descriptor_config=descriptor_config,
                         vdu_name=vdu_name,
                         deploy_params=deploy_params,
                         descriptor_config=descriptor_config,
@@ -2589,6 +2748,7 @@ class NsLcm(LcmBase):
                     if descriptor_config:
                         vdu_name = None
                         kdu_name = None
                     if descriptor_config:
                         vdu_name = None
                         kdu_name = None
+                        kdu_index = None
                         for vdu_index in range(vdud_count):
                             # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                             self._deploy_n2vc(
                         for vdu_index in range(vdud_count):
                             # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                             self._deploy_n2vc(
@@ -2604,6 +2764,7 @@ class NsLcm(LcmBase):
                                 vnfd_id=vnfd_id,
                                 vdu_id=vdu_id,
                                 kdu_name=kdu_name,
                                 vnfd_id=vnfd_id,
                                 vdu_id=vdu_id,
                                 kdu_name=kdu_name,
+                                kdu_index=kdu_index,
                                 member_vnf_index=member_vnf_index,
                                 vdu_index=vdu_index,
                                 vdu_name=vdu_name,
                                 member_vnf_index=member_vnf_index,
                                 vdu_index=vdu_index,
                                 vdu_name=vdu_name,
@@ -2620,8 +2781,10 @@ class NsLcm(LcmBase):
                         vdu_id = None
                         vdu_index = 0
                         vdu_name = None
                         vdu_id = None
                         vdu_index = 0
                         vdu_name = None
-                        kdur = next(
-                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
+                        kdu_index, kdur = next(
+                            x
+                            for x in enumerate(db_vnfr["kdur"])
+                            if x[1]["kdu-name"] == kdu_name
                         )
                         deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                         if kdur.get("additionalParams"):
                         )
                         deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                         if kdur.get("additionalParams"):
@@ -2641,6 +2804,7 @@ class NsLcm(LcmBase):
                             kdu_name=kdu_name,
                             member_vnf_index=member_vnf_index,
                             vdu_index=vdu_index,
                             kdu_name=kdu_name,
                             member_vnf_index=member_vnf_index,
                             vdu_index=vdu_index,
+                            kdu_index=kdu_index,
                             vdu_name=vdu_name,
                             deploy_params=deploy_params_kdu,
                             descriptor_config=descriptor_config,
                             vdu_name=vdu_name,
                             deploy_params=deploy_params_kdu,
                             descriptor_config=descriptor_config,
@@ -2649,14 +2813,74 @@ class NsLcm(LcmBase):
                             stage=stage,
                         )
 
                             stage=stage,
                         )
 
-            # Check if this NS has a charm configuration
-            descriptor_config = nsd.get("ns-configuration")
-            if descriptor_config and descriptor_config.get("juju"):
-                vnfd_id = None
-                db_vnfr = None
-                member_vnf_index = None
-                vdu_id = None
+            # Check if each vnf has exporter for metric collection if so update prometheus job records
+            if "exporters-endpoints" in vnfd.get("df")[0]:
+                exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
+                self.logger.debug("exporter config :{}".format(exporter_config))
+                artifact_path = "{}/{}/{}".format(
+                    base_folder["folder"],
+                    base_folder["pkg-dir"],
+                    "exporter-endpoint",
+                )
+                ee_id = None
+                ee_config_descriptor = exporter_config
+                vnfr_id = db_vnfr["id"]
+                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+                    logging_text,
+                    nsr_id,
+                    vnfr_id,
+                    vdu_id=None,
+                    vdu_index=None,
+                    user=None,
+                    pub_key=None,
+                )
+                self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
+                self.logger.debug("Artifact_path:{}".format(artifact_path))
+                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+                vdu_id_for_prom = None
+                vdu_index_for_prom = None
+                for x in get_iterable(db_vnfr, "vdur"):
+                    vdu_id_for_prom = x.get("vdu-id-ref")
+                    vdu_index_for_prom = x.get("count-index")
+                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
+                    ee_id=ee_id,
+                    artifact_path=artifact_path,
+                    ee_config_descriptor=ee_config_descriptor,
+                    vnfr_id=vnfr_id,
+                    nsr_id=nsr_id,
+                    target_ip=rw_mgmt_ip,
+                    element_type="VDU",
+                    vdu_id=vdu_id_for_prom,
+                    vdu_index=vdu_index_for_prom,
+                )
+
+                self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
+                if prometheus_jobs:
+                    db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
+                    self.update_db_2(
+                        "nsrs",
+                        nsr_id,
+                        db_nsr_update,
+                    )
+
+                    for job in prometheus_jobs:
+                        self.db.set_one(
+                            "prometheus_jobs",
+                            {"job_name": job["job_name"]},
+                            job,
+                            upsert=True,
+                            fail_on_empty=False,
+                        )
+
+            # Check if this NS has a charm configuration
+            descriptor_config = nsd.get("ns-configuration")
+            if descriptor_config and descriptor_config.get("juju"):
+                vnfd_id = None
+                db_vnfr = None
+                member_vnf_index = None
+                vdu_id = None
                 kdu_name = None
                 kdu_name = None
+                kdu_index = None
                 vdu_index = 0
                 vdu_name = None
 
                 vdu_index = 0
                 vdu_name = None
 
@@ -2679,6 +2903,7 @@ class NsLcm(LcmBase):
                     kdu_name=kdu_name,
                     member_vnf_index=member_vnf_index,
                     vdu_index=vdu_index,
                     kdu_name=kdu_name,
                     member_vnf_index=member_vnf_index,
                     vdu_index=vdu_index,
+                    kdu_index=kdu_index,
                     vdu_name=vdu_name,
                     deploy_params=deploy_params,
                     descriptor_config=descriptor_config,
                     vdu_name=vdu_name,
                     deploy_params=deploy_params,
                     descriptor_config=descriptor_config,
@@ -2729,7 +2954,15 @@ class NsLcm(LcmBase):
                 stage[1] = stage[2] = ""
             except asyncio.CancelledError:
                 error_list.append("Cancelled")
                 stage[1] = stage[2] = ""
             except asyncio.CancelledError:
                 error_list.append("Cancelled")
-                # TODO cancel all tasks
+                await self._cancel_pending_tasks(logging_text, tasks_dict_info)
+                await self._wait_for_tasks(
+                    logging_text,
+                    tasks_dict_info,
+                    timeout_ns_deploy,
+                    stage,
+                    nslcmop_id,
+                    nsr_id=nsr_id,
+                )
             except Exception as exc:
                 error_list.append(str(exc))
 
             except Exception as exc:
                 error_list.append(str(exc))
 
@@ -2770,7 +3003,27 @@ class NsLcm(LcmBase):
                 db_nsr_update["detailed-status"] = "Done"
                 db_nslcmop_update["detailed-status"] = "Done"
                 nslcmop_operation_state = "COMPLETED"
                 db_nsr_update["detailed-status"] = "Done"
                 db_nslcmop_update["detailed-status"] = "Done"
                 nslcmop_operation_state = "COMPLETED"
-
+                # Gather auto-healing and auto-scaling alerts for each vnfr
+                healing_alerts = []
+                scaling_alerts = []
+                for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
+                    vnfd = next(
+                        (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
+                    )
+                    healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
+                    for alert in healing_alerts:
+                        self.logger.info(f"Storing healing alert in MongoDB: {alert}")
+                        self.db.create("alerts", alert)
+
+                    scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
+                    for alert in scaling_alerts:
+                        self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
+                        self.db.create("alerts", alert)
+
+                    alarm_alerts = self._gather_vnfr_alarm_alerts(vnfr, vnfd)
+                    for alert in alarm_alerts:
+                        self.logger.info(f"Storing VNF alarm alert in MongoDB: {alert}")
+                        self.db.create("alerts", alert)
             if db_nsr:
                 self._write_ns_status(
                     nsr_id=nsr_id,
             if db_nsr:
                 self._write_ns_status(
                     nsr_id=nsr_id,
@@ -2798,8 +3051,13 @@ class NsLcm(LcmBase):
                             "nsr_id": nsr_id,
                             "nslcmop_id": nslcmop_id,
                             "operationState": nslcmop_operation_state,
                             "nsr_id": nsr_id,
                             "nslcmop_id": nslcmop_id,
                             "operationState": nslcmop_operation_state,
+                            "startTime": db_nslcmop["startTime"],
+                            "links": db_nslcmop["links"],
+                            "operationParams": {
+                                "nsInstanceId": nsr_id,
+                                "nsdId": db_nsr["nsd-id"],
+                            },
                         },
                         },
-                        loop=self.loop,
                     )
                 except Exception as e:
                     self.logger.error(
                     )
                 except Exception as e:
                     self.logger.error(
@@ -2809,9 +3067,11 @@ class NsLcm(LcmBase):
             self.logger.debug(logging_text + "Exit")
             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
 
             self.logger.debug(logging_text + "Exit")
             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
 
-    def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
+    def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
         if vnfd_id not in cached_vnfds:
         if vnfd_id not in cached_vnfds:
-            cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
+            cached_vnfds[vnfd_id] = self.db.get_one(
+                "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
+            )
         return cached_vnfds[vnfd_id]
 
     def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
         return cached_vnfds[vnfd_id]
 
     def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
@@ -2853,7 +3113,8 @@ class NsLcm(LcmBase):
         ]:
             vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
             vnfd_id = vnf_profile["vnfd-id"]
         ]:
             vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
             vnfd_id = vnf_profile["vnfd-id"]
-            db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
+            project = nsd["_admin"]["projects_read"][0]
+            db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
             entity_id = (
                 vnfd_id
                 if ee_relation_level == EELevel.VNF
             entity_id = (
                 vnfd_id
                 if ee_relation_level == EELevel.VNF
@@ -2923,10 +3184,14 @@ class NsLcm(LcmBase):
         cached_vnfds: Dict[str, Any],
     ) -> List[Relation]:
         relations = []
         cached_vnfds: Dict[str, Any],
     ) -> List[Relation]:
         relations = []
+        if vca.target_element == "ns":
+            self.logger.debug("VCA is a NS charm, not a VNF.")
+            return relations
         vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
         vnf_profile_id = vnf_profile["id"]
         vnfd_id = vnf_profile["vnfd-id"]
         vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
         vnf_profile_id = vnf_profile["id"]
         vnfd_id = vnf_profile["vnfd-id"]
-        db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
+        project = nsd["_admin"]["projects_read"][0]
+        db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
         db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
         for r in db_vnf_relations:
             provider_dict = None
         db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
         for r in db_vnf_relations:
             provider_dict = None
@@ -2981,7 +3246,8 @@ class NsLcm(LcmBase):
             vnf_profiles,
             lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
         )["vnfd-id"]
             vnf_profiles,
             lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
         )["vnfd-id"]
-        db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
+        project = nsd["_admin"]["projects_read"][0]
+        db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
         kdu_resource_profile = get_kdu_resource_profile(
             db_vnfd, ee_relation.kdu_resource_profile_id
         )
         kdu_resource_profile = get_kdu_resource_profile(
             db_vnfd, ee_relation.kdu_resource_profile_id
         )
@@ -3087,11 +3353,14 @@ class NsLcm(LcmBase):
                 requirer_vca_id,
                 relation.requirer.endpoint,
             )
                 requirer_vca_id,
                 relation.requirer.endpoint,
             )
-            await self.vca_map[vca_type].add_relation(
-                provider=provider_relation_endpoint,
-                requirer=requirer_relation_endpoint,
-            )
-            # remove entry from relations list
+            try:
+                await self.vca_map[vca_type].add_relation(
+                    provider=provider_relation_endpoint,
+                    requirer=requirer_relation_endpoint,
+                )
+            except N2VCException as exception:
+                self.logger.error(exception)
+                raise LcmException(exception)
             return True
         return False
 
             return True
         return False
 
@@ -3103,7 +3372,6 @@ class NsLcm(LcmBase):
         vca_index: int,
         timeout: int = 3600,
     ) -> bool:
         vca_index: int,
         timeout: int = 3600,
     ) -> bool:
-
         # steps:
         # 1. find all relations for this VCA
         # 2. wait for other peers related
         # steps:
         # 1. find all relations for this VCA
         # 2. wait for other peers related
@@ -3181,7 +3449,6 @@ class NsLcm(LcmBase):
         timeout: int = 600,
         vca_id: str = None,
     ):
         timeout: int = 600,
         vca_id: str = None,
     ):
-
         try:
             k8sclustertype = k8s_instance_info["k8scluster-type"]
             # Instantiate kdu
         try:
             k8sclustertype = k8s_instance_info["k8scluster-type"]
             # Instantiate kdu
@@ -3201,9 +3468,45 @@ class NsLcm(LcmBase):
                     kdu_model=k8s_instance_info["kdu-model"],
                     kdu_name=k8s_instance_info["kdu-name"],
                 )
                     kdu_model=k8s_instance_info["kdu-model"],
                     kdu_name=k8s_instance_info["kdu-name"],
                 )
+
+            # Update the nsrs table with the kdu-instance value
             self.update_db_2(
             self.update_db_2(
-                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
+                item="nsrs",
+                _id=nsr_id,
+                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
             )
             )
+
+            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
+            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
+            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
+            # namespace, this first verification could be removed, and the next step would be done for any kind
+            # of KNF.
+            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
+            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
+            if k8sclustertype in ("juju", "juju-bundle"):
+                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
+                # that the user passed a namespace which he wants its KDU to be deployed in)
+                if (
+                    self.db.count(
+                        table="nsrs",
+                        q_filter={
+                            "_id": nsr_id,
+                            "_admin.projects_write": k8s_instance_info["namespace"],
+                            "_admin.projects_read": k8s_instance_info["namespace"],
+                        },
+                    )
+                    > 0
+                ):
+                    self.logger.debug(
+                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
+                    )
+                    self.update_db_2(
+                        item="nsrs",
+                        _id=nsr_id,
+                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
+                    )
+                    k8s_instance_info["namespace"] = kdu_instance
+
             await self.k8scluster_map[k8sclustertype].install(
                 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                 kdu_model=k8s_instance_info["kdu-model"],
             await self.k8scluster_map[k8sclustertype].install(
                 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                 kdu_model=k8s_instance_info["kdu-model"],
@@ -3216,9 +3519,6 @@ class NsLcm(LcmBase):
                 kdu_instance=kdu_instance,
                 vca_id=vca_id,
             )
                 kdu_instance=kdu_instance,
                 vca_id=vca_id,
             )
-            self.update_db_2(
-                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
-            )
 
             # Obtain services to obtain management service ip
             services = await self.k8scluster_map[k8sclustertype].get_services(
 
             # Obtain services to obtain management service ip
             services = await self.k8scluster_map[k8sclustertype].get_services(
@@ -3325,9 +3625,11 @@ class NsLcm(LcmBase):
                     vnfr_data.get("_id"),
                     {"kdur.{}.status".format(kdu_index): "ERROR"},
                 )
                     vnfr_data.get("_id"),
                     {"kdur.{}.status".format(kdu_index): "ERROR"},
                 )
-            except Exception:
+            except Exception as error:
                 # ignore to keep original exception
                 # ignore to keep original exception
-                pass
+                self.logger.warning(
+                    f"An exception occurred while updating DB: {str(error)}"
+                )
             # reraise original error
             raise
 
             # reraise original error
             raise
 
@@ -3346,7 +3648,6 @@ class NsLcm(LcmBase):
 
         k8scluster_id_2_uuic = {
             "helm-chart-v3": {},
 
         k8scluster_id_2_uuic = {
             "helm-chart-v3": {},
-            "helm-chart": {},
             "juju-bundle": {},
         }
 
             "juju-bundle": {},
         }
 
@@ -3445,11 +3746,6 @@ class NsLcm(LcmBase):
                         # Default version: helm3, if helm-version is v2 assign v2
                         k8sclustertype = "helm-chart-v3"
                         self.logger.debug("kdur: {}".format(kdur))
                         # Default version: helm3, if helm-version is v2 assign v2
                         k8sclustertype = "helm-chart-v3"
                         self.logger.debug("kdur: {}".format(kdur))
-                        if (
-                            kdur.get("helm-version")
-                            and kdur.get("helm-version") == "v2"
-                        ):
-                            k8sclustertype = "helm-chart"
                     elif kdur.get("juju-bundle"):
                         kdumodel = kdur["juju-bundle"]
                         k8sclustertype = "juju-bundle"
                     elif kdur.get("juju-bundle"):
                         kdumodel = kdur["juju-bundle"]
                         k8sclustertype = "juju-bundle"
@@ -3487,8 +3783,8 @@ class NsLcm(LcmBase):
                                 kdumodel = self.fs.path + filename
                     except (asyncio.TimeoutError, asyncio.CancelledError):
                         raise
                                 kdumodel = self.fs.path + filename
                     except (asyncio.TimeoutError, asyncio.CancelledError):
                         raise
-                    except Exception:  # it is not a file
-                        pass
+                    except Exception as e:  # it is not a file
+                        self.logger.warning(f"An exception occurred: {str(e)}")
 
                     k8s_cluster_id = kdur["k8s-cluster"]["id"]
                     step = "Synchronize repos for k8s cluster '{}'".format(
 
                     k8s_cluster_id = kdur["k8s-cluster"]["id"]
                     step = "Synchronize repos for k8s cluster '{}'".format(
@@ -3575,7 +3871,7 @@ class NsLcm(LcmBase):
                             vnfd_with_id,
                             k8s_instance_info,
                             k8params=desc_params,
                             vnfd_with_id,
                             k8s_instance_info,
                             k8params=desc_params,
-                            timeout=600,
+                            timeout=1800,
                             vca_id=vca_id,
                         )
                     )
                             vca_id=vca_id,
                         )
                     )
@@ -3618,6 +3914,7 @@ class NsLcm(LcmBase):
         kdu_name,
         member_vnf_index,
         vdu_index,
         kdu_name,
         member_vnf_index,
         vdu_index,
+        kdu_index,
         vdu_name,
         deploy_params,
         descriptor_config,
         vdu_name,
         deploy_params,
         descriptor_config,
@@ -3633,10 +3930,16 @@ class NsLcm(LcmBase):
         self.logger.debug(
             logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
         )
         self.logger.debug(
             logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
         )
+
+        charm_name = ""
+        get_charm_name = False
         if "execution-environment-list" in descriptor_config:
             ee_list = descriptor_config.get("execution-environment-list", [])
         elif "juju" in descriptor_config:
             ee_list = [descriptor_config]  # ns charms
         if "execution-environment-list" in descriptor_config:
             ee_list = descriptor_config.get("execution-environment-list", [])
         elif "juju" in descriptor_config:
             ee_list = [descriptor_config]  # ns charms
+            if "execution-environment-list" not in descriptor_config:
+                # charm name is only required for ns charms
+                get_charm_name = True
         else:  # other types as script are not supported
             ee_list = []
 
         else:  # other types as script are not supported
             ee_list = []
 
@@ -3650,6 +3953,8 @@ class NsLcm(LcmBase):
             ee_descriptor_id = ee_item.get("id")
             if ee_item.get("juju"):
                 vca_name = ee_item["juju"].get("charm")
             ee_descriptor_id = ee_item.get("id")
             if ee_item.get("juju"):
                 vca_name = ee_item["juju"].get("charm")
+                if get_charm_name:
+                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                 vca_type = (
                     "lxc_proxy_charm"
                     if ee_item["juju"].get("charm") is not None
                 vca_type = (
                     "lxc_proxy_charm"
                     if ee_item["juju"].get("charm") is not None
@@ -3661,10 +3966,7 @@ class NsLcm(LcmBase):
                     vca_type = "native_charm"
             elif ee_item.get("helm-chart"):
                 vca_name = ee_item["helm-chart"]
                     vca_type = "native_charm"
             elif ee_item.get("helm-chart"):
                 vca_name = ee_item["helm-chart"]
-                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
-                    vca_type = "helm"
-                else:
-                    vca_type = "helm-v3"
+                vca_type = "helm-v3"
             else:
                 self.logger.debug(
                     logging_text + "skipping non juju neither charm configuration"
             else:
                 self.logger.debug(
                     logging_text + "skipping non juju neither charm configuration"
@@ -3708,6 +4010,7 @@ class NsLcm(LcmBase):
                     "vdu_name": vdu_name,
                     "type": vca_type,
                     "ee_descriptor_id": ee_descriptor_id,
                     "vdu_name": vdu_name,
                     "type": vca_type,
                     "ee_descriptor_id": ee_descriptor_id,
+                    "charm_name": charm_name,
                 }
                 vca_index += 1
 
                 }
                 vca_index += 1
 
@@ -3735,6 +4038,7 @@ class NsLcm(LcmBase):
                     vdu_id=vdu_id,
                     kdu_name=kdu_name,
                     vdu_index=vdu_index,
                     vdu_id=vdu_id,
                     kdu_name=kdu_name,
                     vdu_index=vdu_index,
+                    kdu_index=kdu_index,
                     deploy_params=deploy_params,
                     config_descriptor=descriptor_config,
                     base_folder=base_folder,
                     deploy_params=deploy_params,
                     config_descriptor=descriptor_config,
                     base_folder=base_folder,
@@ -3758,41 +4062,6 @@ class NsLcm(LcmBase):
                 member_vnf_index or "", vdu_id or ""
             )
 
                 member_vnf_index or "", vdu_id or ""
             )
 
-    @staticmethod
-    def _create_nslcmop(nsr_id, operation, params):
-        """
-        Creates a ns-lcm-opp content to be stored at database.
-        :param nsr_id: internal id of the instance
-        :param operation: instantiate, terminate, scale, action, ...
-        :param params: user parameters for the operation
-        :return: dictionary following SOL005 format
-        """
-        # Raise exception if invalid arguments
-        if not (nsr_id and operation and params):
-            raise LcmException(
-                "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
-            )
-        now = time()
-        _id = str(uuid4())
-        nslcmop = {
-            "id": _id,
-            "_id": _id,
-            # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
-            "operationState": "PROCESSING",
-            "statusEnteredTime": now,
-            "nsInstanceId": nsr_id,
-            "lcmOperationType": operation,
-            "startTime": now,
-            "isAutomaticInvocation": False,
-            "operationParams": params,
-            "isCancelPending": False,
-            "links": {
-                "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
-                "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
-            },
-        }
-        return nslcmop
-
     def _format_additional_params(self, params):
         params = params or {}
         for key, value in params.items():
     def _format_additional_params(self, params):
         params = params or {}
         for key, value in params.items():
@@ -3988,12 +4257,6 @@ class NsLcm(LcmBase):
 
     # Function to return execution_environment id
 
 
     # Function to return execution_environment id
 
-    def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
-        # TODO vdu_index_count
-        for vca in vca_deployed_list:
-            if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
-                return vca["ee_id"]
-
     async def destroy_N2VC(
         self,
         logging_text,
     async def destroy_N2VC(
         self,
         logging_text,
@@ -4106,213 +4369,13 @@ class NsLcm(LcmBase):
         try:
             await self.n2vc.delete_namespace(
                 namespace=namespace,
         try:
             await self.n2vc.delete_namespace(
                 namespace=namespace,
-                total_timeout=self.timeout_charm_delete,
+                total_timeout=self.timeout.charm_delete,
                 vca_id=vca_id,
             )
         except N2VCNotFound:  # already deleted. Skip
             pass
         self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
 
                 vca_id=vca_id,
             )
         except N2VCNotFound:  # already deleted. Skip
             pass
         self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
 
-    async def _terminate_RO(
-        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
-    ):
-        """
-        Terminates a deployment from RO
-        :param logging_text:
-        :param nsr_deployed: db_nsr._admin.deployed
-        :param nsr_id:
-        :param nslcmop_id:
-        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
-            this method will update only the index 2, but it will write on database the concatenated content of the list
-        :return:
-        """
-        db_nsr_update = {}
-        failed_detail = []
-        ro_nsr_id = ro_delete_action = None
-        if nsr_deployed and nsr_deployed.get("RO"):
-            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
-            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
-        try:
-            if ro_nsr_id:
-                stage[2] = "Deleting ns from VIM."
-                db_nsr_update["detailed-status"] = " ".join(stage)
-                self._write_op_status(nslcmop_id, stage)
-                self.logger.debug(logging_text + stage[2])
-                self.update_db_2("nsrs", nsr_id, db_nsr_update)
-                self._write_op_status(nslcmop_id, stage)
-                desc = await self.RO.delete("ns", ro_nsr_id)
-                ro_delete_action = desc["action_id"]
-                db_nsr_update[
-                    "_admin.deployed.RO.nsr_delete_action_id"
-                ] = ro_delete_action
-                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
-                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
-            if ro_delete_action:
-                # wait until NS is deleted from VIM
-                stage[2] = "Waiting ns deleted from VIM."
-                detailed_status_old = None
-                self.logger.debug(
-                    logging_text
-                    + stage[2]
-                    + " RO_id={} ro_delete_action={}".format(
-                        ro_nsr_id, ro_delete_action
-                    )
-                )
-                self.update_db_2("nsrs", nsr_id, db_nsr_update)
-                self._write_op_status(nslcmop_id, stage)
-
-                delete_timeout = 20 * 60  # 20 minutes
-                while delete_timeout > 0:
-                    desc = await self.RO.show(
-                        "ns",
-                        item_id_name=ro_nsr_id,
-                        extra_item="action",
-                        extra_item_id=ro_delete_action,
-                    )
-
-                    # deploymentStatus
-                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
-
-                    ns_status, ns_status_info = self.RO.check_action_status(desc)
-                    if ns_status == "ERROR":
-                        raise ROclient.ROClientException(ns_status_info)
-                    elif ns_status == "BUILD":
-                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
-                    elif ns_status == "ACTIVE":
-                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
-                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
-                        break
-                    else:
-                        assert (
-                            False
-                        ), "ROclient.check_action_status returns unknown {}".format(
-                            ns_status
-                        )
-                    if stage[2] != detailed_status_old:
-                        detailed_status_old = stage[2]
-                        db_nsr_update["detailed-status"] = " ".join(stage)
-                        self._write_op_status(nslcmop_id, stage)
-                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
-                    await asyncio.sleep(5, loop=self.loop)
-                    delete_timeout -= 5
-                else:  # delete_timeout <= 0:
-                    raise ROclient.ROClientException(
-                        "Timeout waiting ns deleted from VIM"
-                    )
-
-        except Exception as e:
-            self.update_db_2("nsrs", nsr_id, db_nsr_update)
-            if (
-                isinstance(e, ROclient.ROClientException) and e.http_code == 404
-            ):  # not found
-                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
-                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
-                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
-                self.logger.debug(
-                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
-                )
-            elif (
-                isinstance(e, ROclient.ROClientException) and e.http_code == 409
-            ):  # conflict
-                failed_detail.append("delete conflict: {}".format(e))
-                self.logger.debug(
-                    logging_text
-                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
-                )
-            else:
-                failed_detail.append("delete error: {}".format(e))
-                self.logger.error(
-                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
-                )
-
-        # Delete nsd
-        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
-            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
-            try:
-                stage[2] = "Deleting nsd from RO."
-                db_nsr_update["detailed-status"] = " ".join(stage)
-                self.update_db_2("nsrs", nsr_id, db_nsr_update)
-                self._write_op_status(nslcmop_id, stage)
-                await self.RO.delete("nsd", ro_nsd_id)
-                self.logger.debug(
-                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
-                )
-                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
-            except Exception as e:
-                if (
-                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
-                ):  # not found
-                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
-                    self.logger.debug(
-                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
-                    )
-                elif (
-                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
-                ):  # conflict
-                    failed_detail.append(
-                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
-                    )
-                    self.logger.debug(logging_text + failed_detail[-1])
-                else:
-                    failed_detail.append(
-                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
-                    )
-                    self.logger.error(logging_text + failed_detail[-1])
-
-        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
-            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
-                if not vnf_deployed or not vnf_deployed["id"]:
-                    continue
-                try:
-                    ro_vnfd_id = vnf_deployed["id"]
-                    stage[
-                        2
-                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
-                        vnf_deployed["member-vnf-index"], ro_vnfd_id
-                    )
-                    db_nsr_update["detailed-status"] = " ".join(stage)
-                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
-                    self._write_op_status(nslcmop_id, stage)
-                    await self.RO.delete("vnfd", ro_vnfd_id)
-                    self.logger.debug(
-                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
-                    )
-                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
-                except Exception as e:
-                    if (
-                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
-                    ):  # not found
-                        db_nsr_update[
-                            "_admin.deployed.RO.vnfd.{}.id".format(index)
-                        ] = None
-                        self.logger.debug(
-                            logging_text
-                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
-                        )
-                    elif (
-                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
-                    ):  # conflict
-                        failed_detail.append(
-                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
-                        )
-                        self.logger.debug(logging_text + failed_detail[-1])
-                    else:
-                        failed_detail.append(
-                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
-                        )
-                        self.logger.error(logging_text + failed_detail[-1])
-
-        if failed_detail:
-            stage[2] = "Error deleting from VIM"
-        else:
-            stage[2] = "Deleted from VIM"
-        db_nsr_update["detailed-status"] = " ".join(stage)
-        self.update_db_2("nsrs", nsr_id, db_nsr_update)
-        self._write_op_status(nslcmop_id, stage)
-
-        if failed_detail:
-            raise LcmException("; ".join(failed_detail))
-
     async def terminate(self, nsr_id, nslcmop_id):
         # Try to lock HA task here
         task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
     async def terminate(self, nsr_id, nslcmop_id):
         # Try to lock HA task here
         task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
@@ -4321,7 +4384,7 @@ class NsLcm(LcmBase):
 
         logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
         self.logger.debug(logging_text + "Enter")
 
         logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
         self.logger.debug(logging_text + "Enter")
-        timeout_ns_terminate = self.timeout_ns_terminate
+        timeout_ns_terminate = self.timeout.ns_terminate
         db_nsr = None
         db_nslcmop = None
         operation_params = None
         db_nsr = None
         db_nslcmop = None
         operation_params = None
@@ -4419,9 +4482,7 @@ class NsLcm(LcmBase):
                 ) and vca.get("needed_terminate")
                 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                 # pending native charms
                 ) and vca.get("needed_terminate")
                 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                 # pending native charms
-                destroy_ee = (
-                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
-                )
+                destroy_ee = True if vca_type in ("helm-v3", "native_charm") else False
                 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                 #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                 task = asyncio.ensure_future(
                 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                 #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                 task = asyncio.ensure_future(
@@ -4447,7 +4508,7 @@ class NsLcm(LcmBase):
                 error_list = await self._wait_for_tasks(
                     logging_text,
                     tasks_dict_info,
                 error_list = await self._wait_for_tasks(
                     logging_text,
                     tasks_dict_info,
-                    min(self.timeout_charm_delete, timeout_ns_terminate),
+                    min(self.timeout.charm_delete, timeout_ns_terminate),
                     stage,
                     nslcmop_id,
                 )
                     stage,
                     nslcmop_id,
                 )
@@ -4465,12 +4526,22 @@ class NsLcm(LcmBase):
                 task_delete_ee = asyncio.ensure_future(
                     asyncio.wait_for(
                         self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                 task_delete_ee = asyncio.ensure_future(
                     asyncio.wait_for(
                         self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
-                        timeout=self.timeout_charm_delete,
+                        timeout=self.timeout.charm_delete,
                     )
                 )
                 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
 
                     )
                 )
                 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
 
+            # Delete Namespace and Certificates if necessary
+            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
+                await self.vca_map["helm-v3"].delete_tls_certificate(
+                    namespace=db_nslcmop["nsInstanceId"],
+                    certificate_name=self.EE_TLS_NAME,
+                )
+                await self.vca_map["helm-v3"].delete_namespace(
+                    namespace=db_nslcmop["nsInstanceId"],
+                )
+
             # Delete from k8scluster
             stage[1] = "Deleting KDUs."
             self.logger.debug(logging_text + stage[1])
             # Delete from k8scluster
             stage[1] = "Deleting KDUs."
             self.logger.debug(logging_text + stage[1])
@@ -4487,6 +4558,7 @@ class NsLcm(LcmBase):
                             cluster_uuid=kdu.get("k8scluster-uuid"),
                             kdu_instance=kdu_instance,
                             vca_id=vca_id,
                             cluster_uuid=kdu.get("k8scluster-uuid"),
                             kdu_instance=kdu_instance,
                             vca_id=vca_id,
+                            namespace=kdu.get("namespace"),
                         )
                     )
                 else:
                         )
                     )
                 else:
@@ -4503,19 +4575,13 @@ class NsLcm(LcmBase):
 
             # remove from RO
             stage[1] = "Deleting ns from VIM."
 
             # remove from RO
             stage[1] = "Deleting ns from VIM."
-            if self.ng_ro:
+            if self.ro_config.ng:
                 task_delete_ro = asyncio.ensure_future(
                     self._terminate_ng_ro(
                         logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                     )
                 )
                 task_delete_ro = asyncio.ensure_future(
                     self._terminate_ng_ro(
                         logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                     )
                 )
-            else:
-                task_delete_ro = asyncio.ensure_future(
-                    self._terminate_RO(
-                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
-                    )
-                )
-            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
+                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
 
             # rest of staff will be done at finally
 
 
             # rest of staff will be done at finally
 
@@ -4556,7 +4622,14 @@ class NsLcm(LcmBase):
                 stage[1] = stage[2] = ""
             except asyncio.CancelledError:
                 error_list.append("Cancelled")
                 stage[1] = stage[2] = ""
             except asyncio.CancelledError:
                 error_list.append("Cancelled")
-                # TODO cancell all tasks
+                await self._cancel_pending_tasks(logging_text, tasks_dict_info)
+                await self._wait_for_tasks(
+                    logging_text,
+                    tasks_dict_info,
+                    timeout_ns_terminate,
+                    stage,
+                    nslcmop_id,
+                )
             except Exception as exc:
                 error_list.append(str(exc))
             # update status at database
             except Exception as exc:
                 error_list.append(str(exc))
             # update status at database
@@ -4631,12 +4704,13 @@ class NsLcm(LcmBase):
                             "operationState": nslcmop_operation_state,
                             "autoremove": autoremove,
                         },
                             "operationState": nslcmop_operation_state,
                             "autoremove": autoremove,
                         },
-                        loop=self.loop,
                     )
                 except Exception as e:
                     self.logger.error(
                         logging_text + "kafka_write notification Exception {}".format(e)
                     )
                     )
                 except Exception as e:
                     self.logger.error(
                         logging_text + "kafka_write notification Exception {}".format(e)
                     )
+                self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
+                self.db.del_list("alerts", {"tags.ns_id": nsr_id})
 
             self.logger.debug(logging_text + "Exit")
             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
 
             self.logger.debug(logging_text + "Exit")
             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
@@ -4718,6 +4792,11 @@ class NsLcm(LcmBase):
             self._write_op_status(nslcmop_id, stage)
         return error_detail_list
 
             self._write_op_status(nslcmop_id, stage)
         return error_detail_list
 
+    async def _cancel_pending_tasks(self, logging_text, created_tasks_info):
+        for task, name in created_tasks_info.items():
+            self.logger.debug(logging_text + "Cancelling task: " + name)
+            task.cancel()
+
     @staticmethod
     def _map_primitive_params(primitive_desc, params, instantiation_params):
         """
     @staticmethod
     def _map_primitive_params(primitive_desc, params, instantiation_params):
         """
@@ -4866,21 +4945,19 @@ class NsLcm(LcmBase):
                             ee_id=ee_id,
                             primitive_name=primitive,
                             params_dict=primitive_params,
                             ee_id=ee_id,
                             primitive_name=primitive,
                             params_dict=primitive_params,
-                            progress_timeout=self.timeout_progress_primitive,
-                            total_timeout=self.timeout_primitive,
+                            progress_timeout=self.timeout.progress_primitive,
+                            total_timeout=self.timeout.primitive,
                             db_dict=db_dict,
                             vca_id=vca_id,
                             vca_type=vca_type,
                         ),
                             db_dict=db_dict,
                             vca_id=vca_id,
                             vca_type=vca_type,
                         ),
-                        timeout=timeout or self.timeout_primitive,
+                        timeout=timeout or self.timeout.primitive,
                     )
                     # execution was OK
                     break
                 except asyncio.CancelledError:
                     raise
                     )
                     # execution was OK
                     break
                 except asyncio.CancelledError:
                     raise
-                except Exception as e:  # asyncio.TimeoutError
-                    if isinstance(e, asyncio.TimeoutError):
-                        e = "Timeout"
+                except Exception as e:
                     retries -= 1
                     if retries >= 0:
                         self.logger.debug(
                     retries -= 1
                     if retries >= 0:
                         self.logger.debug(
@@ -4889,9 +4966,13 @@ class NsLcm(LcmBase):
                             )
                         )
                         # wait and retry
                             )
                         )
                         # wait and retry
-                        await asyncio.sleep(retries_interval, loop=self.loop)
+                        await asyncio.sleep(retries_interval)
                     else:
                     else:
-                        return "FAILED", str(e)
+                        if isinstance(e, asyncio.TimeoutError):
+                            e = N2VCException(
+                                message="Timed out waiting for action to complete"
+                            )
+                        return "FAILED", getattr(e, "message", repr(e))
 
             return "COMPLETED", output
 
 
             return "COMPLETED", output
 
@@ -4950,6 +5031,7 @@ class NsLcm(LcmBase):
         nslcmop_operation_state = None
         error_description_nslcmop = None
         exc = None
         nslcmop_operation_state = None
         error_description_nslcmop = None
         exc = None
+        step = ""
         try:
             # wait for any previous tasks in process
             step = "Waiting for previous operations to terminate"
         try:
             # wait for any previous tasks in process
             step = "Waiting for previous operations to terminate"
@@ -4978,7 +5060,7 @@ class NsLcm(LcmBase):
             primitive = db_nslcmop["operationParams"]["primitive"]
             primitive_params = db_nslcmop["operationParams"]["primitive_params"]
             timeout_ns_action = db_nslcmop["operationParams"].get(
             primitive = db_nslcmop["operationParams"]["primitive"]
             primitive_params = db_nslcmop["operationParams"]["primitive_params"]
             timeout_ns_action = db_nslcmop["operationParams"].get(
-                "timeout_ns_action", self.timeout_primitive
+                "timeout_ns_action", self.timeout.primitive
             )
 
             if vnf_index:
             )
 
             if vnf_index:
@@ -5079,7 +5161,7 @@ class NsLcm(LcmBase):
                 kdu_action = (
                     True
                     if primitive_name in actions
                 kdu_action = (
                     True
                     if primitive_name in actions
-                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
+                    and kdu["k8scluster-type"] != "helm-chart-v3"
                     else False
                 )
 
                     else False
                 )
 
@@ -5124,15 +5206,23 @@ class NsLcm(LcmBase):
                         del desc_params["kdu_model"]
                     else:
                         kdu_model = kdu.get("kdu-model")
                         del desc_params["kdu_model"]
                     else:
                         kdu_model = kdu.get("kdu-model")
-                        parts = kdu_model.split(sep=":")
-                        if len(parts) == 2:
-                            kdu_model = parts[0]
+                        if kdu_model.count("/") < 2:  # helm chart is not embedded
+                            parts = kdu_model.split(sep=":")
+                            if len(parts) == 2:
+                                kdu_model = parts[0]
+                    if desc_params.get("kdu_atomic_upgrade"):
+                        atomic_upgrade = desc_params.get(
+                            "kdu_atomic_upgrade"
+                        ).lower() in ("yes", "true", "1")
+                        del desc_params["kdu_atomic_upgrade"]
+                    else:
+                        atomic_upgrade = True
 
                     detailed_status = await asyncio.wait_for(
                         self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                             cluster_uuid=kdu.get("k8scluster-uuid"),
                             kdu_instance=kdu.get("kdu-instance"),
 
                     detailed_status = await asyncio.wait_for(
                         self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                             cluster_uuid=kdu.get("k8scluster-uuid"),
                             kdu_instance=kdu.get("kdu-instance"),
-                            atomic=True,
+                            atomic=atomic_upgrade,
                             kdu_model=kdu_model,
                             params=desc_params,
                             db_dict=db_dict,
                             kdu_model=kdu_model,
                             params=desc_params,
                             db_dict=db_dict,
@@ -5226,7 +5316,7 @@ class NsLcm(LcmBase):
             )
             self.logger.debug(
                 logging_text
             )
             self.logger.debug(
                 logging_text
-                + " task Done with result {} {}".format(
+                + "Done with result {} {}".format(
                     nslcmop_operation_state, detailed_status
                 )
             )
                     nslcmop_operation_state, detailed_status
                 )
             )
@@ -5288,7 +5378,6 @@ class NsLcm(LcmBase):
                             "nslcmop_id": nslcmop_id,
                             "operationState": nslcmop_operation_state,
                         },
                             "nslcmop_id": nslcmop_id,
                             "operationState": nslcmop_operation_state,
                         },
-                        loop=self.loop,
                     )
                 except Exception as e:
                     self.logger.error(
                     )
                 except Exception as e:
                     self.logger.error(
@@ -5298,50 +5387,294 @@ class NsLcm(LcmBase):
             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
             return nslcmop_operation_state, detailed_status
 
             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
             return nslcmop_operation_state, detailed_status
 
-    async def _ns_charm_upgrade(
-        self,
-        ee_id,
-        charm_id,
-        charm_type,
-        path,
-        timeout: float = None,
-    ) -> (str, str):
-        """This method upgrade charms in VNF instances
+    async def terminate_vdus(
+        self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
+    ):
+        """This method terminates VDUs
 
         Args:
 
         Args:
-            ee_id:  Execution environment id
-            path:   Local path to the charm
-            charm_id: charm-id
-            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
-            timeout: (Float)    Timeout for the ns update operation
+            db_vnfr: VNF instance record
+            member_vnf_index: VNF index to identify the VDUs to be removed
+            db_nsr: NS instance record
+            update_db_nslcmops: Nslcmop update record
+        """
+        vca_scaling_info = []
+        scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
+        scaling_info["scaling_direction"] = "IN"
+        scaling_info["vdu-delete"] = {}
+        scaling_info["kdu-delete"] = {}
+        db_vdur = db_vnfr.get("vdur")
+        vdur_list = copy(db_vdur)
+        count_index = 0
+        for index, vdu in enumerate(vdur_list):
+            vca_scaling_info.append(
+                {
+                    "osm_vdu_id": vdu["vdu-id-ref"],
+                    "member-vnf-index": member_vnf_index,
+                    "type": "delete",
+                    "vdu_index": count_index,
+                }
+            )
+            scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
+            scaling_info["vdu"].append(
+                {
+                    "name": vdu.get("name") or vdu.get("vdu-name"),
+                    "vdu_id": vdu["vdu-id-ref"],
+                    "interface": [],
+                }
+            )
+            for interface in vdu["interfaces"]:
+                scaling_info["vdu"][index]["interface"].append(
+                    {
+                        "name": interface["name"],
+                        "ip_address": interface["ip-address"],
+                        "mac_address": interface.get("mac-address"),
+                    }
+                )
+            self.logger.info("NS update scaling info{}".format(scaling_info))
+            stage[2] = "Terminating VDUs"
+            if scaling_info.get("vdu-delete"):
+                # scale_process = "RO"
+                if self.ro_config.ng:
+                    await self._scale_ng_ro(
+                        logging_text,
+                        db_nsr,
+                        update_db_nslcmops,
+                        db_vnfr,
+                        scaling_info,
+                        stage,
+                    )
+
+    async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
+        """This method is to Remove VNF instances from NS.
+
+        Args:
+            nsr_id: NS instance id
+            nslcmop_id: nslcmop id of update
+            vnf_instance_id: id of the VNF instance to be removed
 
         Returns:
             result: (str, str) COMPLETED/FAILED, details
         """
         try:
 
         Returns:
             result: (str, str) COMPLETED/FAILED, details
         """
         try:
-            charm_type = charm_type or "lxc_proxy_charm"
-            output = await self.vca_map[charm_type].upgrade_charm(
-                ee_id=ee_id,
-                path=path,
-                charm_id=charm_id,
-                charm_type=charm_type,
-                timeout=timeout or self.timeout_ns_update,
-            )
-
-            if output:
-                return "COMPLETED", output
+            db_nsr_update = {}
+            logging_text = "Task ns={} update ".format(nsr_id)
+            check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
+            self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
+            if check_vnfr_count > 1:
+                stage = ["", "", ""]
+                step = "Getting nslcmop from database"
+                self.logger.debug(
+                    step + " after having waited for previous tasks to be completed"
+                )
+                # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+                db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
+                member_vnf_index = db_vnfr["member-vnf-index-ref"]
+                """ db_vnfr = self.db.get_one(
+                    "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
+
+                update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+                await self.terminate_vdus(
+                    db_vnfr,
+                    member_vnf_index,
+                    db_nsr,
+                    update_db_nslcmops,
+                    stage,
+                    logging_text,
+                )
 
 
+                constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
+                constituent_vnfr.remove(db_vnfr.get("_id"))
+                db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
+                    "constituent-vnfr-ref"
+                )
+                self.update_db_2("nsrs", nsr_id, db_nsr_update)
+                self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
+                self.update_db_2("nsrs", nsr_id, db_nsr_update)
+                return "COMPLETED", "Done"
+            else:
+                step = "Terminate VNF Failed with"
+                raise LcmException(
+                    "{} Cannot terminate the last VNF in this NS.".format(
+                        vnf_instance_id
+                    )
+                )
         except (LcmException, asyncio.CancelledError):
             raise
         except (LcmException, asyncio.CancelledError):
             raise
-
         except Exception as e:
         except Exception as e:
+            self.logger.debug("Error removing VNF {}".format(e))
+            return "FAILED", "Error removing VNF {}".format(e)
 
 
-            self.logger.debug("Error upgrading charm {}".format(path))
-
-            return "FAILED", "Error upgrading charm {}: {}".format(path, e)
-
-    async def update(self, nsr_id, nslcmop_id):
-        """Update NS according to different update types
+    async def _ns_redeploy_vnf(
+        self,
+        nsr_id,
+        nslcmop_id,
+        db_vnfd,
+        db_vnfr,
+        db_nsr,
+    ):
+        """This method updates and redeploys VNF instances
+
+        Args:
+            nsr_id: NS instance id
+            nslcmop_id:   nslcmop id
+            db_vnfd: VNF descriptor
+            db_vnfr: VNF instance record
+            db_nsr: NS instance record
+
+        Returns:
+            result: (str, str) COMPLETED/FAILED, details
+        """
+        try:
+            count_index = 0
+            stage = ["", "", ""]
+            logging_text = "Task ns={} update ".format(nsr_id)
+            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
+            member_vnf_index = db_vnfr["member-vnf-index-ref"]
+
+            # Terminate old VNF resources
+            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+            await self.terminate_vdus(
+                db_vnfr,
+                member_vnf_index,
+                db_nsr,
+                update_db_nslcmops,
+                stage,
+                logging_text,
+            )
+
+            # old_vnfd_id = db_vnfr["vnfd-id"]
+            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+            new_db_vnfd = db_vnfd
+            # new_vnfd_ref = new_db_vnfd["id"]
+            # new_vnfd_id = vnfd_id
+
+            # Create VDUR
+            new_vnfr_cp = []
+            for cp in new_db_vnfd.get("ext-cpd", ()):
+                vnf_cp = {
+                    "name": cp.get("id"),
+                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
+                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
+                    "id": cp.get("id"),
+                }
+                new_vnfr_cp.append(vnf_cp)
+            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
+            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
+            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
+            new_vnfr_update = {
+                "revision": latest_vnfd_revision,
+                "connection-point": new_vnfr_cp,
+                "vdur": new_vdur,
+                "ip-address": "",
+            }
+            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
+            updated_db_vnfr = self.db.get_one(
+                "vnfrs",
+                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
+            )
+
+            # Instantiate new VNF resources
+            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+            vca_scaling_info = []
+            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
+            scaling_info["scaling_direction"] = "OUT"
+            scaling_info["vdu-create"] = {}
+            scaling_info["kdu-create"] = {}
+            vdud_instantiate_list = db_vnfd["vdu"]
+            for index, vdud in enumerate(vdud_instantiate_list):
+                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
+                if cloud_init_text:
+                    additional_params = (
+                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
+                        or {}
+                    )
+                cloud_init_list = []
+                if cloud_init_text:
+                    # TODO Information of its own ip is not available because db_vnfr is not updated.
+                    additional_params["OSM"] = get_osm_params(
+                        updated_db_vnfr, vdud["id"], 1
+                    )
+                    cloud_init_list.append(
+                        self._parse_cloud_init(
+                            cloud_init_text,
+                            additional_params,
+                            db_vnfd["id"],
+                            vdud["id"],
+                        )
+                    )
+                    vca_scaling_info.append(
+                        {
+                            "osm_vdu_id": vdud["id"],
+                            "member-vnf-index": member_vnf_index,
+                            "type": "create",
+                            "vdu_index": count_index,
+                        }
+                    )
+                scaling_info["vdu-create"][vdud["id"]] = count_index
+            if self.ro_config.ng:
+                self.logger.debug(
+                    "New Resources to be deployed: {}".format(scaling_info)
+                )
+                await self._scale_ng_ro(
+                    logging_text,
+                    db_nsr,
+                    update_db_nslcmops,
+                    updated_db_vnfr,
+                    scaling_info,
+                    stage,
+                )
+                return "COMPLETED", "Done"
+        except (LcmException, asyncio.CancelledError):
+            raise
+        except Exception as e:
+            self.logger.debug("Error updating VNF {}".format(e))
+            return "FAILED", "Error updating VNF {}".format(e)
+
+    async def _ns_charm_upgrade(
+        self,
+        ee_id,
+        charm_id,
+        charm_type,
+        path,
+        timeout: float = None,
+    ) -> (str, str):
+        """This method upgrade charms in VNF instances
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id: charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            result: (str, str) COMPLETED/FAILED, details
+        """
+        try:
+            charm_type = charm_type or "lxc_proxy_charm"
+            output = await self.vca_map[charm_type].upgrade_charm(
+                ee_id=ee_id,
+                path=path,
+                charm_id=charm_id,
+                charm_type=charm_type,
+                timeout=timeout or self.timeout.ns_update,
+            )
+
+            if output:
+                return "COMPLETED", output
+
+        except (LcmException, asyncio.CancelledError):
+            raise
+
+        except Exception as e:
+            self.logger.debug("Error upgrading charm {}".format(path))
+
+            return "FAILED", "Error upgrading charm {}: {}".format(path, e)
+
+    async def update(self, nsr_id, nslcmop_id):
+        """Update NS according to different update types
 
         This method performs upgrade of VNF instances then updates the revision
         number in VNF record
 
         This method performs upgrade of VNF instances then updates the revision
         number in VNF record
@@ -5370,8 +5703,9 @@ class NsLcm(LcmBase):
         db_nsr_update = {}
         error_description_nslcmop = ""
         exc = None
         db_nsr_update = {}
         error_description_nslcmop = ""
         exc = None
-        change_type = ""
+        change_type = "updated"
         detailed_status = ""
         detailed_status = ""
+        member_vnf_index = None
 
         try:
             # wait for any previous tasks in process
 
         try:
             # wait for any previous tasks in process
@@ -5398,7 +5732,6 @@ class NsLcm(LcmBase):
             nsr_deployed = db_nsr["_admin"].get("deployed")
 
             if update_type == "CHANGE_VNFPKG":
             nsr_deployed = db_nsr["_admin"].get("deployed")
 
             if update_type == "CHANGE_VNFPKG":
-
                 # Get the input parameters given through update request
                 vnf_instance_id = db_nslcmop["operationParams"][
                     "changeVnfPackageData"
                 # Get the input parameters given through update request
                 vnf_instance_id = db_nslcmop["operationParams"][
                     "changeVnfPackageData"
@@ -5433,14 +5766,16 @@ class NsLcm(LcmBase):
                     current_charm_artifact_path,
                     target_charm_artifact_path,
                     charm_artifact_paths,
                     current_charm_artifact_path,
                     target_charm_artifact_path,
                     charm_artifact_paths,
-                ) = ([], [], [])
+                    helm_artifacts,
+                ) = ([], [], [], [])
 
                 step = "Checking if revision has changed in VNFD"
                 if current_vnf_revision != latest_vnfd_revision:
 
                 step = "Checking if revision has changed in VNFD"
                 if current_vnf_revision != latest_vnfd_revision:
+                    change_type = "policy_updated"
 
                     # There is new revision of VNFD, update operation is required
                     current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
 
                     # There is new revision of VNFD, update operation is required
                     current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
-                    latest_vnfd_path = vnfd_id
+                    latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
 
                     step = "Removing the VNFD packages if they exist in the local path"
                     shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
 
                     step = "Removing the VNFD packages if they exist in the local path"
                     shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
@@ -5453,24 +5788,33 @@ class NsLcm(LcmBase):
                     step = (
                         "Get the charm-type, charm-id, ee-id if there is deployed VCA"
                     )
                     step = (
                         "Get the charm-type, charm-id, ee-id if there is deployed VCA"
                     )
-                    base_folder = latest_vnfd["_admin"]["storage"]
+                    current_base_folder = current_vnfd["_admin"]["storage"]
+                    latest_base_folder = latest_vnfd["_admin"]["storage"]
 
 
-                    for charm_index, charm_deployed in enumerate(
+                    for vca_index, vca_deployed in enumerate(
                         get_iterable(nsr_deployed, "VCA")
                     ):
                         vnf_index = db_vnfr.get("member-vnf-index-ref")
 
                         # Getting charm-id and charm-type
                         get_iterable(nsr_deployed, "VCA")
                     ):
                         vnf_index = db_vnfr.get("member-vnf-index-ref")
 
                         # Getting charm-id and charm-type
-                        if charm_deployed.get("member-vnf-index") == vnf_index:
-                            charm_id = self.get_vca_id(db_vnfr, db_nsr)
-                            charm_type = charm_deployed.get("type")
+                        if vca_deployed.get("member-vnf-index") == vnf_index:
+                            vca_id = self.get_vca_id(db_vnfr, db_nsr)
+                            vca_type = vca_deployed.get("type")
+                            vdu_count_index = vca_deployed.get("vdu_count_index")
 
                             # Getting ee-id
 
                             # Getting ee-id
-                            ee_id = charm_deployed.get("ee_id")
+                            ee_id = vca_deployed.get("ee_id")
 
                             step = "Getting descriptor config"
 
                             step = "Getting descriptor config"
+                            if current_vnfd.get("kdu"):
+                                search_key = "kdu_name"
+                            else:
+                                search_key = "vnfd_id"
+
+                            entity_id = vca_deployed.get(search_key)
+
                             descriptor_config = get_configuration(
                             descriptor_config = get_configuration(
-                                current_vnfd, current_vnfd["id"]
+                                current_vnfd, entity_id
                             )
 
                             if "execution-environment-list" in descriptor_config:
                             )
 
                             if "execution-environment-list" in descriptor_config:
@@ -5483,26 +5827,52 @@ class NsLcm(LcmBase):
                             # There could be several charm used in the same VNF
                             for ee_item in ee_list:
                                 if ee_item.get("juju"):
                             # There could be several charm used in the same VNF
                             for ee_item in ee_list:
                                 if ee_item.get("juju"):
-
                                     step = "Getting charm name"
                                     charm_name = ee_item["juju"].get("charm")
 
                                     step = "Setting Charm artifact paths"
                                     current_charm_artifact_path.append(
                                         get_charm_artifact_path(
                                     step = "Getting charm name"
                                     charm_name = ee_item["juju"].get("charm")
 
                                     step = "Setting Charm artifact paths"
                                     current_charm_artifact_path.append(
                                         get_charm_artifact_path(
-                                            base_folder,
+                                            current_base_folder,
                                             charm_name,
                                             charm_name,
-                                            charm_type,
+                                            vca_type,
                                             current_vnf_revision,
                                         )
                                     )
                                     target_charm_artifact_path.append(
                                         get_charm_artifact_path(
                                             current_vnf_revision,
                                         )
                                     )
                                     target_charm_artifact_path.append(
                                         get_charm_artifact_path(
-                                            base_folder,
+                                            latest_base_folder,
                                             charm_name,
                                             charm_name,
-                                            charm_type,
+                                            vca_type,
+                                            latest_vnfd_revision,
                                         )
                                     )
                                         )
                                     )
+                                elif ee_item.get("helm-chart"):
+                                    # add chart to list and all parameters
+                                    step = "Getting helm chart name"
+                                    chart_name = ee_item.get("helm-chart")
+                                    vca_type = "helm-v3"
+                                    step = "Setting Helm chart artifact paths"
+
+                                    helm_artifacts.append(
+                                        {
+                                            "current_artifact_path": get_charm_artifact_path(
+                                                current_base_folder,
+                                                chart_name,
+                                                vca_type,
+                                                current_vnf_revision,
+                                            ),
+                                            "target_artifact_path": get_charm_artifact_path(
+                                                latest_base_folder,
+                                                chart_name,
+                                                vca_type,
+                                                latest_vnfd_revision,
+                                            ),
+                                            "ee_id": ee_id,
+                                            "vca_index": vca_index,
+                                            "vdu_index": vdu_count_index,
+                                        }
+                                    )
 
                             charm_artifact_paths = zip(
                                 current_charm_artifact_path, target_charm_artifact_path
 
                             charm_artifact_paths = zip(
                                 current_charm_artifact_path, target_charm_artifact_path
@@ -5512,7 +5882,6 @@ class NsLcm(LcmBase):
                     if find_software_version(current_vnfd) != find_software_version(
                         latest_vnfd
                     ):
                     if find_software_version(current_vnfd) != find_software_version(
                         latest_vnfd
                     ):
-
                         step = "Checking if existing VNF has charm"
                         for current_charm_path, target_charm_path in list(
                             charm_artifact_paths
                         step = "Checking if existing VNF has charm"
                         for current_charm_path, target_charm_path in list(
                             charm_artifact_paths
@@ -5527,7 +5896,38 @@ class NsLcm(LcmBase):
                         # There is no change in the charm package, then redeploy the VNF
                         # based on new descriptor
                         step = "Redeploying VNF"
                         # There is no change in the charm package, then redeploy the VNF
                         # based on new descriptor
                         step = "Redeploying VNF"
-                        # This part is in https://osm.etsi.org/gerrit/11943
+                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
+                        (result, detailed_status) = await self._ns_redeploy_vnf(
+                            nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
+                        )
+                        if result == "FAILED":
+                            nslcmop_operation_state = result
+                            error_description_nslcmop = detailed_status
+                            old_operational_status = "failed"
+                        db_nslcmop_update["detailed-status"] = detailed_status
+                        db_nsr_update["detailed-status"] = detailed_status
+                        scaling_aspect = get_scaling_aspect(latest_vnfd)
+                        scaling_group_desc = db_nsr.get("_admin").get(
+                            "scaling-group", None
+                        )
+                        if scaling_group_desc:
+                            for aspect in scaling_aspect:
+                                scaling_group_id = aspect.get("id")
+                                for scale_index, scaling_group in enumerate(
+                                    scaling_group_desc
+                                ):
+                                    if scaling_group.get("name") == scaling_group_id:
+                                        db_nsr_update[
+                                            "_admin.scaling-group.{}.nb-scale-op".format(
+                                                scale_index
+                                            )
+                                        ] = 0
+                        self.logger.debug(
+                            logging_text
+                            + " step {} Done with result {} {}".format(
+                                step, nslcmop_operation_state, detailed_status
+                            )
+                        )
 
                     else:
                         step = "Checking if any charm package has changed or not"
 
                     else:
                         step = "Checking if any charm package has changed or not"
@@ -5541,10 +5941,8 @@ class NsLcm(LcmBase):
                                     current_charm_path, target_charm_path
                                 )
                             ):
                                     current_charm_path, target_charm_path
                                 )
                             ):
-
                                 step = "Checking whether VNF uses juju bundle"
                                 if check_juju_bundle_existence(current_vnfd):
                                 step = "Checking whether VNF uses juju bundle"
                                 if check_juju_bundle_existence(current_vnfd):
-
                                     raise LcmException(
                                         "Charm upgrade is not supported for the instance which"
                                         " uses juju-bundle: {}".format(
                                     raise LcmException(
                                         "Charm upgrade is not supported for the instance which"
                                         " uses juju-bundle: {}".format(
@@ -5558,8 +5956,8 @@ class NsLcm(LcmBase):
                                     detailed_status,
                                 ) = await self._ns_charm_upgrade(
                                     ee_id=ee_id,
                                     detailed_status,
                                 ) = await self._ns_charm_upgrade(
                                     ee_id=ee_id,
-                                    charm_id=charm_id,
-                                    charm_type=charm_type,
+                                    charm_id=vca_id,
+                                    charm_type=vca_type,
                                     path=self.fs.path + target_charm_path,
                                     timeout=timeout_seconds,
                                 )
                                     path=self.fs.path + target_charm_path,
                                     timeout=timeout_seconds,
                                 )
@@ -5577,7 +5975,125 @@ class NsLcm(LcmBase):
                                 )
 
                         step = "Updating policies"
                                 )
 
                         step = "Updating policies"
-                        # This part is in https://osm.etsi.org/gerrit/11943
+                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
+                        result = "COMPLETED"
+                        detailed_status = "Done"
+                        db_nslcmop_update["detailed-status"] = "Done"
+
+                    # helm base EE
+                    for item in helm_artifacts:
+                        if not (
+                            item["current_artifact_path"]
+                            and item["target_artifact_path"]
+                            and self.check_charm_hash_changed(
+                                item["current_artifact_path"],
+                                item["target_artifact_path"],
+                            )
+                        ):
+                            continue
+                        db_update_entry = "_admin.deployed.VCA.{}.".format(
+                            item["vca_index"]
+                        )
+                        vnfr_id = db_vnfr["_id"]
+                        osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
+                        db_dict = {
+                            "collection": "nsrs",
+                            "filter": {"_id": nsr_id},
+                            "path": db_update_entry,
+                        }
+                        vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
+                        await self.vca_map[vca_type].upgrade_execution_environment(
+                            namespace=namespace,
+                            helm_id=helm_id,
+                            db_dict=db_dict,
+                            config=osm_config,
+                            artifact_path=item["target_artifact_path"],
+                            vca_type=vca_type,
+                        )
+                        vnf_id = db_vnfr.get("vnfd-ref")
+                        config_descriptor = get_configuration(latest_vnfd, vnf_id)
+                        self.logger.debug("get ssh key block")
+                        rw_mgmt_ip = None
+                        if deep_get(
+                            config_descriptor,
+                            ("config-access", "ssh-access", "required"),
+                        ):
+                            # Needed to inject a ssh key
+                            user = deep_get(
+                                config_descriptor,
+                                ("config-access", "ssh-access", "default-user"),
+                            )
+                            step = (
+                                "Install configuration Software, getting public ssh key"
+                            )
+                            pub_key = await self.vca_map[
+                                vca_type
+                            ].get_ee_ssh_public__key(
+                                ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
+                            )
+
+                            step = (
+                                "Insert public key into VM user={} ssh_key={}".format(
+                                    user, pub_key
+                                )
+                            )
+                            self.logger.debug(logging_text + step)
+
+                            # wait for RO (ip-address) Insert pub_key into VM
+                            rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+                                logging_text,
+                                nsr_id,
+                                vnfr_id,
+                                None,
+                                item["vdu_index"],
+                                user=user,
+                                pub_key=pub_key,
+                            )
+
+                        initial_config_primitive_list = config_descriptor.get(
+                            "initial-config-primitive"
+                        )
+                        config_primitive = next(
+                            (
+                                p
+                                for p in initial_config_primitive_list
+                                if p["name"] == "config"
+                            ),
+                            None,
+                        )
+                        if not config_primitive:
+                            continue
+
+                        deploy_params = {"OSM": get_osm_params(db_vnfr)}
+                        if rw_mgmt_ip:
+                            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
+                        if db_vnfr.get("additionalParamsForVnf"):
+                            deploy_params.update(
+                                parse_yaml_strings(
+                                    db_vnfr["additionalParamsForVnf"].copy()
+                                )
+                            )
+                        primitive_params_ = self._map_primitive_params(
+                            config_primitive, {}, deploy_params
+                        )
+
+                        step = "execute primitive '{}' params '{}'".format(
+                            config_primitive["name"], primitive_params_
+                        )
+                        self.logger.debug(logging_text + step)
+                        await self.vca_map[vca_type].exec_primitive(
+                            ee_id=ee_id,
+                            primitive_name=config_primitive["name"],
+                            params_dict=primitive_params_,
+                            db_dict=db_dict,
+                            vca_id=vca_id,
+                            vca_type=vca_type,
+                        )
+
+                        step = "Updating policies"
+                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
+                        detailed_status = "Done"
+                        db_nslcmop_update["detailed-status"] = "Done"
 
                     #  If nslcmop_operation_state is None, so any operation is not failed.
                     if not nslcmop_operation_state:
 
                     #  If nslcmop_operation_state is None, so any operation is not failed.
                     if not nslcmop_operation_state:
@@ -5596,7 +6112,52 @@ class NsLcm(LcmBase):
                     )
             elif update_type == "REMOVE_VNF":
                 # This part is included in https://osm.etsi.org/gerrit/11876
                     )
             elif update_type == "REMOVE_VNF":
                 # This part is included in https://osm.etsi.org/gerrit/11876
-                pass
+                vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
+                db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
+                member_vnf_index = db_vnfr["member-vnf-index-ref"]
+                step = "Removing VNF"
+                (result, detailed_status) = await self.remove_vnf(
+                    nsr_id, nslcmop_id, vnf_instance_id
+                )
+                if result == "FAILED":
+                    nslcmop_operation_state = result
+                    error_description_nslcmop = detailed_status
+                db_nslcmop_update["detailed-status"] = detailed_status
+                change_type = "vnf_terminated"
+                if not nslcmop_operation_state:
+                    nslcmop_operation_state = "COMPLETED"
+                self.logger.debug(
+                    logging_text
+                    + " task Done with result {} {}".format(
+                        nslcmop_operation_state, detailed_status
+                    )
+                )
+
+            elif update_type == "OPERATE_VNF":
+                vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
+                    "vnfInstanceId"
+                ]
+                operation_type = db_nslcmop["operationParams"]["operateVnfData"][
+                    "changeStateTo"
+                ]
+                additional_param = db_nslcmop["operationParams"]["operateVnfData"][
+                    "additionalParam"
+                ]
+                (result, detailed_status) = await self.rebuild_start_stop(
+                    nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
+                )
+                if result == "FAILED":
+                    nslcmop_operation_state = result
+                    error_description_nslcmop = detailed_status
+                db_nslcmop_update["detailed-status"] = detailed_status
+                if not nslcmop_operation_state:
+                    nslcmop_operation_state = "COMPLETED"
+                self.logger.debug(
+                    logging_text
+                    + " task Done with result {} {}".format(
+                        nslcmop_operation_state, detailed_status
+                    )
+                )
 
             #  If nslcmop_operation_state is None, so any operation is not failed.
             #  All operations are executed in overall.
 
             #  If nslcmop_operation_state is None, so any operation is not failed.
             #  All operations are executed in overall.
@@ -5649,16 +6210,17 @@ class NsLcm(LcmBase):
 
             if nslcmop_operation_state:
                 try:
 
             if nslcmop_operation_state:
                 try:
-                    await self.msg.aiowrite(
-                        "ns",
-                        "updated",
-                        {
-                            "nsr_id": nsr_id,
-                            "nslcmop_id": nslcmop_id,
-                            "operationState": nslcmop_operation_state,
-                        },
-                        loop=self.loop,
-                    )
+                    msg = {
+                        "nsr_id": nsr_id,
+                        "nslcmop_id": nslcmop_id,
+                        "operationState": nslcmop_operation_state,
+                    }
+                    if (
+                        change_type in ("vnf_terminated", "policy_updated")
+                        and member_vnf_index
+                    ):
+                        msg.update({"vnf_member_index": member_vnf_index})
+                    await self.msg.aiowrite("ns", change_type, msg)
                 except Exception as e:
                     self.logger.error(
                         logging_text + "kafka_write notification Exception {}".format(e)
                 except Exception as e:
                     self.logger.error(
                         logging_text + "kafka_write notification Exception {}".format(e)
@@ -5688,6 +6250,7 @@ class NsLcm(LcmBase):
         old_operational_status = ""
         old_config_status = ""
         nsi_id = None
         old_operational_status = ""
         old_config_status = ""
         nsi_id = None
+        prom_job_name = ""
         try:
             # wait for any previous tasks in process
             step = "Waiting for previous operations to terminate"
         try:
             # wait for any previous tasks in process
             step = "Waiting for previous operations to terminate"
@@ -5798,6 +6361,15 @@ class NsLcm(LcmBase):
                         vdud = get_vdu(db_vnfd, vdu_delta["id"])
                         # vdu_index also provides the number of instance of the targeted vdu
                         vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
                         vdud = get_vdu(db_vnfd, vdu_delta["id"])
                         # vdu_index also provides the number of instance of the targeted vdu
                         vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
+                        if vdu_index <= len(db_vnfr["vdur"]):
+                            vdu_name_id = db_vnfr["vdur"][vdu_index - 1]["vdu-name"]
+                            prom_job_name = (
+                                db_vnfr["_id"] + vdu_name_id + str(vdu_index - 1)
+                            )
+                            prom_job_name = prom_job_name.replace("_", "")
+                            prom_job_name = prom_job_name.replace("-", "")
+                        else:
+                            prom_job_name = None
                         cloud_init_text = self._get_vdu_cloud_init_content(
                             vdud, db_vnfd
                         )
                         cloud_init_text = self._get_vdu_cloud_init_content(
                             vdud, db_vnfd
                         )
@@ -5874,11 +6446,6 @@ class NsLcm(LcmBase):
                         if kdur.get("helm-chart"):
                             k8s_cluster_type = "helm-chart-v3"
                             self.logger.debug("kdur: {}".format(kdur))
                         if kdur.get("helm-chart"):
                             k8s_cluster_type = "helm-chart-v3"
                             self.logger.debug("kdur: {}".format(kdur))
-                            if (
-                                kdur.get("helm-version")
-                                and kdur.get("helm-version") == "v2"
-                            ):
-                                k8s_cluster_type = "helm-chart"
                         elif kdur.get("juju-bundle"):
                             k8s_cluster_type = "juju-bundle"
                         else:
                         elif kdur.get("juju-bundle"):
                             k8s_cluster_type = "juju-bundle"
                         else:
@@ -6007,11 +6574,6 @@ class NsLcm(LcmBase):
                         if kdur.get("helm-chart"):
                             k8s_cluster_type = "helm-chart-v3"
                             self.logger.debug("kdur: {}".format(kdur))
                         if kdur.get("helm-chart"):
                             k8s_cluster_type = "helm-chart-v3"
                             self.logger.debug("kdur: {}".format(kdur))
-                            if (
-                                kdur.get("helm-version")
-                                and kdur.get("helm-version") == "v2"
-                            ):
-                                k8s_cluster_type = "helm-chart"
                         elif kdur.get("juju-bundle"):
                             k8s_cluster_type = "juju-bundle"
                         else:
                         elif kdur.get("juju-bundle"):
                             k8s_cluster_type = "juju-bundle"
                         else:
@@ -6298,7 +6860,7 @@ class NsLcm(LcmBase):
                                             scaling_in=True,
                                             vca_id=vca_id,
                                         ),
                                             scaling_in=True,
                                             vca_id=vca_id,
                                         ),
-                                        timeout=self.timeout_charm_delete,
+                                        timeout=self.timeout.charm_delete,
                                     )
                                 )
                                 tasks_dict_info[task] = "Terminating VCA {}".format(
                                     )
                                 )
                                 tasks_dict_info[task] = "Terminating VCA {}".format(
@@ -6318,7 +6880,7 @@ class NsLcm(LcmBase):
                                 logging_text,
                                 tasks_dict_info,
                                 min(
                                 logging_text,
                                 tasks_dict_info,
                                 min(
-                                    self.timeout_charm_delete, self.timeout_ns_terminate
+                                    self.timeout.charm_delete, self.timeout.ns_terminate
                                 ),
                                 stage,
                                 nslcmop_id,
                                 ),
                                 stage,
                                 nslcmop_id,
@@ -6340,7 +6902,7 @@ class NsLcm(LcmBase):
             # SCALE RO - BEGIN
             if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
                 scale_process = "RO"
             # SCALE RO - BEGIN
             if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
                 scale_process = "RO"
-                if self.ro_config.get("ng"):
+                if self.ro_config.ng:
                     await self._scale_ng_ro(
                         logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
                     )
                     await self._scale_ng_ro(
                         logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
                     )
@@ -6394,6 +6956,7 @@ class NsLcm(LcmBase):
                                 vdu_id = None
                                 vdu_name = None
                                 kdu_name = None
                                 vdu_id = None
                                 vdu_name = None
                                 kdu_name = None
+                                kdu_index = None
                                 self._deploy_n2vc(
                                     logging_text=logging_text
                                     + "member_vnf_index={} ".format(member_vnf_index),
                                 self._deploy_n2vc(
                                     logging_text=logging_text
                                     + "member_vnf_index={} ".format(member_vnf_index),
@@ -6405,6 +6968,7 @@ class NsLcm(LcmBase):
                                     vnfd_id=vnfd_id,
                                     vdu_id=vdu_id,
                                     kdu_name=kdu_name,
                                     vnfd_id=vnfd_id,
                                     vdu_id=vdu_id,
                                     kdu_name=kdu_name,
+                                    kdu_index=kdu_index,
                                     member_vnf_index=member_vnf_index,
                                     vdu_index=vdu_index,
                                     vdu_name=vdu_name,
                                     member_vnf_index=member_vnf_index,
                                     vdu_index=vdu_index,
                                     vdu_name=vdu_name,
@@ -6431,6 +6995,7 @@ class NsLcm(LcmBase):
                             if descriptor_config:
                                 vdu_name = None
                                 kdu_name = None
                             if descriptor_config:
                                 vdu_name = None
                                 kdu_name = None
+                                kdu_index = None
                                 stage[
                                     1
                                 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                 stage[
                                     1
                                 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
@@ -6453,6 +7018,7 @@ class NsLcm(LcmBase):
                                     kdu_name=kdu_name,
                                     member_vnf_index=member_vnf_index,
                                     vdu_index=vdu_index,
                                     kdu_name=kdu_name,
                                     member_vnf_index=member_vnf_index,
                                     vdu_index=vdu_index,
+                                    kdu_index=kdu_index,
                                     vdu_name=vdu_name,
                                     deploy_params=deploy_params_vdu,
                                     descriptor_config=descriptor_config,
                                     vdu_name=vdu_name,
                                     deploy_params=deploy_params_vdu,
                                     descriptor_config=descriptor_config,
@@ -6592,7 +7158,69 @@ class NsLcm(LcmBase):
                         db_nsr_update["config-status"] = old_config_status
                         scale_process = None
             # POST-SCALE END
                         db_nsr_update["config-status"] = old_config_status
                         scale_process = None
             # POST-SCALE END
+            # Check if each vnf has exporter for metric collection if so update prometheus job records
+            if scaling_type == "SCALE_OUT":
+                if "exporters-endpoints" in db_vnfd.get("df")[0]:
+                    vnfr_id = db_vnfr["id"]
+                    db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+                    exporter_config = db_vnfd.get("df")[0].get("exporters-endpoints")
+                    self.logger.debug("exporter config :{}".format(exporter_config))
+                    artifact_path = "{}/{}/{}".format(
+                        base_folder["folder"],
+                        base_folder["pkg-dir"],
+                        "exporter-endpoint",
+                    )
+                    ee_id = None
+                    ee_config_descriptor = exporter_config
+                    rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+                        logging_text,
+                        nsr_id,
+                        vnfr_id,
+                        vdu_id=db_vnfr["vdur"][-1]["vdu-id-ref"],
+                        vdu_index=db_vnfr["vdur"][-1]["count-index"],
+                        user=None,
+                        pub_key=None,
+                    )
+                    self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
+                    self.logger.debug("Artifact_path:{}".format(artifact_path))
+                    vdu_id_for_prom = None
+                    vdu_index_for_prom = None
+                    for x in get_iterable(db_vnfr, "vdur"):
+                        vdu_id_for_prom = x.get("vdu-id-ref")
+                        vdu_index_for_prom = x.get("count-index")
+                    vnfr_id = vnfr_id + vdu_id + str(vdu_index)
+                    vnfr_id = vnfr_id.replace("_", "")
+                    prometheus_jobs = await self.extract_prometheus_scrape_jobs(
+                        ee_id=ee_id,
+                        artifact_path=artifact_path,
+                        ee_config_descriptor=ee_config_descriptor,
+                        vnfr_id=vnfr_id,
+                        nsr_id=nsr_id,
+                        target_ip=rw_mgmt_ip,
+                        element_type="VDU",
+                        vdu_id=vdu_id_for_prom,
+                        vdu_index=vdu_index_for_prom,
+                    )
 
 
+                    self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
+                    if prometheus_jobs:
+                        db_nsr_update[
+                            "_admin.deployed.prometheus_jobs"
+                        ] = prometheus_jobs
+                        self.update_db_2(
+                            "nsrs",
+                            nsr_id,
+                            db_nsr_update,
+                        )
+
+                        for job in prometheus_jobs:
+                            self.db.set_one(
+                                "prometheus_jobs",
+                                {"job_name": ""},
+                                job,
+                                upsert=True,
+                                fail_on_empty=False,
+                            )
             db_nsr_update[
                 "detailed-status"
             ] = ""  # "scaled {} {}".format(scaling_group, scaling_type)
             db_nsr_update[
                 "detailed-status"
             ] = ""  # "scaled {} {}".format(scaling_group, scaling_type)
@@ -6623,27 +7251,45 @@ class NsLcm(LcmBase):
                 exc_info=True,
             )
         finally:
                 exc_info=True,
             )
         finally:
+            error_list = list()
+            if exc:
+                error_list.append(str(exc))
             self._write_ns_status(
                 nsr_id=nsr_id,
                 ns_state=None,
                 current_operation="IDLE",
                 current_operation_id=None,
             )
             self._write_ns_status(
                 nsr_id=nsr_id,
                 ns_state=None,
                 current_operation="IDLE",
                 current_operation_id=None,
             )
-            if tasks_dict_info:
-                stage[1] = "Waiting for instantiate pending tasks."
-                self.logger.debug(logging_text + stage[1])
-                exc = await self._wait_for_tasks(
+            try:
+                if tasks_dict_info:
+                    stage[1] = "Waiting for instantiate pending tasks."
+                    self.logger.debug(logging_text + stage[1])
+                    exc = await self._wait_for_tasks(
+                        logging_text,
+                        tasks_dict_info,
+                        self.timeout.ns_deploy,
+                        stage,
+                        nslcmop_id,
+                        nsr_id=nsr_id,
+                    )
+            except asyncio.CancelledError:
+                error_list.append("Cancelled")
+                await self._cancel_pending_tasks(logging_text, tasks_dict_info)
+                await self._wait_for_tasks(
                     logging_text,
                     tasks_dict_info,
                     logging_text,
                     tasks_dict_info,
-                    self.timeout_ns_deploy,
+                    self.timeout.ns_deploy,
                     stage,
                     nslcmop_id,
                     nsr_id=nsr_id,
                 )
                     stage,
                     nslcmop_id,
                     nsr_id=nsr_id,
                 )
-            if exc:
+            if error_list:
+                error_detail = "; ".join(error_list)
                 db_nslcmop_update[
                     "detailed-status"
                 db_nslcmop_update[
                     "detailed-status"
-                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
+                ] = error_description_nslcmop = "FAILED {}: {}".format(
+                    step, error_detail
+                )
                 nslcmop_operation_state = "FAILED"
                 if db_nsr:
                     db_nsr_update["operational-status"] = old_operational_status
                 nslcmop_operation_state = "FAILED"
                 if db_nsr:
                     db_nsr_update["operational-status"] = old_operational_status
@@ -6657,12 +7303,18 @@ class NsLcm(LcmBase):
                         db_nsr_update[
                             "detailed-status"
                         ] = "FAILED scaling nslcmop={} {}: {}".format(
                         db_nsr_update[
                             "detailed-status"
                         ] = "FAILED scaling nslcmop={} {}: {}".format(
-                            nslcmop_id, step, exc
+                            nslcmop_id, step, error_detail
                         )
             else:
                 error_description_nslcmop = None
                 nslcmop_operation_state = "COMPLETED"
                 db_nslcmop_update["detailed-status"] = "Done"
                         )
             else:
                 error_description_nslcmop = None
                 nslcmop_operation_state = "COMPLETED"
                 db_nslcmop_update["detailed-status"] = "Done"
+                if scaling_type == "SCALE_IN" and prom_job_name is not None:
+                    self.db.del_one(
+                        "prometheus_jobs",
+                        {"job_name": prom_job_name},
+                        fail_on_empty=False,
+                    )
 
             self._write_op_status(
                 op_id=nslcmop_id,
 
             self._write_op_status(
                 op_id=nslcmop_id,
@@ -6687,7 +7339,7 @@ class NsLcm(LcmBase):
                         "nslcmop_id": nslcmop_id,
                         "operationState": nslcmop_operation_state,
                     }
                         "nslcmop_id": nslcmop_id,
                         "operationState": nslcmop_operation_state,
                     }
-                    await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
+                    await self.msg.aiowrite("ns", "scaled", msg)
                 except Exception as e:
                     self.logger.error(
                         logging_text + "kafka_write notification Exception {}".format(e)
                 except Exception as e:
                     self.logger.error(
                         logging_text + "kafka_write notification Exception {}".format(e)
@@ -6750,23 +7402,27 @@ class NsLcm(LcmBase):
                                     primitive_name=terminate_config_primitive["name"],
                                     params=primitive_params_,
                                     db_dict=db_dict,
                                     primitive_name=terminate_config_primitive["name"],
                                     params=primitive_params_,
                                     db_dict=db_dict,
+                                    total_timeout=self.timeout.primitive,
                                     vca_id=vca_id,
                                 ),
                                     vca_id=vca_id,
                                 ),
-                                timeout=600,
+                                timeout=self.timeout.primitive
+                                * self.timeout.primitive_outer_factor,
                             )
 
                 await asyncio.wait_for(
                     self.k8scluster_map[k8s_cluster_type].scale(
                             )
 
                 await asyncio.wait_for(
                     self.k8scluster_map[k8s_cluster_type].scale(
-                        kdu_instance,
-                        scale,
-                        kdu_scaling_info["resource-name"],
+                        kdu_instance=kdu_instance,
+                        scale=scale,
+                        resource_name=kdu_scaling_info["resource-name"],
+                        total_timeout=self.timeout.scale_on_error,
                         vca_id=vca_id,
                         cluster_uuid=cluster_uuid,
                         kdu_model=kdu_model,
                         atomic=True,
                         db_dict=db_dict,
                     ),
                         vca_id=vca_id,
                         cluster_uuid=cluster_uuid,
                         kdu_model=kdu_model,
                         atomic=True,
                         db_dict=db_dict,
                     ),
-                    timeout=self.timeout_vca_on_error,
+                    timeout=self.timeout.scale_on_error
+                    * self.timeout.scale_on_error_outer_factor,
                 )
 
                 if kdu_scaling_info["type"] == "create":
                 )
 
                 if kdu_scaling_info["type"] == "create":
@@ -6841,7 +7497,7 @@ class NsLcm(LcmBase):
             n2vc_key_list,
             stage=stage,
             start_deploy=time(),
             n2vc_key_list,
             stage=stage,
             start_deploy=time(),
-            timeout_ns_deploy=self.timeout_ns_deploy,
+            timeout_ns_deploy=self.timeout.ns_deploy,
         )
         if vdu_scaling_info.get("vdu-delete"):
             self.scale_vnfr(
         )
         if vdu_scaling_info.get("vdu-delete"):
             self.scale_vnfr(
@@ -6849,8 +7505,48 @@ class NsLcm(LcmBase):
             )
 
     async def extract_prometheus_scrape_jobs(
             )
 
     async def extract_prometheus_scrape_jobs(
-        self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
-    ):
+        self,
+        ee_id: str,
+        artifact_path: str,
+        ee_config_descriptor: dict,
+        vnfr_id: str,
+        nsr_id: str,
+        target_ip: str,
+        element_type: str,
+        vnf_member_index: str = "",
+        vdu_id: str = "",
+        vdu_index: int = None,
+        kdu_name: str = "",
+        kdu_index: int = None,
+    ) -> dict:
+        """Method to extract prometheus scrape jobs from EE's Prometheus template job file
+            This method will wait until the corresponding VDU or KDU is fully instantiated
+
+        Args:
+            ee_id (str): Execution Environment ID
+            artifact_path (str): Path where the EE's content is (including the Prometheus template file)
+            ee_config_descriptor (dict): Execution Environment's configuration descriptor
+            vnfr_id (str): VNFR ID where this EE applies
+            nsr_id (str): NSR ID where this EE applies
+            target_ip (str): VDU/KDU instance IP address
+            element_type (str): NS or VNF or VDU or KDU
+            vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
+            vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
+            vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
+            kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
+            kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
+
+        Raises:
+            LcmException: When the VDU or KDU instance was not found in an hour
+
+        Returns:
+            _type_: Prometheus jobs
+        """
+        # default the vdur and kdur names to an empty string, to avoid any later
+        # problem with Prometheus when the element type is not VDU or KDU
+        vdur_name = ""
+        kdur_name = ""
+
         # look if exist a file called 'prometheus*.j2' and
         artifact_content = self.fs.dir_ls(artifact_path)
         job_file = next(
         # look if exist a file called 'prometheus*.j2' and
         artifact_content = self.fs.dir_ls(artifact_path)
         job_file = next(
@@ -6863,58 +7559,189 @@ class NsLcm(LcmBase):
         )
         if not job_file:
             return
         )
         if not job_file:
             return
+        self.logger.debug("Artifact path{}".format(artifact_path))
+        self.logger.debug("job file{}".format(job_file))
         with self.fs.file_open((artifact_path, job_file), "r") as f:
             job_data = f.read()
 
         with self.fs.file_open((artifact_path, job_file), "r") as f:
             job_data = f.read()
 
-        # TODO get_service
-        _, _, service = ee_id.partition(".")  # remove prefix   "namespace."
-        host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
-        host_port = "80"
-        vnfr_id = vnfr_id.replace("-", "")
-        variables = {
-            "JOB_NAME": vnfr_id,
-            "TARGET_IP": target_ip,
-            "EXPORTER_POD_IP": host_name,
-            "EXPORTER_POD_PORT": host_port,
-        }
-        job_list = parse_job(job_data, variables)
-        # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
-        for job in job_list:
-            if (
-                not isinstance(job.get("job_name"), str)
-                or vnfr_id not in job["job_name"]
-            ):
-                job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
-            job["nsr_id"] = nsr_id
-            job["vnfr_id"] = vnfr_id
-        return job_list
-
-    def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
-        """
-        Get VCA Cloud and VCA Cloud Credentials for the VIM account
-
-        :param: vim_account_id:     VIM Account ID
-
-        :return: (cloud_name, cloud_credential)
-        """
-        config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
-        return config.get("vca_cloud"), config.get("vca_cloud_credential")
-
-    def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
-        """
-        Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
-
-        :param: vim_account_id:     VIM Account ID
-
-        :return: (cloud_name, cloud_credential)
-        """
-        config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
-        return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
-
-    async def migrate(self, nsr_id, nslcmop_id):
-        """
-        Migrate VNFs and VDUs instances in a NS
-
+        # obtain the VDUR or KDUR, if the element type is VDU or KDU
+        if element_type in ("VDU", "KDU"):
+            for _ in range(360):
+                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+                if vdu_id and vdu_index is not None:
+                    vdur = next(
+                        (
+                            x
+                            for x in get_iterable(db_vnfr, "vdur")
+                            if (
+                                x.get("vdu-id-ref") == vdu_id
+                                and x.get("count-index") == vdu_index
+                            )
+                        ),
+                        {},
+                    )
+                    if vdur.get("name"):
+                        vdur_name = vdur.get("name")
+                        break
+                if kdu_name and kdu_index is not None:
+                    kdur = next(
+                        (
+                            x
+                            for x in get_iterable(db_vnfr, "kdur")
+                            if (
+                                x.get("kdu-name") == kdu_name
+                                and x.get("count-index") == kdu_index
+                            )
+                        ),
+                        {},
+                    )
+                    if kdur.get("name"):
+                        kdur_name = kdur.get("name")
+                        break
+
+                await asyncio.sleep(10)
+            else:
+                if vdu_id and vdu_index is not None:
+                    raise LcmException(
+                        f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
+                    )
+                if kdu_name and kdu_index is not None:
+                    raise LcmException(
+                        f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
+                    )
+
+        if ee_id is not None:
+            _, namespace, helm_id = get_ee_id_parts(
+                ee_id
+            )  # get namespace and EE gRPC service name
+            host_name = f'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc'  # svc_name.namespace.svc
+            host_port = "80"
+            vnfr_id = vnfr_id.replace("-", "")
+            variables = {
+                "JOB_NAME": vnfr_id,
+                "TARGET_IP": target_ip,
+                "EXPORTER_POD_IP": host_name,
+                "EXPORTER_POD_PORT": host_port,
+                "NSR_ID": nsr_id,
+                "VNF_MEMBER_INDEX": vnf_member_index,
+                "VDUR_NAME": vdur_name,
+                "KDUR_NAME": kdur_name,
+                "ELEMENT_TYPE": element_type,
+            }
+        else:
+            metric_path = ee_config_descriptor["metric-path"]
+            target_port = ee_config_descriptor["metric-port"]
+            vnfr_id = vnfr_id.replace("-", "")
+            variables = {
+                "JOB_NAME": vnfr_id,
+                "TARGET_IP": target_ip,
+                "TARGET_PORT": target_port,
+                "METRIC_PATH": metric_path,
+            }
+
+        job_list = parse_job(job_data, variables)
+        # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
+        for job in job_list:
+            if (
+                not isinstance(job.get("job_name"), str)
+                or vnfr_id not in job["job_name"]
+            ):
+                job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
+            job["nsr_id"] = nsr_id
+            job["vnfr_id"] = vnfr_id
+        return job_list
+
+    async def rebuild_start_stop(
+        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
+    ):
+        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
+        self.logger.info(logging_text + "Enter")
+        stage = ["Preparing the environment", ""]
+        # database nsrs record
+        db_nsr_update = {}
+        vdu_vim_name = None
+        vim_vm_id = None
+        # in case of error, indicates what part of scale was failed to put nsr at error status
+        start_deploy = time()
+        try:
+            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
+            vim_account_id = db_vnfr.get("vim-account-id")
+            vim_info_key = "vim:" + vim_account_id
+            vdu_id = additional_param["vdu_id"]
+            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
+            vdur = find_in_list(
+                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
+            )
+            if vdur:
+                vdu_vim_name = vdur["name"]
+                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
+                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
+            else:
+                raise LcmException("Target vdu is not found")
+            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
+            # wait for any previous tasks in process
+            stage[1] = "Waiting for previous operations to terminate"
+            self.logger.info(stage[1])
+            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
+
+            stage[1] = "Reading from database."
+            self.logger.info(stage[1])
+            self._write_ns_status(
+                nsr_id=nsr_id,
+                ns_state=None,
+                current_operation=operation_type.upper(),
+                current_operation_id=nslcmop_id,
+            )
+            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
+
+            # read from db: ns
+            stage[1] = "Getting nsr={} from db.".format(nsr_id)
+            db_nsr_update["operational-status"] = operation_type
+            self.update_db_2("nsrs", nsr_id, db_nsr_update)
+            # Payload for RO
+            desc = {
+                operation_type: {
+                    "vim_vm_id": vim_vm_id,
+                    "vnf_id": vnf_id,
+                    "vdu_index": additional_param["count-index"],
+                    "vdu_id": vdur["id"],
+                    "target_vim": target_vim,
+                    "vim_account_id": vim_account_id,
+                }
+            }
+            stage[1] = "Sending rebuild request to RO... {}".format(desc)
+            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
+            self.logger.info("ro nsr id: {}".format(nsr_id))
+            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
+            self.logger.info("response from RO: {}".format(result_dict))
+            action_id = result_dict["action_id"]
+            await self._wait_ng_ro(
+                nsr_id,
+                action_id,
+                nslcmop_id,
+                start_deploy,
+                self.timeout.operate,
+                None,
+                "start_stop_rebuild",
+            )
+            return "COMPLETED", "Done"
+        except (ROclient.ROClientException, DbException, LcmException) as e:
+            self.logger.error("Exit Exception {}".format(e))
+            exc = e
+        except asyncio.CancelledError:
+            self.logger.error("Cancelled Exception while '{}'".format(stage))
+            exc = "Operation was cancelled"
+        except Exception as e:
+            exc = traceback.format_exc()
+            self.logger.critical(
+                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
+            )
+            return "FAILED", "Error in operate VNF {}".format(exc)
+
+    async def migrate(self, nsr_id, nslcmop_id):
+        """
+        Migrate VNFs and VDUs instances in a NS
+
         :param: nsr_id: NS Instance ID
         :param: nslcmop_id: nslcmop ID of migrate
 
         :param: nsr_id: NS Instance ID
         :param: nslcmop_id: nslcmop ID of migrate
 
@@ -6938,16 +7765,18 @@ class NsLcm(LcmBase):
         try:
             # wait for any previous tasks in process
             step = "Waiting for previous operations to terminate"
         try:
             # wait for any previous tasks in process
             step = "Waiting for previous operations to terminate"
-            await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
+            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
 
             self._write_ns_status(
                 nsr_id=nsr_id,
                 ns_state=None,
                 current_operation="MIGRATING",
 
             self._write_ns_status(
                 nsr_id=nsr_id,
                 ns_state=None,
                 current_operation="MIGRATING",
-                current_operation_id=nslcmop_id
+                current_operation_id=nslcmop_id,
             )
             step = "Getting nslcmop from database"
             )
             step = "Getting nslcmop from database"
-            self.logger.debug(step + " after having waited for previous tasks to be completed")
+            self.logger.debug(
+                step + " after having waited for previous tasks to be completed"
+            )
             db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
             migrate_params = db_nslcmop.get("operationParams")
 
             db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
             migrate_params = db_nslcmop.get("operationParams")
 
@@ -6957,7 +7786,12 @@ class NsLcm(LcmBase):
             self.logger.debug("RO return > {}".format(desc))
             action_id = desc["action_id"]
             await self._wait_ng_ro(
             self.logger.debug("RO return > {}".format(desc))
             action_id = desc["action_id"]
             await self._wait_ng_ro(
-                nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_migrate
+                nsr_id,
+                action_id,
+                nslcmop_id,
+                start_deploy,
+                self.timeout.migrate,
+                operation="migrate",
             )
         except (ROclient.ROClientException, DbException, LcmException) as e:
             self.logger.error("Exit Exception {}".format(e))
             )
         except (ROclient.ROClientException, DbException, LcmException) as e:
             self.logger.error("Exit Exception {}".format(e))
@@ -6967,7 +7801,9 @@ class NsLcm(LcmBase):
             exc = "Operation was cancelled"
         except Exception as e:
             exc = traceback.format_exc()
             exc = "Operation was cancelled"
         except Exception as e:
             exc = traceback.format_exc()
-            self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
+            self.logger.critical(
+                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
+            )
         finally:
             self._write_ns_status(
                 nsr_id=nsr_id,
         finally:
             self._write_ns_status(
                 nsr_id=nsr_id,
@@ -6976,9 +7812,7 @@ class NsLcm(LcmBase):
                 current_operation_id=None,
             )
             if exc:
                 current_operation_id=None,
             )
             if exc:
-                db_nslcmop_update[
-                    "detailed-status"
-                ] = "FAILED {}: {}".format(step, exc)
+                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                 nslcmop_operation_state = "FAILED"
             else:
                 nslcmop_operation_state = "COMPLETED"
                 nslcmop_operation_state = "FAILED"
             else:
                 nslcmop_operation_state = "COMPLETED"
@@ -6999,10 +7833,1167 @@ class NsLcm(LcmBase):
                         "nslcmop_id": nslcmop_id,
                         "operationState": nslcmop_operation_state,
                     }
                         "nslcmop_id": nslcmop_id,
                         "operationState": nslcmop_operation_state,
                     }
-                    await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
+                    await self.msg.aiowrite("ns", "migrated", msg)
                 except Exception as e:
                     self.logger.error(
                         logging_text + "kafka_write notification Exception {}".format(e)
                     )
             self.logger.debug(logging_text + "Exit")
             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
                 except Exception as e:
                     self.logger.error(
                         logging_text + "kafka_write notification Exception {}".format(e)
                     )
             self.logger.debug(logging_text + "Exit")
             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
+
+    async def heal(self, nsr_id, nslcmop_id):
+        """
+        Heal NS
+
+        :param nsr_id: ns instance to heal
+        :param nslcmop_id: operation to run
+        :return:
+        """
+
+        # Try to lock HA task here
+        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
+        if not task_is_locked_by_me:
+            return
+
+        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
+        stage = ["", "", ""]
+        tasks_dict_info = {}
+        # ^ stage, step, VIM progress
+        self.logger.debug(logging_text + "Enter")
+        # get all needed from database
+        db_nsr = None
+        db_nslcmop_update = {}
+        db_nsr_update = {}
+        db_vnfrs = {}  # vnf's info indexed by _id
+        exc = None
+        old_operational_status = ""
+        old_config_status = ""
+        nsi_id = None
+        try:
+            # wait for any previous tasks in process
+            step = "Waiting for previous operations to terminate"
+            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
+            self._write_ns_status(
+                nsr_id=nsr_id,
+                ns_state=None,
+                current_operation="HEALING",
+                current_operation_id=nslcmop_id,
+            )
+
+            step = "Getting nslcmop from database"
+            self.logger.debug(
+                step + " after having waited for previous tasks to be completed"
+            )
+            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+
+            step = "Getting nsr from database"
+            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+            old_operational_status = db_nsr["operational-status"]
+            old_config_status = db_nsr["config-status"]
+
+            db_nsr_update = {
+                "_admin.deployed.RO.operational-status": "healing",
+            }
+            self.update_db_2("nsrs", nsr_id, db_nsr_update)
+
+            step = "Sending heal order to VIM"
+            await self.heal_RO(
+                logging_text=logging_text,
+                nsr_id=nsr_id,
+                db_nslcmop=db_nslcmop,
+                stage=stage,
+            )
+            # VCA tasks
+            # read from db: nsd
+            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
+            self.logger.debug(logging_text + stage[1])
+            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
+            self.fs.sync(db_nsr["nsd-id"])
+            db_nsr["nsd"] = nsd
+            # read from db: vnfr's of this ns
+            step = "Getting vnfrs from db"
+            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
+            for vnfr in db_vnfrs_list:
+                db_vnfrs[vnfr["_id"]] = vnfr
+            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
+
+            # Check for each target VNF
+            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
+            for target_vnf in target_list:
+                # Find this VNF in the list from DB
+                vnfr_id = target_vnf.get("vnfInstanceId", None)
+                if vnfr_id:
+                    db_vnfr = db_vnfrs[vnfr_id]
+                    vnfd_id = db_vnfr.get("vnfd-id")
+                    vnfd_ref = db_vnfr.get("vnfd-ref")
+                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+                    base_folder = vnfd["_admin"]["storage"]
+                    vdu_id = None
+                    vdu_index = 0
+                    vdu_name = None
+                    kdu_name = None
+                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
+                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")
+
+                    # Check each target VDU and deploy N2VC
+                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
+                        "vdu", []
+                    )
+                    if not target_vdu_list:
+                        # Codigo nuevo para crear diccionario
+                        target_vdu_list = []
+                        for existing_vdu in db_vnfr.get("vdur"):
+                            vdu_name = existing_vdu.get("vdu-name", None)
+                            vdu_index = existing_vdu.get("count-index", 0)
+                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
+                                "run-day1", False
+                            )
+                            vdu_to_be_healed = {
+                                "vdu-id": vdu_name,
+                                "count-index": vdu_index,
+                                "run-day1": vdu_run_day1,
+                            }
+                            target_vdu_list.append(vdu_to_be_healed)
+                    for target_vdu in target_vdu_list:
+                        deploy_params_vdu = target_vdu
+                        # Set run-day1 vnf level value if not vdu level value exists
+                        if not deploy_params_vdu.get("run-day1") and target_vnf.get(
+                            "additionalParams", {}
+                        ).get("run-day1"):
+                            deploy_params_vdu["run-day1"] = target_vnf[
+                                "additionalParams"
+                            ].get("run-day1")
+                        vdu_name = target_vdu.get("vdu-id", None)
+                        # TODO: Get vdu_id from vdud.
+                        vdu_id = vdu_name
+                        # For multi instance VDU count-index is mandatory
+                        # For single session VDU count-indes is 0
+                        vdu_index = target_vdu.get("count-index", 0)
+
+                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
+                        stage[1] = "Deploying Execution Environments."
+                        self.logger.debug(logging_text + stage[1])
+
+                        # VNF Level charm. Normal case when proxy charms.
+                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
+                        descriptor_config = get_configuration(vnfd, vnfd_ref)
+                        if descriptor_config:
+                            # Continue if healed machine is management machine
+                            vnf_ip_address = db_vnfr.get("ip-address")
+                            target_instance = None
+                            for instance in db_vnfr.get("vdur", None):
+                                if (
+                                    instance["vdu-name"] == vdu_name
+                                    and instance["count-index"] == vdu_index
+                                ):
+                                    target_instance = instance
+                                    break
+                            if vnf_ip_address == target_instance.get("ip-address"):
+                                self._heal_n2vc(
+                                    logging_text=logging_text
+                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
+                                        member_vnf_index, vdu_name, vdu_index
+                                    ),
+                                    db_nsr=db_nsr,
+                                    db_vnfr=db_vnfr,
+                                    nslcmop_id=nslcmop_id,
+                                    nsr_id=nsr_id,
+                                    nsi_id=nsi_id,
+                                    vnfd_id=vnfd_ref,
+                                    vdu_id=None,
+                                    kdu_name=None,
+                                    member_vnf_index=member_vnf_index,
+                                    vdu_index=0,
+                                    vdu_name=None,
+                                    deploy_params=deploy_params_vdu,
+                                    descriptor_config=descriptor_config,
+                                    base_folder=base_folder,
+                                    task_instantiation_info=tasks_dict_info,
+                                    stage=stage,
+                                )
+
+                        # VDU Level charm. Normal case with native charms.
+                        descriptor_config = get_configuration(vnfd, vdu_name)
+                        if descriptor_config:
+                            self._heal_n2vc(
+                                logging_text=logging_text
+                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
+                                    member_vnf_index, vdu_name, vdu_index
+                                ),
+                                db_nsr=db_nsr,
+                                db_vnfr=db_vnfr,
+                                nslcmop_id=nslcmop_id,
+                                nsr_id=nsr_id,
+                                nsi_id=nsi_id,
+                                vnfd_id=vnfd_ref,
+                                vdu_id=vdu_id,
+                                kdu_name=kdu_name,
+                                member_vnf_index=member_vnf_index,
+                                vdu_index=vdu_index,
+                                vdu_name=vdu_name,
+                                deploy_params=deploy_params_vdu,
+                                descriptor_config=descriptor_config,
+                                base_folder=base_folder,
+                                task_instantiation_info=tasks_dict_info,
+                                stage=stage,
+                            )
+
+        except (
+            ROclient.ROClientException,
+            DbException,
+            LcmException,
+            NgRoException,
+        ) as e:
+            self.logger.error(logging_text + "Exit Exception {}".format(e))
+            exc = e
+        except asyncio.CancelledError:
+            self.logger.error(
+                logging_text + "Cancelled Exception while '{}'".format(step)
+            )
+            exc = "Operation was cancelled"
+        except Exception as e:
+            exc = traceback.format_exc()
+            self.logger.critical(
+                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
+                exc_info=True,
+            )
+        finally:
+            error_list = list()
+            if exc:
+                error_list.append(str(exc))
+            try:
+                if tasks_dict_info:
+                    stage[1] = "Waiting for healing pending tasks."
+                    self.logger.debug(logging_text + stage[1])
+                    exc = await self._wait_for_tasks(
+                        logging_text,
+                        tasks_dict_info,
+                        self.timeout.ns_deploy,
+                        stage,
+                        nslcmop_id,
+                        nsr_id=nsr_id,
+                    )
+            except asyncio.CancelledError:
+                error_list.append("Cancelled")
+                await self._cancel_pending_tasks(logging_text, tasks_dict_info)
+                await self._wait_for_tasks(
+                    logging_text,
+                    tasks_dict_info,
+                    self.timeout.ns_deploy,
+                    stage,
+                    nslcmop_id,
+                    nsr_id=nsr_id,
+                )
+            if error_list:
+                error_detail = "; ".join(error_list)
+                db_nslcmop_update[
+                    "detailed-status"
+                ] = error_description_nslcmop = "FAILED {}: {}".format(
+                    step, error_detail
+                )
+                nslcmop_operation_state = "FAILED"
+                if db_nsr:
+                    db_nsr_update["operational-status"] = old_operational_status
+                    db_nsr_update["config-status"] = old_config_status
+                    db_nsr_update[
+                        "detailed-status"
+                    ] = "FAILED healing nslcmop={} {}: {}".format(
+                        nslcmop_id, step, error_detail
+                    )
+                    for task, task_name in tasks_dict_info.items():
+                        if not task.done() or task.cancelled() or task.exception():
+                            if task_name.startswith(self.task_name_deploy_vca):
+                                # A N2VC task is pending
+                                db_nsr_update["config-status"] = "failed"
+                            else:
+                                # RO task is pending
+                                db_nsr_update["operational-status"] = "failed"
+            else:
+                error_description_nslcmop = None
+                nslcmop_operation_state = "COMPLETED"
+                db_nslcmop_update["detailed-status"] = "Done"
+                db_nsr_update["detailed-status"] = "Done"
+                db_nsr_update["operational-status"] = "running"
+                db_nsr_update["config-status"] = "configured"
+
+            self._write_op_status(
+                op_id=nslcmop_id,
+                stage="",
+                error_message=error_description_nslcmop,
+                operation_state=nslcmop_operation_state,
+                other_update=db_nslcmop_update,
+            )
+            if db_nsr:
+                self._write_ns_status(
+                    nsr_id=nsr_id,
+                    ns_state=None,
+                    current_operation="IDLE",
+                    current_operation_id=None,
+                    other_update=db_nsr_update,
+                )
+
+            if nslcmop_operation_state:
+                try:
+                    msg = {
+                        "nsr_id": nsr_id,
+                        "nslcmop_id": nslcmop_id,
+                        "operationState": nslcmop_operation_state,
+                    }
+                    await self.msg.aiowrite("ns", "healed", msg)
+                except Exception as e:
+                    self.logger.error(
+                        logging_text + "kafka_write notification Exception {}".format(e)
+                    )
+            self.logger.debug(logging_text + "Exit")
+            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
+
+    async def heal_RO(
+        self,
+        logging_text,
+        nsr_id,
+        db_nslcmop,
+        stage,
+    ):
+        """
+        Heal at RO
+        :param logging_text: preffix text to use at logging
+        :param nsr_id: nsr identity
+        :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
+        :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
+        :return: None or exception
+        """
+
+        def get_vim_account(vim_account_id):
+            nonlocal db_vims
+            if vim_account_id in db_vims:
+                return db_vims[vim_account_id]
+            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
+            db_vims[vim_account_id] = db_vim
+            return db_vim
+
+        try:
+            start_heal = time()
+            ns_params = db_nslcmop.get("operationParams")
+            if ns_params and ns_params.get("timeout_ns_heal"):
+                timeout_ns_heal = ns_params["timeout_ns_heal"]
+            else:
+                timeout_ns_heal = self.timeout.ns_heal
+
+            db_vims = {}
+
+            nslcmop_id = db_nslcmop["_id"]
+            target = {
+                "action_id": nslcmop_id,
+            }
+            self.logger.warning(
+                "db_nslcmop={} and timeout_ns_heal={}".format(
+                    db_nslcmop, timeout_ns_heal
+                )
+            )
+            target.update(db_nslcmop.get("operationParams", {}))
+
+            self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
+            desc = await self.RO.recreate(nsr_id, target)
+            self.logger.debug("RO return > {}".format(desc))
+            action_id = desc["action_id"]
+            # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
+            await self._wait_ng_ro(
+                nsr_id,
+                action_id,
+                nslcmop_id,
+                start_heal,
+                timeout_ns_heal,
+                stage,
+                operation="healing",
+            )
+
+            # Updating NSR
+            db_nsr_update = {
+                "_admin.deployed.RO.operational-status": "running",
+                "detailed-status": " ".join(stage),
+            }
+            self.update_db_2("nsrs", nsr_id, db_nsr_update)
+            self._write_op_status(nslcmop_id, stage)
+            self.logger.debug(
+                logging_text + "ns healed at RO. RO_id={}".format(action_id)
+            )
+
+        except Exception as e:
+            stage[2] = "ERROR healing at VIM"
+            # self.set_vnfr_at_error(db_vnfrs, str(e))
+            self.logger.error(
+                "Error healing at VIM {}".format(e),
+                exc_info=not isinstance(
+                    e,
+                    (
+                        ROclient.ROClientException,
+                        LcmException,
+                        DbException,
+                        NgRoException,
+                    ),
+                ),
+            )
+            raise
+
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one ``heal_N2VC`` asyncio task per execution environment of the
        given configuration descriptor, reusing the matching entry of
        ``db_nsr._admin.deployed.VCA`` or creating a new one in the database.

        :param logging_text: prefix text to use at logging
        :param db_nsr: nsr database record (its _admin.deployed.VCA list is
            read and, for new entries, appended to)
        :param db_vnfr: vnfr database record, passed through to heal_N2VC
        :param nslcmop_id: operation id, used to register the launched tasks
        :param nsr_id: nsr identity
        :param nsi_id: nsi identity when this nsr belongs to a NSI (or None)
        :param vnfd_id: vnfd reference stored in the VCA entry
        :param vdu_id: target vdu id, or None for VNF/NS level configuration
        :param kdu_name: target kdu name, or None
        :param member_vnf_index: member-vnf-index-ref of the VNF, or None for NS level
        :param vdu_index: count-index of the VDU instance
        :param vdu_name: vdu name stored in the VCA entry
        :param deploy_params: parameters passed to heal_N2VC as deploy_params
        :param descriptor_config: configuration section of the descriptor; its
            execution-environment-list (or its own 'juju' section) is iterated
        :param task_instantiation_info: dict updated with {task: task name} for
            each launched task
        :param stage: list with 3 items passed through to heal_N2VC
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            # (condition is always true in this branch; kept for safety)
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # determine vca_type / vca_name from the ee item contents
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # look for an existing VCA entry matching this element; the for-else
            # falls into the else branch only when no match is found
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # after the loop vca_index holds the last existing index (or -1
                # for an empty list); advance it to the position of the new entry
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
+
+    async def heal_N2VC(
+        self,
+        logging_text,
+        vca_index,
+        nsi_id,
+        db_nsr,
+        db_vnfr,
+        vdu_id,
+        kdu_name,
+        vdu_index,
+        config_descriptor,
+        deploy_params,
+        base_folder,
+        nslcmop_id,
+        stage,
+        vca_type,
+        vca_name,
+        ee_config_descriptor,
+    ):
+        nsr_id = db_nsr["_id"]
+        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
+        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
+        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
+        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
+        db_dict = {
+            "collection": "nsrs",
+            "filter": {"_id": nsr_id},
+            "path": db_update_entry,
+        }
+        step = ""
+        try:
+            element_type = "NS"
+            element_under_configuration = nsr_id
+
+            vnfr_id = None
+            if db_vnfr:
+                vnfr_id = db_vnfr["_id"]
+                osm_config["osm"]["vnf_id"] = vnfr_id
+
+            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
+
+            if vca_type == "native_charm":
+                index_number = 0
+            else:
+                index_number = vdu_index or 0
+
+            if vnfr_id:
+                element_type = "VNF"
+                element_under_configuration = vnfr_id
+                namespace += ".{}-{}".format(vnfr_id, index_number)
+                if vdu_id:
+                    namespace += ".{}-{}".format(vdu_id, index_number)
+                    element_type = "VDU"
+                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
+                    osm_config["osm"]["vdu_id"] = vdu_id
+                elif kdu_name:
+                    namespace += ".{}".format(kdu_name)
+                    element_type = "KDU"
+                    element_under_configuration = kdu_name
+                    osm_config["osm"]["kdu_name"] = kdu_name
+
+            # Get artifact path
+            if base_folder["pkg-dir"]:
+                artifact_path = "{}/{}/{}/{}".format(
+                    base_folder["folder"],
+                    base_folder["pkg-dir"],
+                    "charms"
+                    if vca_type
+                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
+                    else "helm-charts",
+                    vca_name,
+                )
+            else:
+                artifact_path = "{}/Scripts/{}/{}/".format(
+                    base_folder["folder"],
+                    "charms"
+                    if vca_type
+                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
+                    else "helm-charts",
+                    vca_name,
+                )
+
+            self.logger.debug("Artifact path > {}".format(artifact_path))
+
+            # get initial_config_primitive_list that applies to this element
+            initial_config_primitive_list = config_descriptor.get(
+                "initial-config-primitive"
+            )
+
+            self.logger.debug(
+                "Initial config primitive list > {}".format(
+                    initial_config_primitive_list
+                )
+            )
+
+            # add config if not present for NS charm
+            ee_descriptor_id = ee_config_descriptor.get("id")
+            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
+            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
+                initial_config_primitive_list, vca_deployed, ee_descriptor_id
+            )
+
+            self.logger.debug(
+                "Initial config primitive list #2 > {}".format(
+                    initial_config_primitive_list
+                )
+            )
+            # n2vc_redesign STEP 3.1
+            # find old ee_id if exists
+            ee_id = vca_deployed.get("ee_id")
+
+            vca_id = self.get_vca_id(db_vnfr, db_nsr)
+            # create or register execution environment in VCA. Only for native charms when healing
+            if vca_type == "native_charm":
+                step = "Waiting to VM being up and getting IP address"
+                self.logger.debug(logging_text + step)
+                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+                    logging_text,
+                    nsr_id,
+                    vnfr_id,
+                    vdu_id,
+                    vdu_index,
+                    user=None,
+                    pub_key=None,
+                )
+                credentials = {"hostname": rw_mgmt_ip}
+                # get username
+                username = deep_get(
+                    config_descriptor, ("config-access", "ssh-access", "default-user")
+                )
+                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
+                #  merged. Meanwhile let's get username from initial-config-primitive
+                if not username and initial_config_primitive_list:
+                    for config_primitive in initial_config_primitive_list:
+                        for param in config_primitive.get("parameter", ()):
+                            if param["name"] == "ssh-username":
+                                username = param["value"]
+                                break
+                if not username:
+                    raise LcmException(
+                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
+                        "'config-access.ssh-access.default-user'"
+                    )
+                credentials["username"] = username
+
+                # n2vc_redesign STEP 3.2
+                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
+                self._write_configuration_status(
+                    nsr_id=nsr_id,
+                    vca_index=vca_index,
+                    status="REGISTERING",
+                    element_under_configuration=element_under_configuration,
+                    element_type=element_type,
+                )
+
+                step = "register execution environment {}".format(credentials)
+                self.logger.debug(logging_text + step)
+                ee_id = await self.vca_map[vca_type].register_execution_environment(
+                    credentials=credentials,
+                    namespace=namespace,
+                    db_dict=db_dict,
+                    vca_id=vca_id,
+                )
+
+                # update ee_id en db
+                db_dict_ee_id = {
+                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
+                }
+                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)
+
+            # for compatibility with MON/POL modules, the need model and application name at database
+            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
+            # Not sure if this need to be done when healing
+            """
+            ee_id_parts = ee_id.split(".")
+            db_nsr_update = {db_update_entry + "ee_id": ee_id}
+            if len(ee_id_parts) >= 2:
+                model_name = ee_id_parts[0]
+                application_name = ee_id_parts[1]
+                db_nsr_update[db_update_entry + "model"] = model_name
+                db_nsr_update[db_update_entry + "application"] = application_name
+            """
+
+            # n2vc_redesign STEP 3.3
+            # Install configuration software. Only for native charms.
+            step = "Install configuration Software"
+
+            self._write_configuration_status(
+                nsr_id=nsr_id,
+                vca_index=vca_index,
+                status="INSTALLING SW",
+                element_under_configuration=element_under_configuration,
+                element_type=element_type,
+                # other_update=db_nsr_update,
+                other_update=None,
+            )
+
+            # TODO check if already done
+            self.logger.debug(logging_text + step)
+            config = None
+            if vca_type == "native_charm":
+                config_primitive = next(
+                    (p for p in initial_config_primitive_list if p["name"] == "config"),
+                    None,
+                )
+                if config_primitive:
+                    config = self._map_primitive_params(
+                        config_primitive, {}, deploy_params
+                    )
+                await self.vca_map[vca_type].install_configuration_sw(
+                    ee_id=ee_id,
+                    artifact_path=artifact_path,
+                    db_dict=db_dict,
+                    config=config,
+                    num_units=1,
+                    vca_id=vca_id,
+                    vca_type=vca_type,
+                )
+
+            # write in db flag of configuration_sw already installed
+            self.update_db_2(
+                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
+            )
+
+            # Not sure if this need to be done when healing
+            """
+            # add relations for this VCA (wait for other peers related with this VCA)
+            await self._add_vca_relations(
+                logging_text=logging_text,
+                nsr_id=nsr_id,
+                vca_type=vca_type,
+                vca_index=vca_index,
+            )
+            """
+
+            # if SSH access is required, then get execution environment SSH public
+            # if native charm we have waited already to VM be UP
+            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
+                pub_key = None
+                user = None
+                # self.logger.debug("get ssh key block")
+                if deep_get(
+                    config_descriptor, ("config-access", "ssh-access", "required")
+                ):
+                    # self.logger.debug("ssh key needed")
+                    # Needed to inject a ssh key
+                    user = deep_get(
+                        config_descriptor,
+                        ("config-access", "ssh-access", "default-user"),
+                    )
+                    step = "Install configuration Software, getting public ssh key"
+                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
+                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
+                    )
+
+                    step = "Insert public key into VM user={} ssh_key={}".format(
+                        user, pub_key
+                    )
+                else:
+                    # self.logger.debug("no need to get ssh key")
+                    step = "Waiting to VM being up and getting IP address"
+                self.logger.debug(logging_text + step)
+
+                # n2vc_redesign STEP 5.1
+                # wait for RO (ip-address) Insert pub_key into VM
+                # IMPORTANT: We need do wait for RO to complete healing operation.
+                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
+                if vnfr_id:
+                    if kdu_name:
+                        rw_mgmt_ip = await self.wait_kdu_up(
+                            logging_text, nsr_id, vnfr_id, kdu_name
+                        )
+                    else:
+                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+                            logging_text,
+                            nsr_id,
+                            vnfr_id,
+                            vdu_id,
+                            vdu_index,
+                            user=user,
+                            pub_key=pub_key,
+                        )
+                else:
+                    rw_mgmt_ip = None  # This is for a NS configuration
+
+                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
+
+            # store rw_mgmt_ip in deploy params for later replacement
+            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
+
+            # Day1 operations.
+            # get run-day1 operation parameter
+            runDay1 = deploy_params.get("run-day1", False)
+            self.logger.debug(
+                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
+            )
+            if runDay1:
+                # n2vc_redesign STEP 6  Execute initial config primitive
+                step = "execute initial config primitive"
+
+                # wait for dependent primitives execution (NS -> VNF -> VDU)
+                if initial_config_primitive_list:
+                    await self._wait_dependent_n2vc(
+                        nsr_id, vca_deployed_list, vca_index
+                    )
+
+                # stage, in function of element type: vdu, kdu, vnf or ns
+                my_vca = vca_deployed_list[vca_index]
+                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
+                    # VDU or KDU
+                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
+                elif my_vca.get("member-vnf-index"):
+                    # VNF
+                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
+                else:
+                    # NS
+                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."
+
+                self._write_configuration_status(
+                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
+                )
+
+                self._write_op_status(op_id=nslcmop_id, stage=stage)
+
+                check_if_terminated_needed = True
+                for initial_config_primitive in initial_config_primitive_list:
+                    # adding information on the vca_deployed if it is a NS execution environment
+                    if not vca_deployed["member-vnf-index"]:
+                        deploy_params["ns_config_info"] = json.dumps(
+                            self._get_ns_config_info(nsr_id)
+                        )
+                    # TODO check if already done
+                    primitive_params_ = self._map_primitive_params(
+                        initial_config_primitive, {}, deploy_params
+                    )
+
+                    step = "execute primitive '{}' params '{}'".format(
+                        initial_config_primitive["name"], primitive_params_
+                    )
+                    self.logger.debug(logging_text + step)
+                    await self.vca_map[vca_type].exec_primitive(
+                        ee_id=ee_id,
+                        primitive_name=initial_config_primitive["name"],
+                        params_dict=primitive_params_,
+                        db_dict=db_dict,
+                        vca_id=vca_id,
+                        vca_type=vca_type,
+                    )
+                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
+                    if check_if_terminated_needed:
+                        if config_descriptor.get("terminate-config-primitive"):
+                            self.update_db_2(
+                                "nsrs",
+                                nsr_id,
+                                {db_update_entry + "needed_terminate": True},
+                            )
+                        check_if_terminated_needed = False
+
+                    # TODO register in database that primitive is done
+
+            # STEP 7 Configure metrics
+            # Not sure if this need to be done when healing
+            """
+            if vca_type == "helm" or vca_type == "helm-v3":
+                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
+                    ee_id=ee_id,
+                    artifact_path=artifact_path,
+                    ee_config_descriptor=ee_config_descriptor,
+                    vnfr_id=vnfr_id,
+                    nsr_id=nsr_id,
+                    target_ip=rw_mgmt_ip,
+                )
+                if prometheus_jobs:
+                    self.update_db_2(
+                        "nsrs",
+                        nsr_id,
+                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
+                    )
+
+                    for job in prometheus_jobs:
+                        self.db.set_one(
+                            "prometheus_jobs",
+                            {"job_name": job["job_name"]},
+                            job,
+                            upsert=True,
+                            fail_on_empty=False,
+                        )
+
+            """
+            step = "instantiated at VCA"
+            self.logger.debug(logging_text + step)
+
+            self._write_configuration_status(
+                nsr_id=nsr_id, vca_index=vca_index, status="READY"
+            )
+
+        except Exception as e:  # TODO not use Exception but N2VC exception
+            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
+            if not isinstance(
+                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
+            ):
+                self.logger.error(
+                    "Exception while {} : {}".format(step, e), exc_info=True
+                )
+            self._write_configuration_status(
+                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
+            )
+            raise LcmException("{} {}".format(step, e)) from e
+
+    async def _wait_heal_ro(
+        self,
+        nsr_id,
+        timeout=600,
+    ):
+        start_time = time()
+        while time() <= start_time + timeout:
+            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+            operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
+                "operational-status"
+            ]
+            self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
+            if operational_status_ro != "healing":
+                break
+            await asyncio.sleep(15)
+        else:  # timeout_ns_deploy
+            raise NgRoException("Timeout waiting ns to deploy")
+
+    async def vertical_scale(self, nsr_id, nslcmop_id):
+        """
+        Vertical Scale the VDUs in a NS
+
+        :param: nsr_id: NS Instance ID
+        :param: nslcmop_id: nslcmop ID of migrate
+
+        """
+        # Try to lock HA task here
+        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
+        if not task_is_locked_by_me:
+            return
+        logging_text = "Task ns={} vertical scale ".format(nsr_id)
+        self.logger.debug(logging_text + "Enter")
+        # get all needed from database
+        db_nslcmop = None
+        db_nslcmop_update = {}
+        nslcmop_operation_state = None
+        old_db_update = {}
+        q_filter = {}
+        old_vdu_index = None
+        old_flavor_id = None
+        db_nsr_update = {}
+        target = {}
+        exc = None
+        # in case of error, indicates what part of scale was failed to put nsr at error status
+        start_deploy = time()
+
+        try:
+            # wait for any previous tasks in process
+            step = "Waiting for previous operations to terminate"
+            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
+
+            self._write_ns_status(
+                nsr_id=nsr_id,
+                ns_state=None,
+                current_operation="VerticalScale",
+                current_operation_id=nslcmop_id,
+            )
+            step = "Getting nslcmop from database"
+            self.logger.debug(
+                step + " after having waited for previous tasks to be completed"
+            )
+            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+            operationParams = db_nslcmop.get("operationParams")
+            # Update the VNFRS and NSRS with the requested flavour detail, So that ro tasks can function properly
+            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+            db_flavor = db_nsr.get("flavor")
+            db_flavor_index = str(len(db_flavor))
+            change_vnf_flavor_data = operationParams["changeVnfFlavorData"]
+            flavor_dict = change_vnf_flavor_data["additionalParams"]
+            count_index = flavor_dict["vduCountIndex"]
+            vdu_id_ref = flavor_dict["vduid"]
+            flavor_dict_update = {
+                "id": db_flavor_index,
+                "memory-mb": flavor_dict["virtualMemory"],
+                "name": f"{vdu_id_ref}-{count_index}-flv",
+                "storage-gb": flavor_dict["sizeOfStorage"],
+                "vcpu-count": flavor_dict["numVirtualCpu"],
+            }
+            db_flavor.append(flavor_dict_update)
+            db_update = {}
+            db_update["flavor"] = db_flavor
+            ns_q_filter = {
+                "_id": nsr_id,
+            }
+            self.db.set_one(
+                "nsrs",
+                q_filter=ns_q_filter,
+                update_dict=db_update,
+                fail_on_empty=True,
+            )
+            db_vnfr = self.db.get_one(
+                "vnfrs", {"_id": change_vnf_flavor_data["vnfInstanceId"]}
+            )
+            for vdu_index, vdur in enumerate(db_vnfr.get("vdur", ())):
+                if (
+                    vdur.get("count-index") == count_index
+                    and vdur.get("vdu-id-ref") == vdu_id_ref
+                ):
+                    old_flavor_id = vdur.get("ns-flavor-id", 0)
+                    old_vdu_index = vdu_index
+                    filter_text = {
+                        "_id": change_vnf_flavor_data["vnfInstanceId"],
+                        "vdur.count-index": count_index,
+                        "vdur.vdu-id-ref": vdu_id_ref,
+                    }
+                    q_filter.update(filter_text)
+                    db_update = {}
+                    db_update[
+                        "vdur.{}.ns-flavor-id".format(vdu_index)
+                    ] = db_flavor_index
+                    self.db.set_one(
+                        "vnfrs",
+                        q_filter=q_filter,
+                        update_dict=db_update,
+                        fail_on_empty=True,
+                    )
+            target = {}
+            target.update(operationParams)
+            desc = await self.RO.vertical_scale(nsr_id, target)
+            self.logger.debug("RO return > {}".format(desc))
+            action_id = desc["action_id"]
+            await self._wait_ng_ro(
+                nsr_id,
+                action_id,
+                nslcmop_id,
+                start_deploy,
+                self.timeout.verticalscale,
+                operation="verticalscale",
+            )
+        except (
+            NgRoException,
+            ROclient.ROClientException,
+            DbException,
+            LcmException,
+        ) as e:
+            self.logger.error("Exit Exception {}".format(e))
+            exc = e
+        except asyncio.CancelledError:
+            self.logger.error("Cancelled Exception while '{}'".format(step))
+            exc = "Operation was cancelled"
+        except Exception as e:
+            exc = traceback.format_exc()
+            self.logger.critical(
+                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
+            )
+        finally:
+            self._write_ns_status(
+                nsr_id=nsr_id,
+                ns_state=None,
+                current_operation="IDLE",
+                current_operation_id=None,
+            )
+            if exc:
+                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
+                nslcmop_operation_state = "FAILED"
+                old_db_update[
+                    "vdur.{}.ns-flavor-id".format(old_vdu_index)
+                ] = old_flavor_id
+            else:
+                nslcmop_operation_state = "COMPLETED"
+                db_nslcmop_update["detailed-status"] = "Done"
+                db_nsr_update["detailed-status"] = "Done"
+
+            self._write_op_status(
+                op_id=nslcmop_id,
+                stage="",
+                error_message="",
+                operation_state=nslcmop_operation_state,
+                other_update=db_nslcmop_update,
+            )
+            if old_vdu_index and old_db_update != {}:
+                self.logger.critical(
+                    "Reverting Old Flavor -- : {}".format(old_db_update)
+                )
+                self.db.set_one(
+                    "vnfrs",
+                    q_filter=q_filter,
+                    update_dict=old_db_update,
+                    fail_on_empty=True,
+                )
+            if nslcmop_operation_state:
+                try:
+                    msg = {
+                        "nsr_id": nsr_id,
+                        "nslcmop_id": nslcmop_id,
+                        "operationState": nslcmop_operation_state,
+                    }
+                    await self.msg.aiowrite("ns", "verticalscaled", msg)
+                except Exception as e:
+                    self.logger.error(
+                        logging_text + "kafka_write notification Exception {}".format(e)
+                    )
+            self.logger.debug(logging_text + "Exit")
+            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")