Revert to start of branch, update to master 16/12816/1
author Mark Beierl <mark.beierl@canonical.com>
Fri, 6 Jan 2023 16:53:37 +0000 (11:53 -0500)
committer Mark Beierl <mark.beierl@canonical.com>
Fri, 6 Jan 2023 17:05:19 +0000 (12:05 -0500)
Change-Id: Ie0ca17776f73513efc94c34a18e16a4be0caba2f
Signed-off-by: Mark Beierl <mark.beierl@canonical.com>
28 files changed:
devops-stages/stage-test.sh
osm_lcm/data_utils/database/wim_account.py [new file with mode: 0644]
osm_lcm/data_utils/lcm_config.py [new file with mode: 0644]
osm_lcm/data_utils/vim.py
osm_lcm/data_utils/vnfd.py
osm_lcm/data_utils/wim.py [new file with mode: 0644]
osm_lcm/lcm.cfg
osm_lcm/lcm.py
osm_lcm/lcm_helm_conn.py
osm_lcm/lcm_utils.py
osm_lcm/netslice.py
osm_lcm/ns.py
osm_lcm/paas.py [deleted file]
osm_lcm/paas_conn.py [deleted file]
osm_lcm/paas_service.py [deleted file]
osm_lcm/tests/test_db_descriptors.py
osm_lcm/tests/test_lcm.py
osm_lcm/tests/test_lcm_helm_conn.py
osm_lcm/tests/test_lcm_utils.py
osm_lcm/tests/test_ns.py
osm_lcm/tests/test_paas.py [deleted file]
osm_lcm/tests/test_paas_conn.py [deleted file]
osm_lcm/tests/test_paas_service.py [deleted file]
osm_lcm/vim_sdn.py
requirements-dev.txt
requirements.in
requirements.txt
tox.ini

index 141fa55..45a6b1c 100755 (executable)
@@ -14,5 +14,6 @@
 # under the License.
 ##
 
-OUTPUT=$(TOX_PARALLEL_NO_SPINNER=1 tox --parallel=auto)
-printf "$OUTPUT"
+echo "Launching tox"
+TOX_PARALLEL_NO_SPINNER=1 tox --parallel=auto
+
diff --git a/osm_lcm/data_utils/database/wim_account.py b/osm_lcm/data_utils/database/wim_account.py
new file mode 100644 (file)
index 0000000..8b0b5f6
--- /dev/null
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+# This file is part of OSM Life-Cycle Management module
+#
+# Copyright 2022 ETSI
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+from osm_lcm.data_utils.database.database import Database
+
+__author__ = (
+    "Lluis Gifre <lluis.gifre@cttc.es>, Ricard Vilalta <ricard.vilalta@cttc.es>"
+)
+
+
+class WimAccountDB:
+    db = None
+    db_wims = {}
+
+    def initialize_db():
+        WimAccountDB.db = Database().instance.db
+
+    def get_wim_account_with_id(wim_account_id):
+        if not WimAccountDB.db:
+            WimAccountDB.initialize_db()
+        if wim_account_id in WimAccountDB.db_wims:
+            return WimAccountDB.db_wims[wim_account_id]
+        db_wim = WimAccountDB.db.get_one("wim_accounts", {"_id": wim_account_id}) or {}
+        WimAccountDB.db_wims[wim_account_id] = db_wim
+        return db_wim
+
+    def get_all_wim_accounts():
+        if not WimAccountDB.db:
+            WimAccountDB.initialize_db()
+        db_wims_list = WimAccountDB.db.get_list("wim_accounts")
+        WimAccountDB.db_wims.update({db_wim["_id"]: db_wim for db_wim in db_wims_list})
+        return WimAccountDB.db_wims
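A minimal usage sketch (illustrative, not part of this change set; the account id is hypothetical) showing the class-level cache in db_wims, which is filled on first lookup:

    from osm_lcm.data_utils.database.wim_account import WimAccountDB

    db_wim = WimAccountDB.get_wim_account_with_id("1f4a3a1e-aaaa-bbbb-cccc-000000000001")  # fetched once, then cached
    all_wims = WimAccountDB.get_all_wim_accounts()  # {_id: wim_account_record} for every WIM account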
diff --git a/osm_lcm/data_utils/lcm_config.py b/osm_lcm/data_utils/lcm_config.py
new file mode 100644 (file)
index 0000000..08a8728
--- /dev/null
@@ -0,0 +1,229 @@
+# Copyright 2022 Whitestack, LLC
+# *************************************************************
+#
+# This file is part of OSM Life-Cycle Management module
+# All Rights Reserved to Whitestack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: lvega@whitestack.com
+##
+
+from configman import ConfigMan
+from glom import glom, assign
+
+
+class OsmConfigman(ConfigMan):
+    def __init__(self, config_dict=None):
+        super().__init__()
+        self.set_from_dict(config_dict)
+        self.set_auto_env("OSMLCM")
+
+    def get(self, key, defaultValue):
+        return self.to_dict()[key]
+
+    def set_from_dict(self, config_dict):
+        def func(attr_path: str, _: type) -> None:
+            conf_val = glom(config_dict, attr_path, default=None)
+            if conf_val is not None:
+                assign(self, attr_path, conf_val)
+
+        self._run_func_for_all_premitives(func)
+
+    def _get_env_name(self, path: str, prefix: str = None) -> str:
+        path_parts = path.split(".")
+        if prefix is not None:
+            path_parts.insert(0, prefix)
+        return "_".join(path_parts).upper()
+
+    def transform(self):
+        pass
+
+
+# Configs from lcm.cfg
+
+
+class GlobalConfig(OsmConfigman):
+    loglevel: str = "DEBUG"
+    logfile: str = None
+    nologging: bool = False
+
+
+class Timeout(OsmConfigman):
+    nsi_deploy: int = 2 * 3600  # default global timeout for deploying an NSI
+    vca_on_error: int = (
+        5 * 60
+    )  # Time from when a charm first reaches blocked/error status until it is marked as failed
+    ns_deploy: int = 2 * 3600  # default global timeout for deploying an NS
+    ns_terminate: int = 1800  # default global timeout for undeploying an NS
+    ns_heal: int = 1800  # default global timeout for healing an NS
+    charm_delete: int = 10 * 60
+    primitive: int = 30 * 60  # timeout for primitive execution
+    ns_update: int = 30 * 60  # timeout for NS update
+    progress_primitive: int = (
+        10 * 60
+    )  # timeout for some progress in a primitive execution
+    migrate: int = 1800  # default global timeout for migrating VNFs
+    operate: int = 1800  # default global timeout for operating VNFs
+    verticalscale: int = 1800  # default global timeout for vertical scaling
+    scale_on_error = (
+        5 * 60
+    )  # Time from when a charm first reaches blocked/error status until it is marked as failed
+    scale_on_error_outer_factor = 1.05  # Factor applied to timeout_scale_on_error to obtain the timeout used within the asyncio.wait_for coroutine
+    primitive_outer_factor = 1.05  # Factor applied to timeout_primitive to obtain the timeout used within the asyncio.wait_for coroutine
+
+
+class RoConfig(OsmConfigman):
+    host: str = None
+    ng: bool = False
+    port: int = None
+    uri: str = None
+    tenant: str = "osm"
+    loglevel: str = "ERROR"
+    logfile: str = None
+    logger_name: str = None
+
+    def transform(self):
+        if not self.uri:
+            self.uri = "http://{}:{}/".format(self.host, self.port)
+        elif "/ro" in self.uri[-4:] or "/openmano" in self.uri[-10:]:
+            # uri ends with '/ro', '/ro/', '/openmano', '/openmano/'
+            index = self.uri[-1].rfind("/")
+            self.uri = self.uri[index + 1]
+        self.logger_name = "lcm.roclient"
+
+
+class VcaConfig(OsmConfigman):
+    host: str = None
+    port: int = None
+    user: str = None
+    secret: str = None
+    cloud: str = None
+    k8s_cloud: str = None
+    helmpath: str = None
+    helm3path: str = None
+    kubectlpath: str = None
+    jujupath: str = None
+    public_key: str = None
+    ca_cert: str = None
+    api_proxy: str = None
+    apt_mirror: str = None
+    eegrpcinittimeout: int = None
+    eegrpctimeout: int = None
+    eegrpc_tls_enforce: bool = False
+    loglevel: str = "DEBUG"
+    logfile: str = None
+    ca_store: str = "/etc/ssl/certs/osm-ca.crt"
+    kubectl_osm_namespace: str = "osm"
+    kubectl_osm_cluster_name: str = "_system-osm-k8s"
+    helm_ee_service_port: int = 50050
+    helm_max_initial_retry_time: int = 600
+    helm_max_retry_time: int = 30  # Max retry time for normal operations
+    helm_ee_retry_delay: int = (
+        10  # time between retries, retry time after a connection error is raised
+    )
+
+    def transform(self):
+        if self.eegrpcinittimeout:
+            self.helm_max_initial_retry_time = self.eegrpcinittimeout
+        if self.eegrpctimeout:
+            self.helm_max_retry_time = self.eegrpctimeout
+
+
+class DatabaseConfig(OsmConfigman):
+    driver: str = None
+    host: str = None
+    port: int = None
+    uri: str = None
+    name: str = None
+    replicaset: str = None
+    user: str = None
+    password: str = None
+    commonkey: str = None
+    loglevel: str = "DEBUG"
+    logfile: str = None
+    logger_name: str = None
+
+    def transform(self):
+        self.logger_name = "lcm.db"
+
+
+class StorageConfig(OsmConfigman):
+    driver: str = None
+    path: str = "/app/storage"
+    loglevel: str = "DEBUG"
+    logfile: str = None
+    logger_name: str = None
+    collection: str = None
+    uri: str = None
+
+    def transform(self):
+        self.logger_name = "lcm.fs"
+
+
+class MessageConfig(OsmConfigman):
+    driver: str = None
+    path: str = None
+    host: str = None
+    port: int = None
+    loglevel: str = "DEBUG"
+    logfile: str = None
+    group_id: str = None
+    logger_name: str = None
+
+    def transform(self):
+        self.logger_name = "lcm.msg"
+
+
+class TsdbConfig(OsmConfigman):
+    driver: str = None
+    path: str = None
+    uri: str = None
+    loglevel: str = "DEBUG"
+    logfile: str = None
+    logger_name: str = None
+
+    def transform(self):
+        self.logger_name = "lcm.prometheus"
+
+
+# Main configuration Template
+
+
+class LcmCfg(OsmConfigman):
+    globalConfig: GlobalConfig = GlobalConfig()
+    timeout: Timeout = Timeout()
+    RO: RoConfig = RoConfig()
+    VCA: VcaConfig = VcaConfig()
+    database: DatabaseConfig = DatabaseConfig()
+    storage: StorageConfig = StorageConfig()
+    message: MessageConfig = MessageConfig()
+    tsdb: TsdbConfig = TsdbConfig()
+
+    def transform(self):
+        for attribute in dir(self):
+            method = getattr(self, attribute)
+            if isinstance(method, OsmConfigman):
+                method.transform()
+
+
+class SubOperation(OsmConfigman):
+    STATUS_NOT_FOUND: int = -1
+    STATUS_NEW: int = -2
+    STATUS_SKIP: int = -3
+
+
+class LCMConfiguration(OsmConfigman):
+    suboperation: SubOperation = SubOperation()
+    task_name_deploy_vca = "Deploying VCA"
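A hedged sketch of how the typed configuration above is meant to be driven (values hypothetical; set_auto_env/load_from_env come from the ConfigMan base class, as used later in lcm.py):

    config = LcmCfg()
    config.set_from_dict({"RO": {"host": "ro", "port": 9090}, "database": {"driver": "mongo"}})
    config.transform()      # derives RO.uri, logger names, VCA retry times, ...
    config.load_from_env()  # e.g. OSMLCM_RO_PORT=9091 is expected to override RO.port via the OSMLCM prefix
    print(config.RO.uri)    # -> "http://ro:9090/"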
index 0e69572..2042fd8 100644 (file)
 # For those usages not covered by the Apache License, Version 2.0 please
 # contact: fbravo@whitestack.com
 ##
+
+from osm_lcm.data_utils.database.vim_account import VimAccountDB
+
+__author__ = (
+    "Lluis Gifre <lluis.gifre@cttc.es>, Ricard Vilalta <ricard.vilalta@cttc.es>"
+)
+
+
+def get_vims_to_connect(db_nsr, db_vnfrs, target_vld, logger):
+    vims_to_connect = set()
+    vld = next(
+        (vld for vld in db_nsr["vld"] if vld["id"] == target_vld["id"]),
+        None,
+    )
+    if vld is None:
+        return vims_to_connect  # VLD not in NS, means it is an internal VLD within a single VIM
+
+    vim_ids = set()
+    if "vnfd-connection-point-ref" in vld:
+        # during planning of VNF, use "vnfd-connection-point-ref" since "vim_info" is not available in vld
+        # get VNFD connection points (if available)
+        # iterate over VNFs and retrieve VIM IDs they are planned to be deployed to
+        vnfd_connection_point_ref = vld["vnfd-connection-point-ref"]
+        for vld_member_vnf_index_ref in vnfd_connection_point_ref:
+            vld_member_vnf_index_ref = vld_member_vnf_index_ref["member-vnf-index-ref"]
+            vim_ids.add(db_vnfrs[vld_member_vnf_index_ref]["vim-account-id"])
+    elif "vim_info" in vld:
+        # after instantiation of VNF, use "vim_info" since "vnfd-connection-point-ref" is not available in vld
+        # get VIM info (if available)
+        # iterate over VIM info and retrieve VIM IDs they are deployed to
+        vim_info = vld["vim_info"]
+        for vim_data in vim_info.values():
+            vim_ids.add(vim_data["vim_account_id"])
+    else:
+        # TODO: analyze if this situation is possible
+        # unable to retrieve planned/executed mapping of VNFs to VIMs
+        # by now, drop a log message for future debugging
+        logger.warning(
+            " ".join(
+                [
+                    "Unable to identify VIMs involved in VLD to check if WIM is required.",
+                    "Dumping internal variables for further debugging:",
+                ]
+            )
+        )
+        logger.warning("db_nsr={:s}".format(str(db_nsr)))
+        logger.warning("db_vnfrs={:s}".format(str(db_vnfrs)))
+        logger.warning("target_vld={:s}".format(str(target_vld)))
+        return vims_to_connect
+
+    for vim_id in vim_ids:
+        db_vim = VimAccountDB.get_vim_account_with_id(vim_id)
+        if db_vim is None:
+            continue
+        vims_to_connect.add(db_vim["name"])
+    return vims_to_connect
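A hedged, simplified example of the planning-time path (ids and names are hypothetical): a VLD referencing two member VNFs planned on different VIM accounts resolves to both VIM names through VimAccountDB:

    db_nsr = {"vld": [{"id": "datanet", "vnfd-connection-point-ref": [
        {"member-vnf-index-ref": "1"}, {"member-vnf-index-ref": "2"}]}]}
    db_vnfrs = {"1": {"vim-account-id": "vim-a-id"}, "2": {"vim-account-id": "vim-b-id"}}
    target_vld = {"id": "datanet"}
    # get_vims_to_connect(db_nsr, db_vnfrs, target_vld, logger) -> {"VIM-A", "VIM-B"}
    # (the names come from the VimAccountDB records of the two account ids)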
index ffcb582..9f8104a 100644 (file)
@@ -191,3 +191,15 @@ def find_software_version(vnfd: dict) -> str:
 
     else:
         return default_sw_version
+
+
+def check_helm_ee_in_ns(db_vnfds: list) -> bool:
+    for vnfd in db_vnfds:
+        descriptor_config = get_configuration(vnfd, vnfd["id"])
+        if not (
+            descriptor_config and "execution-environment-list" in descriptor_config
+        ):
+            continue
+        ee_list = descriptor_config.get("execution-environment-list", [])
+        if list_utils.find_in_list(ee_list, lambda ee_item: "helm-chart" in ee_item):
+            return True
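A hedged sketch of the descriptor shape this helper inspects (structure simplified, values hypothetical); it returns True as soon as any VNFD day1-2 configuration lists a helm-chart execution environment, otherwise it falls through and returns None (falsy):

    db_vnfds = [{
        "id": "vnf-a",
        "df": [{"id": "default-df", "lcm-operations-configuration": {"operate-vnf-op-config": {
            "day1-2": [{"id": "vnf-a",
                        "execution-environment-list": [{"id": "monitor-ee", "helm-chart": "eechart"}]}]
        }}}],
    }]
    # check_helm_ee_in_ns(db_vnfds) -> True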
diff --git a/osm_lcm/data_utils/wim.py b/osm_lcm/data_utils/wim.py
new file mode 100644 (file)
index 0000000..c8ce0bf
--- /dev/null
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+
+# This file is part of OSM Life-Cycle Management module
+#
+# Copyright 2022 ETSI
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+
+from osm_lcm.data_utils.database.vim_account import VimAccountDB
+from osm_lcm.data_utils.database.wim_account import WimAccountDB
+from osm_lcm.data_utils.vim import get_vims_to_connect
+from osm_lcm.lcm_utils import LcmException
+
+__author__ = (
+    "Lluis Gifre <lluis.gifre@cttc.es>, Ricard Vilalta <ricard.vilalta@cttc.es>"
+)
+
+
+def get_candidate_wims(vims_to_connect):
+    all_wim_accounts = WimAccountDB.get_all_wim_accounts()
+    candidate_wims = {}
+    for wim_id, db_wim in all_wim_accounts.items():
+        wim_port_mapping = db_wim.get("config", {}).get("wim_port_mapping", [])
+        wim_dc_ids = {
+            m.get("datacenter_id") for m in wim_port_mapping if m.get("datacenter_id")
+        }
+        not_reachable_vims = vims_to_connect.difference(wim_dc_ids)
+        if len(not_reachable_vims) > 0:
+            continue
+        # TODO: consider adding other filtering fields such as supported layer(s) [L2, L3, ...]
+        candidate_wims[wim_id] = db_wim
+    return candidate_wims
+
+
+def select_feasible_wim_account(db_nsr, db_vnfrs, target_vld, vld_params, logger):
+    logger.info("Checking if WIM is needed for VLD({:s})...".format(str(target_vld)))
+    if target_vld.get("mgmt-network", False):
+        logger.info(
+            "WIM not needed, VLD({:s}) is a management network".format(str(target_vld))
+        )
+        return None, None  # assume mgmt networks do not use a WIM
+
+    # check if WIM account is explicitly False
+    wim_account_id = vld_params.get("wimAccountId")
+    if wim_account_id is not None and not wim_account_id:
+        logger.info(
+            "VLD({:s}) explicitly specifies not to use a WIM".format(str(target_vld))
+        )
+        return None, None  # WIM account explicitly set to False, do not use a WIM
+
+    # find VIMs to be connected by VLD
+    vims_to_connect = get_vims_to_connect(db_nsr, db_vnfrs, target_vld, logger)
+    # check if we need a WIM to interconnect the VNFs in different VIMs
+    if len(vims_to_connect) < 2:
+        logger.info(
+            "WIM not needed, VLD({:s}) does not involve multiple VIMs".format(
+                str(target_vld)
+            )
+        )
+        return None, None
+    # if more than one VIM needs to be connected...
+    logger.info(
+        "WIM is needed, multiple VIMs to interconnect: {:s}".format(
+            str(vims_to_connect)
+        )
+    )
+    # find a WIM having these VIMs on its wim_port_mapping setting
+    candidate_wims = get_candidate_wims(vims_to_connect)
+    logger.info("Candidate WIMs: {:s}".format(str(candidate_wims)))
+
+    # check if a desired wim_account_id is specified in vld_params
+    wim_account_id = vld_params.get("wimAccountId")
+    if wim_account_id:
+        # check if the desired WIM account is feasible
+        # implicitly checks if it exists in the DB
+        db_wim = candidate_wims.get(wim_account_id)
+        if db_wim:
+            return wim_account_id, db_wim
+        msg = (
+            "WimAccountId specified in VldParams({:s}) cannot be used "
+            "to connect the required VIMs({:s}). Candidate WIMs are: {:s}"
+        )
+        raise LcmException(
+            msg.format(str(vld_params), str(vims_to_connect), str(candidate_wims))
+        )
+
+    # if multiple candidate WIMs: report error message
+    if len(candidate_wims) > 1:
+        msg = (
+            "Multiple candidate WIMs found ({:s}) and wim_account not specified. "
+            "Please, specify the WIM account to be used."
+        )
+        raise LcmException(msg.format(str(candidate_wims.keys())))
+
+    # a single candidate WIM has been found, retrieve it
+    return candidate_wims.popitem()  # returns tuple (wim_account_id, db_wim)
+
+
+def get_target_wim_attrs(nsr_id, target_vld, vld_params):
+    target_vims = [
+        "vim:{:s}".format(vim_id) for vim_id in vld_params["vim-network-name"]
+    ]
+    wim_vld = "nsrs:{}:vld.{}".format(nsr_id, target_vld["id"])
+    vld_type = target_vld.get("type")
+    if vld_type is None:
+        vld_type = "ELAN" if len(target_vims) > 2 else "ELINE"
+    target_wim_attrs = {
+        "sdn": True,
+        "target_vims": target_vims,
+        "vlds": [wim_vld],
+        "type": vld_type,
+    }
+    return target_wim_attrs
+
+
+def get_sdn_ports(vld_params, db_wim):
+    if vld_params.get("provider-network"):
+        # if SDN ports are specified in VLD params, use them
+        return vld_params["provider-network"].get("sdn-ports")
+
+    # otherwise, compose SDN ports required
+    wim_port_mapping = db_wim.get("config", {}).get("wim_port_mapping", [])
+    sdn_ports = []
+    for vim_id in vld_params["vim-network-name"]:
+        db_vim = VimAccountDB.get_vim_account_with_id(vim_id)
+        vim_name = db_vim["name"]
+        mapping = next(
+            (m for m in wim_port_mapping if m["datacenter_id"] == vim_name),
+            None,
+        )
+        if mapping is None:
+            msg = "WIM({:s},{:s}) does not specify a mapping for VIM({:s},{:s})"
+            raise LcmException(
+                msg.format(
+                    db_wim["name"],
+                    db_wim["_id"],
+                    db_vim["name"],
+                    db_vim["_id"],
+                )
+            )
+        sdn_port = {
+            "device_id": vim_name,
+            "switch_id": mapping.get("device_id"),
+            "switch_port": mapping.get("device_interface_id"),
+            "service_endpoint_id": mapping.get("service_endpoint_id"),
+        }
+        service_mapping_info = mapping.get("service_mapping_info", {})
+        encapsulation = service_mapping_info.get("encapsulation", {})
+        if encapsulation.get("type"):
+            sdn_port["service_endpoint_encapsulation_type"] = encapsulation["type"]
+        if encapsulation.get("vlan"):
+            sdn_port["vlan"] = encapsulation["vlan"]
+        sdn_ports.append(sdn_port)
+    return sdn_ports
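A hedged sketch of the wim_port_mapping entries the helpers above rely on (field names taken from the code, values hypothetical):

    db_wim = {"name": "tapi-wim", "_id": "wim-1", "config": {"wim_port_mapping": [
        {"datacenter_id": "VIM-A", "device_id": "dc-gw-1", "device_interface_id": "eth1",
         "service_endpoint_id": "ep-a",
         "service_mapping_info": {"encapsulation": {"type": "dot1q", "vlan": 100}}},
        {"datacenter_id": "VIM-B", "device_id": "dc-gw-2", "device_interface_id": "eth3",
         "service_endpoint_id": "ep-b"},
    ]}}
    # get_candidate_wims({"VIM-A", "VIM-B"}) keeps this WIM because both VIM names appear as
    # datacenter_id; get_sdn_ports(vld_params, db_wim) then builds one sdn_port per VIM account
    # listed in vld_params["vim-network-name"], raising LcmException for any VIM without a mapping.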
index 96fb373..625f24e 100644 (file)
 # under the License.
 ##
 
-# TODO currently is a pure yaml format. Consider to change it to [ini] style with yaml inside to be coherent with other modules
+# TODO currently is a pure yaml format. Consider to change it to [ini: style with yaml inside to be coherent with other modules
 
-#[global]
 global:
     loglevel: DEBUG
     # logfile:  /app/log  # or /var/log/osm/lcm.log
     # nologging: True     # do not log to stdout/stderr
 
-#[timeout]
 timeout:
     # ns_deploy: 7200     # total deploy timeout for a ns 2 hours
     # nsi_deploy: 7200     # total deploy timeout for a nsi 2 hours
 
-#[RO]
 RO:
     host:   ro          # hostname or IP
     port:   9090
@@ -35,7 +32,6 @@ RO:
     # loglevel: DEBUG
     # logfile:  /var/log/osm/lcm-ro.log
 
-#[VCA]
 VCA:
     host:   vca
     port:   17070
@@ -47,16 +43,16 @@ VCA:
     helm3path:    /usr/local/bin/helm3
     kubectlpath: /usr/bin/kubectl
     jujupath:    /usr/local/bin/juju
-    # pubkey: pubkey
-    # cacert: cacert
-    # apiproxy: apiproxy
-    #eegrpcinittimeout: 600
-    #eegrpctimeout: 30
+    eegrpc_tls_enforce: false
+    # public_key: pubkey
+    # ca_cert: cacert
+    # api_proxy: apiproxy
+    # eegrpcinittimeout: 600
+    # eegrpctimeout: 30
 
     # loglevel: DEBUG
     # logfile:  /var/log/osm/lcm-vca.log
 
-#[database]
 database:
     driver: mongo       # mongo or memory
     host:   mongo       # hostname or IP
@@ -69,7 +65,6 @@ database:
     # loglevel: DEBUG
     # logfile:  /var/log/osm/lcm-database.log
 
-#[storage]
 storage:
     driver: local       # local filesystem
     # for local provide file path
@@ -77,7 +72,6 @@ storage:
     # loglevel: DEBUG
     # logfile:  /var/log/osm/lcm-storage.log
 
-#[message]
 message:
     driver:   kafka       # local or kafka
     # for local provide file path
index 5f34280..5638943 100644 (file)
@@ -29,19 +29,12 @@ import logging.handlers
 import getopt
 import sys
 
-from osm_lcm import ns, paas, vim_sdn, netslice
+from osm_lcm import ns, vim_sdn, netslice
 from osm_lcm.ng_ro import NgRoException, NgRoClient
 from osm_lcm.ROclient import ROClient, ROClientException
 
 from time import time
-from osm_lcm.lcm_utils import (
-    get_paas_id_by_nsr_id,
-    get_paas_type_by_paas_id,
-    LcmException,
-    LcmExceptionExit,
-    TaskRegistry,
-    versiontuple,
-)
+from osm_lcm.lcm_utils import versiontuple, LcmException, TaskRegistry, LcmExceptionExit
 from osm_lcm import version as lcm_version, version_date as lcm_version_date
 
 from osm_common import msglocal, msgkafka
@@ -51,9 +44,9 @@ from osm_common.fsbase import FsException
 from osm_common.msgbase import MsgException
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
+from osm_lcm.data_utils.lcm_config import LcmCfg
 from osm_lcm.lcm_hc import get_health_check_file
-from osm_lcm.paas_service import paas_service_factory
-from os import environ, path
+from os import path
 from random import choice as random_choice
 from n2vc import version as n2vc_version
 import traceback
@@ -75,18 +68,13 @@ class Lcm:
         120  # interval between pings once it is confirmed that everything is running
     )
     ping_interval_boot = 5  # interval between pings while booting
-    cfg_logger_name = {
-        "message": "lcm.msg",
-        "database": "lcm.db",
-        "storage": "lcm.fs",
-        "tsdb": "lcm.prometheus",
-    }
-    # ^ contains for each section at lcm.cfg the used logger name
+
+    main_config = LcmCfg()
 
     def __init__(self, config_file, loop=None):
         """
         Init, Connect to database, filesystem storage, and messaging
-        :param config_file: two level dictionary with configuration. Top level should contain 'database', 'storage',
+        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
         :return: None
         """
         self.db = None
@@ -103,41 +91,19 @@ class Lcm:
         self.worker_id = self.get_process_id()
         # load configuration
         config = self.read_config_file(config_file)
-        self.config = config
-        self.health_check_file = get_health_check_file(self.config)
-        self.config["ro_config"] = {
-            "ng": config["RO"].get("ng", False),
-            "uri": config["RO"].get("uri"),
-            "tenant": config.get("tenant", "osm"),
-            "logger_name": "lcm.roclient",
-            "loglevel": config["RO"].get("loglevel", "ERROR"),
-        }
-        if not self.config["ro_config"]["uri"]:
-            self.config["ro_config"]["uri"] = "http://{}:{}/".format(
-                config["RO"]["host"], config["RO"]["port"]
-            )
-        elif (
-            "/ro" in self.config["ro_config"]["uri"][-4:]
-            or "/openmano" in self.config["ro_config"]["uri"][-10:]
-        ):
-            # uri ends with '/ro', '/ro/', '/openmano', '/openmano/'
-            index = self.config["ro_config"]["uri"][-1].rfind("/")
-            self.config["ro_config"]["uri"] = self.config["ro_config"]["uri"][index + 1]
-
+        self.main_config.set_from_dict(config)
+        self.main_config.transform()
+        self.main_config.load_from_env()
+        self.logger.critical("Loaded configuration:" + str(self.main_config.to_dict()))
+        # TODO: check if lcm_hc.py is necessary
+        self.health_check_file = get_health_check_file(self.main_config.to_dict())
         self.loop = loop or asyncio.get_event_loop()
         self.ns = (
             self.netslice
         ) = (
             self.vim
-        ) = (
-            self.wim
-        ) = (
-            self.sdn
-        ) = (
-            self.k8scluster
-        ) = (
-            self.vca
-        ) = self.k8srepo = self.paas = self.paas_service = self.juju_paas = None
+        ) = self.wim = self.sdn = self.k8scluster = self.vca = self.k8srepo = None
+
         # logging
         log_format_simple = (
             "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s"
@@ -145,35 +111,35 @@ class Lcm:
         log_formatter_simple = logging.Formatter(
             log_format_simple, datefmt="%Y-%m-%dT%H:%M:%S"
         )
-        config["database"]["logger_name"] = "lcm.db"
-        config["storage"]["logger_name"] = "lcm.fs"
-        config["message"]["logger_name"] = "lcm.msg"
-        if config["global"].get("logfile"):
+        if self.main_config.globalConfig.logfile:
             file_handler = logging.handlers.RotatingFileHandler(
-                config["global"]["logfile"], maxBytes=100e6, backupCount=9, delay=0
+                self.main_config.globalConfig.logfile,
+                maxBytes=100e6,
+                backupCount=9,
+                delay=0,
             )
             file_handler.setFormatter(log_formatter_simple)
             self.logger.addHandler(file_handler)
-        if not config["global"].get("nologging"):
+        if not self.main_config.globalConfig.to_dict()["nologging"]:
             str_handler = logging.StreamHandler()
             str_handler.setFormatter(log_formatter_simple)
             self.logger.addHandler(str_handler)
 
-        if config["global"].get("loglevel"):
-            self.logger.setLevel(config["global"]["loglevel"])
+        if self.main_config.globalConfig.to_dict()["loglevel"]:
+            self.logger.setLevel(self.main_config.globalConfig.loglevel)
 
         # logging other modules
-        for k1, logname in self.cfg_logger_name.items():
-            config[k1]["logger_name"] = logname
-            logger_module = logging.getLogger(logname)
-            if config[k1].get("logfile"):
+        for logger in ("message", "database", "storage", "tsdb"):
+            logger_config = self.main_config.to_dict()[logger]
+            logger_module = logging.getLogger(logger_config["logger_name"])
+            if logger_config["logfile"]:
                 file_handler = logging.handlers.RotatingFileHandler(
-                    config[k1]["logfile"], maxBytes=100e6, backupCount=9, delay=0
+                    logger_config["logfile"], maxBytes=100e6, backupCount=9, delay=0
                 )
                 file_handler.setFormatter(log_formatter_simple)
                 logger_module.addHandler(file_handler)
-            if config[k1].get("loglevel"):
-                logger_module.setLevel(config[k1]["loglevel"])
+            if logger_config["loglevel"]:
+                logger_module.setLevel(logger_config["loglevel"])
         self.logger.critical(
             "starting osm/lcm version {} {}".format(lcm_version, lcm_version_date)
         )
@@ -196,13 +162,13 @@ class Lcm:
             )
 
         try:
-            self.db = Database(config).instance.db
+            self.db = Database(self.main_config.to_dict()).instance.db
 
-            self.fs = Filesystem(config).instance.fs
+            self.fs = Filesystem(self.main_config.to_dict()).instance.fs
             self.fs.sync()
 
             # copy message configuration in order to remove 'group_id' for msg_admin
-            config_message = config["message"].copy()
+            config_message = self.main_config.message.to_dict()
             config_message["loop"] = self.loop
             if config_message["driver"] == "local":
                 self.msg = msglocal.MsgLocal()
@@ -219,7 +185,7 @@ class Lcm:
             else:
                 raise LcmException(
                     "Invalid configuration param '{}' at '[message]':'driver'".format(
-                        config["message"]["driver"]
+                        self.main_config.message.driver
                     )
                 )
         except (DbException, FsException, MsgException) as e:
@@ -233,19 +199,21 @@ class Lcm:
         tries = 14
         last_error = None
         while True:
-            ro_uri = self.config["ro_config"]["uri"]
+            ro_uri = self.main_config.RO.uri
+            if not ro_uri:
+                ro_uri = ""
             try:
                 # try new  RO, if fail old RO
                 try:
-                    self.config["ro_config"]["uri"] = ro_uri + "ro"
-                    ro_server = NgRoClient(self.loop, **self.config["ro_config"])
+                    self.main_config.RO.uri = ro_uri + "ro"
+                    ro_server = NgRoClient(self.loop, **self.main_config.RO.to_dict())
                     ro_version = await ro_server.get_version()
-                    self.config["ro_config"]["ng"] = True
+                    self.main_config.RO.ng = True
                 except Exception:
-                    self.config["ro_config"]["uri"] = ro_uri + "openmano"
-                    ro_server = ROClient(self.loop, **self.config["ro_config"])
+                    self.main_config.RO.uri = ro_uri + "openmano"
+                    ro_server = ROClient(self.loop, **self.main_config.RO.to_dict())
                     ro_version = await ro_server.get_version()
-                    self.config["ro_config"]["ng"] = False
+                    self.main_config.RO.ng = False
                 if versiontuple(ro_version) < versiontuple(min_RO_version):
                     raise LcmException(
                         "Not compatible osm/RO version '{}'. Needed '{}' or higher".format(
@@ -254,16 +222,16 @@ class Lcm:
                     )
                 self.logger.info(
                     "Connected to RO version {} new-generation version {}".format(
-                        ro_version, self.config["ro_config"]["ng"]
+                        ro_version, self.main_config.RO.ng
                     )
                 )
                 return
             except (ROClientException, NgRoException) as e:
-                self.config["ro_config"]["uri"] = ro_uri
+                self.main_config.RO.uri = ro_uri
                 tries -= 1
                 traceback.print_tb(e.__traceback__)
                 error_text = "Error while connecting to RO on {}: {}".format(
-                    self.config["ro_config"]["uri"], e
+                    self.main_config.RO.uri, e
                 )
                 if tries <= 0:
                     self.logger.critical(error_text)
@@ -330,94 +298,6 @@ class Lcm:
                 wait_time = 2 if not first_start else 5
                 await asyncio.sleep(wait_time, loop=self.loop)
 
-    def _kafka_read_paas(self, command, params, order_id):
-        paas_id = params.get("_id")
-
-        if command == "created":
-            task = asyncio.ensure_future(self.paas.create(params, order_id))
-            self.lcm_tasks.register("paas", paas_id, order_id, "paas_create", task)
-        elif command == "edited":
-            task = asyncio.ensure_future(self.paas.edit(params, order_id))
-            self.lcm_tasks.register("paas", paas_id, order_id, "paas_edit", task)
-        elif command == "delete":
-            task = asyncio.ensure_future(self.paas.delete(params, order_id))
-            self.lcm_tasks.register("paas", paas_id, order_id, "paas_delete", task)
-        elif command == "deleted":
-            self.logger.debug("PaaS {} already deleted from DB".format(paas_id))
-        else:
-            self.logger.error("Invalid command {} for PaaS topic".format(command))
-
-    def _kafka_read_ns_instantiate(self, params: dict) -> None:
-        """Operations to be performed if the topic is ns and command is instantiate.
-        Args:
-            params  (dict):     Dictionary including NS related parameters
-        """
-        nsr_id, nslcmop_id = params["nsInstanceId"], params["_id"]
-        paas_id = params["operationParams"].get("paasAccountId")
-
-        if paas_id:
-            paas_type = get_paas_type_by_paas_id(paas_id, self.db)
-            task = asyncio.ensure_future(
-                self.paas_service[paas_type].instantiate(nsr_id, nslcmop_id)
-            )
-            self.logger.debug(
-                "Deploying NS {} using PaaS account {}".format(nsr_id, paas_id)
-            )
-
-        else:
-            task = asyncio.ensure_future(self.ns.instantiate(nsr_id, nslcmop_id))
-            self.logger.debug("Deploying NS {}".format(nsr_id))
-
-        self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_instantiate", task)
-
-    def _kafka_read_ns_terminate(self, params: dict, topic: str) -> None:
-        """Operations to be performed if the topic is ns and command is terminate.
-        Args:
-            params  (dict):     Dictionary including NS related parameters
-            topic   (str):      Name of Kafka topic
-        """
-        nsr_id, nslcmop_id = params["nsInstanceId"], params["_id"]
-        paas_id = get_paas_id_by_nsr_id(nsr_id, self.db)
-
-        if paas_id:
-            paas_type = get_paas_type_by_paas_id(paas_id, self.db)
-            task = asyncio.ensure_future(
-                self.paas_service[paas_type].terminate(nsr_id, nslcmop_id)
-            )
-            self.logger.debug(
-                "Terminating NS {} using PaaS account {}".format(nsr_id, paas_id)
-            )
-
-        else:
-            self.lcm_tasks.cancel(topic, nsr_id)
-            task = asyncio.ensure_future(self.ns.terminate(nsr_id, nslcmop_id))
-            self.logger.debug("Terminating NS {}".format(nsr_id))
-
-        self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_terminate", task)
-
-    def _kafka_read_ns_action(self, params: dict) -> None:
-        """Operations to be performed if the topic is ns and command is action.
-        Args:
-            params  (dict):     Dictionary including NS related parameters
-        """
-        nsr_id, nslcmop_id = params["nsInstanceId"], params["_id"]
-        paas_id = get_paas_id_by_nsr_id(nsr_id, self.db)
-
-        if paas_id:
-            paas_type = get_paas_type_by_paas_id(paas_id, self.db)
-            task = asyncio.ensure_future(
-                self.paas_service[paas_type].action(nsr_id, nslcmop_id)
-            )
-            self.logger.debug(
-                "Running action on NS {} using PaaS account {}".format(nsr_id, paas_id)
-            )
-
-        else:
-            task = asyncio.ensure_future(self.ns.action(nsr_id, nslcmop_id))
-            self.logger.debug("Running action on NS {}".format(nsr_id))
-
-        self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_action", task)
-
     def kafka_read_callback(self, topic, command, params):
         order_id = 1
 
@@ -486,9 +366,6 @@ class Lcm:
                 task = asyncio.ensure_future(self.vca.delete(params, order_id))
                 self.lcm_tasks.register("vca", vca_id, order_id, "vca_delete", task)
                 return
-        elif topic == "paas":
-            self._kafka_read_paas(command, params, order_id)
-            return
         elif topic == "k8srepo":
             if command == "create" or command == "created":
                 k8srepo_id = params.get("_id")
@@ -507,13 +384,24 @@ class Lcm:
                 return
         elif topic == "ns":
             if command == "instantiate":
-                self._kafka_read_ns_instantiate(params)
+                # self.logger.debug("Deploying NS {}".format(nsr_id))
+                nslcmop = params
+                nslcmop_id = nslcmop["_id"]
+                nsr_id = nslcmop["nsInstanceId"]
+                task = asyncio.ensure_future(self.ns.instantiate(nsr_id, nslcmop_id))
+                self.lcm_tasks.register(
+                    "ns", nsr_id, nslcmop_id, "ns_instantiate", task
+                )
                 return
-
             elif command == "terminate":
-                self._kafka_read_ns_terminate(params, topic)
+                # self.logger.debug("Deleting NS {}".format(nsr_id))
+                nslcmop = params
+                nslcmop_id = nslcmop["_id"]
+                nsr_id = nslcmop["nsInstanceId"]
+                self.lcm_tasks.cancel(topic, nsr_id)
+                task = asyncio.ensure_future(self.ns.terminate(nsr_id, nslcmop_id))
+                self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_terminate", task)
                 return
-
             elif command == "vca_status_refresh":
                 nslcmop = params
                 nslcmop_id = nslcmop["_id"]
@@ -525,36 +413,49 @@ class Lcm:
                     "ns", nsr_id, nslcmop_id, "ns_vca_status_refresh", task
                 )
                 return
-
             elif command == "action":
-                self._kafka_read_ns_action(params)
+                # self.logger.debug("Update NS {}".format(nsr_id))
+                nslcmop = params
+                nslcmop_id = nslcmop["_id"]
+                nsr_id = nslcmop["nsInstanceId"]
+                task = asyncio.ensure_future(self.ns.action(nsr_id, nslcmop_id))
+                self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_action", task)
                 return
-
             elif command == "update":
                 # self.logger.debug("Update NS {}".format(nsr_id))
-                nsr_id, nslcmop_id = params["nsInstanceId"], params["_id"]
+                nslcmop = params
+                nslcmop_id = nslcmop["_id"]
+                nsr_id = nslcmop["nsInstanceId"]
                 task = asyncio.ensure_future(self.ns.update(nsr_id, nslcmop_id))
                 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_update", task)
                 return
             elif command == "scale":
                 # self.logger.debug("Update NS {}".format(nsr_id))
-                nsr_id, nslcmop_id = params["nsInstanceId"], params["_id"]
+                nslcmop = params
+                nslcmop_id = nslcmop["_id"]
+                nsr_id = nslcmop["nsInstanceId"]
                 task = asyncio.ensure_future(self.ns.scale(nsr_id, nslcmop_id))
                 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_scale", task)
                 return
             elif command == "heal":
                 # self.logger.debug("Healing NS {}".format(nsr_id))
-                nsr_id, nslcmop_id = params["nsInstanceId"], params["_id"]
+                nslcmop = params
+                nslcmop_id = nslcmop["_id"]
+                nsr_id = nslcmop["nsInstanceId"]
                 task = asyncio.ensure_future(self.ns.heal(nsr_id, nslcmop_id))
                 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_heal", task)
                 return
             elif command == "migrate":
-                nsr_id, nslcmop_id = params["nsInstanceId"], params["_id"]
+                nslcmop = params
+                nslcmop_id = nslcmop["_id"]
+                nsr_id = nslcmop["nsInstanceId"]
                 task = asyncio.ensure_future(self.ns.migrate(nsr_id, nslcmop_id))
                 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_migrate", task)
                 return
             elif command == "verticalscale":
-                nsr_id, nslcmop_id = params["nsInstanceId"], params["_id"]
+                nslcmop = params
+                nslcmop_id = nslcmop["_id"]
+                nsr_id = nslcmop["nsInstanceId"]
                 task = asyncio.ensure_future(self.ns.vertical_scale(nsr_id, nslcmop_id))
                 self.logger.debug(
                     "nsr_id,nslcmop_id,task {},{},{}".format(nsr_id, nslcmop_id, task)
@@ -579,7 +480,7 @@ class Lcm:
                             db_nsr["config-status"],
                             db_nsr["detailed-status"],
                             db_nsr["_admin"]["deployed"],
-                            self.lcm_tasks.task_registry["ns"][nsr_id],
+                            self.lcm_ns_tasks.get(nsr_id),
                         )
                     )
                 except Exception as e:
@@ -661,7 +562,7 @@ class Lcm:
         elif topic == "vim_account":
             vim_id = params["_id"]
             if command in ("create", "created"):
-                if not self.config["ro_config"].get("ng"):
+                if not self.main_config.RO.ng:
                     task = asyncio.ensure_future(self.vim.create(params, order_id))
                     self.lcm_tasks.register(
                         "vim_account", vim_id, order_id, "vim_create", task
@@ -679,7 +580,7 @@ class Lcm:
                 sys.stdout.flush()
                 return
             elif command in ("edit", "edited"):
-                if not self.config["ro_config"].get("ng"):
+                if not self.main_config.RO.ng:
                     task = asyncio.ensure_future(self.vim.edit(params, order_id))
                     self.lcm_tasks.register(
                         "vim_account", vim_id, order_id, "vim_edit", task
@@ -690,7 +591,7 @@ class Lcm:
         elif topic == "wim_account":
             wim_id = params["_id"]
             if command in ("create", "created"):
-                if not self.config["ro_config"].get("ng"):
+                if not self.main_config.RO.ng:
                     task = asyncio.ensure_future(self.wim.create(params, order_id))
                     self.lcm_tasks.register(
                         "wim_account", wim_id, order_id, "wim_create", task
@@ -718,7 +619,7 @@ class Lcm:
         elif topic == "sdn":
             _sdn_id = params["_id"]
             if command in ("create", "created"):
-                if not self.config["ro_config"].get("ng"):
+                if not self.main_config.RO.ng:
                     task = asyncio.ensure_future(self.sdn.create(params, order_id))
                     self.lcm_tasks.register(
                         "sdn", _sdn_id, order_id, "sdn_create", task
@@ -754,7 +655,6 @@ class Lcm:
                     "nsi",
                     "k8scluster",
                     "vca",
-                    "paas",
                     "k8srepo",
                     "pla",
                 )
@@ -799,37 +699,30 @@ class Lcm:
         # check RO version
         self.loop.run_until_complete(self.check_RO_version())
 
-        self.ns = ns.NsLcm(self.msg, self.lcm_tasks, self.config, self.loop)
+        self.ns = ns.NsLcm(self.msg, self.lcm_tasks, self.main_config, self.loop)
+        # TODO: modify the rest of classes to use the LcmCfg object instead of dicts
         self.netslice = netslice.NetsliceLcm(
-            self.msg, self.lcm_tasks, self.config, self.loop, self.ns
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop, self.ns
+        )
+        self.vim = vim_sdn.VimLcm(
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop
+        )
+        self.wim = vim_sdn.WimLcm(
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop
+        )
+        self.sdn = vim_sdn.SdnLcm(
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop
         )
-        self.vim = vim_sdn.VimLcm(self.msg, self.lcm_tasks, self.config, self.loop)
-        self.wim = vim_sdn.WimLcm(self.msg, self.lcm_tasks, self.config, self.loop)
-        self.sdn = vim_sdn.SdnLcm(self.msg, self.lcm_tasks, self.config, self.loop)
         self.k8scluster = vim_sdn.K8sClusterLcm(
-            self.msg, self.lcm_tasks, self.config, self.loop
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop
+        )
+        self.vca = vim_sdn.VcaLcm(
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop
         )
-        self.vca = vim_sdn.VcaLcm(self.msg, self.lcm_tasks, self.config, self.loop)
-        self.paas = paas.PaasLcm(self.msg, self.lcm_tasks, self.config, self.loop)
         self.k8srepo = vim_sdn.K8sRepoLcm(
-            self.msg, self.lcm_tasks, self.config, self.loop
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop
         )
 
-        # Specific PaaS Service Object for "Juju" PaaS Orchestrator type
-        self.juju_paas = paas_service_factory(
-            self.msg,
-            self.lcm_tasks,
-            self.db,
-            self.fs,
-            self.logger,
-            self.loop,
-            self.config,
-            "juju",
-        )
-        # Mapping between paas_type and PaaS service object
-        self.paas_service = {
-            "juju": self.juju_paas,
-        }
         self.loop.run_until_complete(
             asyncio.gather(self.kafka_read(), self.kafka_ping())
         )
@@ -856,67 +749,9 @@ class Lcm:
             self.fs.fs_disconnect()
 
     def read_config_file(self, config_file):
-        # TODO make a [ini] + yaml inside parser
-        # the configparser library is not suitable, because it does not admit comments at the end of line,
-        # and not parse integer or boolean
         try:
-            # read file as yaml format
             with open(config_file) as f:
-                conf = yaml.safe_load(f)
-            # Ensure all sections are not empty
-            for k in (
-                "global",
-                "timeout",
-                "RO",
-                "VCA",
-                "database",
-                "storage",
-                "message",
-            ):
-                if not conf.get(k):
-                    conf[k] = {}
-
-            # read all environ that starts with OSMLCM_
-            for k, v in environ.items():
-                if not k.startswith("OSMLCM_"):
-                    continue
-                subject, _, item = k[7:].lower().partition("_")
-                if not item:
-                    continue
-                if subject in ("ro", "vca"):
-                    # put in capital letter
-                    subject = subject.upper()
-                try:
-                    if item == "port" or subject == "timeout":
-                        conf[subject][item] = int(v)
-                    else:
-                        conf[subject][item] = v
-                except Exception as e:
-                    self.logger.warning(
-                        "skipping environ '{}' on exception '{}'".format(k, e)
-                    )
-
-            # backward compatibility of VCA parameters
-
-            if "pubkey" in conf["VCA"]:
-                conf["VCA"]["public_key"] = conf["VCA"].pop("pubkey")
-            if "cacert" in conf["VCA"]:
-                conf["VCA"]["ca_cert"] = conf["VCA"].pop("cacert")
-            if "apiproxy" in conf["VCA"]:
-                conf["VCA"]["api_proxy"] = conf["VCA"].pop("apiproxy")
-
-            if "enableosupgrade" in conf["VCA"]:
-                conf["VCA"]["enable_os_upgrade"] = conf["VCA"].pop("enableosupgrade")
-            if isinstance(conf["VCA"].get("enable_os_upgrade"), str):
-                if conf["VCA"]["enable_os_upgrade"].lower() == "false":
-                    conf["VCA"]["enable_os_upgrade"] = False
-                elif conf["VCA"]["enable_os_upgrade"].lower() == "true":
-                    conf["VCA"]["enable_os_upgrade"] = True
-
-            if "aptmirror" in conf["VCA"]:
-                conf["VCA"]["apt_mirror"] = conf["VCA"].pop("aptmirror")
-
-            return conf
+                return yaml.safe_load(f)
         except Exception as e:
             self.logger.critical("At config file '{}': {}".format(config_file, e))
             exit(1)
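A hedged sketch of the resulting startup flow (the config path is hypothetical, and start() is assumed to be the usual Lcm entry point): read_config_file() now only loads the YAML, while typing, defaults and environment overrides are handled by the LcmCfg/OsmConfigman layer, and the old pubkey/cacert/apiproxy aliases are dropped in favour of the renamed keys shown in lcm.cfg:

    lcm = Lcm(config_file="/etc/osm/lcm.cfg")  # yaml.safe_load + LcmCfg.set_from_dict/transform/load_from_env
    lcm.start()  # checks the RO version, builds the ns/netslice/vim_sdn handlers, then runs kafka_read()/kafka_ping()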
index 0c88abe..b8817a5 100644 (file)
 import functools
 import yaml
 import asyncio
-import socket
 import uuid
 import os
+import ssl
 
 from grpclib.client import Channel
 
+from osm_lcm.data_utils.lcm_config import VcaConfig
 from osm_lcm.frontend_pb2 import PrimitiveRequest
 from osm_lcm.frontend_pb2 import SshKeyRequest, SshKeyReply
 from osm_lcm.frontend_grpc import FrontendExecutorStub
-from osm_lcm.lcm_utils import LcmBase
+from osm_lcm.lcm_utils import LcmBase, get_ee_id_parts
 
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
@@ -77,23 +78,31 @@ def retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_dela
     return wrapper
 
 
-class LCMHelmConn(N2VCConnector, LcmBase):
-    _KUBECTL_OSM_NAMESPACE = "osm"
-    _KUBECTL_OSM_CLUSTER_NAME = "_system-osm-k8s"
-    _EE_SERVICE_PORT = 50050
+def create_secure_context(
+    trusted: str,
+) -> ssl.SSLContext:
+    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+    ctx.verify_mode = ssl.CERT_REQUIRED
+    ctx.check_hostname = True
+    ctx.minimum_version = ssl.TLSVersion.TLSv1_2
+    # TODO: client TLS
+    # ctx.load_cert_chain(str(client_cert), str(client_key))
+    ctx.load_verify_locations(trusted)
+    ctx.set_ciphers("ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20")
+    ctx.set_alpn_protocols(["h2"])
+    try:
+        ctx.set_npn_protocols(["h2"])
+    except NotImplementedError:
+        pass
+    return ctx
 
-    # Initial max retry time
-    _MAX_INITIAL_RETRY_TIME = 600
-    # Max retry time for normal operations
-    _MAX_RETRY_TIME = 30
-    # Time beetween retries, retry time after a connection error is raised
-    _EE_RETRY_DELAY = 10
 
+class LCMHelmConn(N2VCConnector, LcmBase):
     def __init__(
         self,
         log: object = None,
         loop: object = None,
-        vca_config: dict = None,
+        vca_config: VcaConfig = None,
         on_update_db=None,
     ):
         """
@@ -110,35 +119,20 @@ class LCMHelmConn(N2VCConnector, LcmBase):
 
         self.vca_config = vca_config
         self.log.debug("Initialize helm N2VC connector")
-        self.log.debug("initial vca_config: {}".format(vca_config))
-
-        # TODO - Obtain data from configuration
-        self._ee_service_port = self._EE_SERVICE_PORT
+        self.log.debug("initial vca_config: {}".format(vca_config.to_dict()))
 
-        self._retry_delay = self._EE_RETRY_DELAY
+        self._retry_delay = self.vca_config.helm_ee_retry_delay
 
-        if self.vca_config and self.vca_config.get("eegrpcinittimeout"):
-            self._initial_retry_time = self.vca_config.get("eegrpcinittimeout")
-            self.log.debug("Initial retry time: {}".format(self._initial_retry_time))
-        else:
-            self._initial_retry_time = self._MAX_INITIAL_RETRY_TIME
-            self.log.debug(
-                "Applied default retry time: {}".format(self._initial_retry_time)
-            )
+        self._initial_retry_time = self.vca_config.helm_max_initial_retry_time
+        self.log.debug("Initial retry time: {}".format(self._initial_retry_time))
 
-        if self.vca_config and self.vca_config.get("eegrpctimeout"):
-            self._max_retry_time = self.vca_config.get("eegrpctimeout")
-            self.log.debug("Retry time: {}".format(self._max_retry_time))
-        else:
-            self._max_retry_time = self._MAX_RETRY_TIME
-            self.log.debug(
-                "Applied default retry time: {}".format(self._max_retry_time)
-            )
+        self._max_retry_time = self.vca_config.helm_max_retry_time
+        self.log.debug("Retry time: {}".format(self._max_retry_time))
 
         # initialize helm connector for helmv2 and helmv3
         self._k8sclusterhelm2 = K8sHelmConnector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            helm_command=self.vca_config.get("helmpath"),
+            kubectl_command=self.vca_config.kubectlpath,
+            helm_command=self.vca_config.helmpath,
             fs=self.fs,
             db=self.db,
             log=self.log,
@@ -146,8 +140,8 @@ class LCMHelmConn(N2VCConnector, LcmBase):
         )
 
         self._k8sclusterhelm3 = K8sHelm3Connector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            helm_command=self.vca_config.get("helm3path"),
+            kubectl_command=self.vca_config.kubectlpath,
+            helm_command=self.vca_config.helm3path,
             fs=self.fs,
             log=self.log,
             db=self.db,
@@ -260,7 +254,7 @@ class LCMHelmConn(N2VCConnector, LcmBase):
                     system_cluster_uuid,
                     kdu_model=kdu_model,
                     kdu_instance=helm_id,
-                    namespace=self._KUBECTL_OSM_NAMESPACE,
+                    namespace=self.vca_config.kubectl_osm_namespace,
                     params=config,
                     db_dict=db_dict,
                     timeout=progress_timeout,
@@ -274,13 +268,15 @@ class LCMHelmConn(N2VCConnector, LcmBase):
                     system_cluster_uuid,
                     kdu_model=kdu_model,
                     kdu_instance=helm_id,
-                    namespace=self._KUBECTL_OSM_NAMESPACE,
+                    namespace=self.vca_config.kubectl_osm_namespace,
                     params=config,
                     db_dict=db_dict,
                     timeout=progress_timeout,
                 )
 
-            ee_id = "{}:{}.{}".format(vca_type, self._KUBECTL_OSM_NAMESPACE, helm_id)
+            ee_id = "{}:{}.{}".format(
+                vca_type, self.vca_config.kubectl_osm_namespace, helm_id
+            )
             return ee_id, None
         except N2VCException:
             raise
@@ -288,6 +284,150 @@ class LCMHelmConn(N2VCConnector, LcmBase):
             self.log.error("Error deploying chart ee: {}".format(e), exc_info=True)
             raise N2VCException("Error deploying chart ee: {}".format(e))
 
+    async def upgrade_execution_environment(
+        self,
+        namespace: str,
+        db_dict: dict,
+        helm_id: str,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        config: dict = None,
+        artifact_path: str = None,
+        vca_type: str = None,
+        *kargs,
+        **kwargs,
+    ) -> (str, dict):
+        """
+        Upgrades an existing helm execution environment using the helm chart indicated in the
+        artifact_path
+        :param str namespace: This param is not used, all helm charts are deployed in the osm
+        system namespace
+        :param dict db_dict: where to write to database when the status changes.
+            It contains a dictionary with {collection: str, filter: {},  path: str},
+                e.g. {collection: "nsrs", filter: {_id: <nsd-id>, path:
+                "_admin.deployed.VCA.3"}
+        :param helm_id: unique name of the Helm release to upgrade
+        :param float progress_timeout:
+        :param float total_timeout:
+        :param dict config:  General variables to instantiate KDU
+        :param str artifact_path:  path of package content
+        :param str vca_type:  Type of vca, must be type helm or helm-v3
+        :returns str, dict: id of the new execution environment including namespace.helm_id
+        and credentials object set to None as all credentials should be osm kubernetes .kubeconfig
+        """
+
+        self.log.info(
+            "upgrade_execution_environment: namespace: {}, artifact_path: {}, "
+            "db_dict: {}".format(namespace, artifact_path, db_dict)
+        )
+
+        # Validate helm_id is provided
+        if helm_id is None or len(helm_id) == 0:
+            raise N2VCBadArgumentsException(
+                message="helm_id is mandatory", bad_args=["helm_id"]
+            )
+
+        # Validate artifact-path is provided
+        if artifact_path is None or len(artifact_path) == 0:
+            raise N2VCBadArgumentsException(
+                message="artifact_path is mandatory", bad_args=["artifact_path"]
+            )
+
+        # Validate artifact-path exists and sync path
+        from_path = os.path.split(artifact_path)[0]
+        self.fs.sync(from_path)
+
+        # remove duplicate slashes in the charm path
+        while artifact_path.find("//") >= 0:
+            artifact_path = artifact_path.replace("//", "/")
+
+        # check charm path
+        if self.fs.file_exists(artifact_path):
+            helm_chart_path = artifact_path
+        else:
+            msg = "artifact path does not exist: {}".format(artifact_path)
+            raise N2VCBadArgumentsException(message=msg, bad_args=["artifact_path"])
+
+        if artifact_path.startswith("/"):
+            full_path = self.fs.path + helm_chart_path
+        else:
+            full_path = self.fs.path + "/" + helm_chart_path
+
+        while full_path.find("//") >= 0:
+            full_path = full_path.replace("//", "/")
+
+        try:
+            # Call helm conn upgrade
+            # Obtain system cluster id from database
+            system_cluster_uuid = await self._get_system_cluster_id()
+            # Add parameter osm if exist to global
+            if config and config.get("osm"):
+                if not config.get("global"):
+                    config["global"] = {}
+                config["global"]["osm"] = config.get("osm")
+
+            self.log.debug("Ugrade helm chart: {}".format(full_path))
+            if vca_type == "helm":
+                await self._k8sclusterhelm2.upgrade(
+                    system_cluster_uuid,
+                    kdu_model=full_path,
+                    kdu_instance=helm_id,
+                    namespace=namespace,
+                    params=config,
+                    db_dict=db_dict,
+                    timeout=progress_timeout,
+                    force=True,
+                )
+            else:
+                await self._k8sclusterhelm3.upgrade(
+                    system_cluster_uuid,
+                    kdu_model=full_path,
+                    kdu_instance=helm_id,
+                    namespace=namespace,
+                    params=config,
+                    db_dict=db_dict,
+                    timeout=progress_timeout,
+                    force=True,
+                )
+
+        except N2VCException:
+            raise
+        except Exception as e:
+            self.log.error("Error upgrading chart ee: {}".format(e), exc_info=True)
+            raise N2VCException("Error upgrading chart ee: {}".format(e))
+
+    async def create_tls_certificate(
+        self,
+        nsr_id: str,
+        secret_name: str,
+        usage: str,
+        dns_prefix: str,
+        namespace: str = None,
+    ):
+        # Obtain system cluster id from database
+        system_cluster_uuid = await self._get_system_cluster_id()
+        # use helm-v3 as certificates don't depend on helm version
+        await self._k8sclusterhelm3.create_certificate(
+            cluster_uuid=system_cluster_uuid,
+            namespace=namespace or self.vca_config.kubectl_osm_namespace,
+            dns_prefix=dns_prefix,
+            name=nsr_id,
+            secret_name=secret_name,
+            usage=usage,
+        )
+
+    async def delete_tls_certificate(
+        self,
+        certificate_name: str = None,
+        namespace: str = None,
+    ):
+        # Obtain system cluster id from database
+        system_cluster_uuid = await self._get_system_cluster_id()
+        await self._k8sclusterhelm3.delete_certificate(
+            cluster_uuid=system_cluster_uuid,
+            namespace=namespace or self.vca_config.kubectl_osm_namespace,
+            certificate_name=certificate_name,
+        )
+
     async def register_execution_environment(
         self,
         namespace: str,
@@ -348,9 +488,8 @@ class LCMHelmConn(N2VCConnector, LcmBase):
 
         try:
             # Obtain ip_addr for the ee service, it is resolved by dns from the ee name by kubernetes
-            version, namespace, helm_id = self._get_ee_id_parts(ee_id)
-            ip_addr = socket.gethostbyname(helm_id)
-
+            version, namespace, helm_id = get_ee_id_parts(ee_id)
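+            # Use the in-cluster DNS name <helm_id>.<namespace>.svc instead of resolving the IP locally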
+            ip_addr = "{}.{}.svc".format(helm_id, namespace)
             # Obtain ssh_key from the ee, this method will implement retries to allow the ee
             # install libraries and start successfully
             ssh_key = await self._get_ssh_key(ip_addr)
@@ -433,8 +572,8 @@ class LCMHelmConn(N2VCConnector, LcmBase):
             params_dict = dict()
 
         try:
-            version, namespace, helm_id = self._get_ee_id_parts(ee_id)
-            ip_addr = socket.gethostbyname(helm_id)
+            version, namespace, helm_id = get_ee_id_parts(ee_id)
+            ip_addr = "{}.{}.svc".format(helm_id, namespace)
         except Exception as e:
             self.log.error("Error getting ee ip ee: {}".format(e))
             raise N2VCException("Error getting ee ip ee: {}".format(e))
@@ -540,7 +679,7 @@ class LCMHelmConn(N2VCConnector, LcmBase):
             system_cluster_uuid = await self._get_system_cluster_id()
 
             # Get helm_id
-            version, namespace, helm_id = self._get_ee_id_parts(ee_id)
+            version, namespace, helm_id = get_ee_id_parts(ee_id)
 
             # Uninstall chart, for backward compatibility we must assume that if there is no
             # version it is helm-v2
@@ -579,14 +718,11 @@ class LCMHelmConn(N2VCConnector, LcmBase):
 
     @retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay")
     async def _get_ssh_key(self, ip_addr):
-        channel = Channel(ip_addr, self._ee_service_port)
-        try:
-            stub = FrontendExecutorStub(channel)
-            self.log.debug("get ssh key, ip_addr: {}".format(ip_addr))
-            reply: SshKeyReply = await stub.GetSshKey(SshKeyRequest())
-            return reply.message
-        finally:
-            channel.close()
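+        # Delegate to _execute_primitive_internal with the sentinel name "_get_ssh_key"
+        # so the ssh-key request reuses the same channel setup, including the TLS fallback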
+        return await self._execute_primitive_internal(
+            ip_addr,
+            "_get_ssh_key",
+            None,
+        )
 
     @retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay")
     async def _execute_config_primitive(self, ip_addr, params, db_dict=None):
@@ -603,10 +739,13 @@ class LCMHelmConn(N2VCConnector, LcmBase):
     async def _execute_primitive_internal(
         self, ip_addr, primitive_name, params, db_dict=None
     ):
-
-        channel = Channel(ip_addr, self._ee_service_port)
-        try:
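+        # The primitive logic is wrapped in a closure so it can be retried over the fallback channel below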
+        async def execute():
             stub = FrontendExecutorStub(channel)
+            if primitive_name == "_get_ssh_key":
+                self.log.debug("get ssh key, ip_addr: {}".format(ip_addr))
+                reply: SshKeyReply = await stub.GetSshKey(SshKeyRequest())
+                return reply.message
+            # For any other primitives
             async with stub.RunPrimitive.open() as stream:
                 primitive_id = str(uuid.uuid1())
                 result = None
@@ -633,6 +772,29 @@ class LCMHelmConn(N2VCConnector, LcmBase):
                     return reply.status, reply.detailed_message
                 else:
                     return "ERROR", "No result received"
+
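+        # Open a TLS-secured gRPC channel first, using the CA store configured for the VCA;
+        # if the execution environment does not support TLS and enforcement is disabled,
+        # the except branch below retries over a plain channel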
+        ssl_context = create_secure_context(self.vca_config.ca_store)
+        channel = Channel(
+            ip_addr, self.vca_config.helm_ee_service_port, ssl=ssl_context
+        )
+        try:
+            return await execute()
+        except ssl.SSLError as ssl_error:  # fallback to insecure gRPC
+            if (
+                ssl_error.reason == "WRONG_VERSION_NUMBER"
+                and not self.vca_config.eegrpc_tls_enforce
+            ):
+                self.log.debug(
+                    "Execution environment doesn't support TLS, falling back to unsecure gRPC"
+                )
+                channel = Channel(ip_addr, self.vca_config.helm_ee_service_port)
+                return await execute()
+            elif ssl_error.reason == "WRONG_VERSION_NUMBER":
+                raise N2VCException(
+                    "Execution environment doesn't support TLS, primitives cannot be executed"
+                )
+            else:
+                raise
         finally:
             channel.close()
 
@@ -658,7 +820,7 @@ class LCMHelmConn(N2VCConnector, LcmBase):
     async def _get_system_cluster_id(self):
         if not self._system_cluster_id:
             db_k8cluster = self.db.get_one(
-                "k8sclusters", {"name": self._KUBECTL_OSM_CLUSTER_NAME}
+                "k8sclusters", {"name": self.vca_config.kubectl_osm_cluster_name}
             )
             k8s_hc_id = deep_get(db_k8cluster, ("_admin", "helm-chart-v3", "id"))
             if not k8s_hc_id:
@@ -687,13 +849,3 @@ class LCMHelmConn(N2VCConnector, LcmBase):
                     )
             self._system_cluster_id = k8s_hc_id
         return self._system_cluster_id
-
-    def _get_ee_id_parts(self, ee_id):
-        """
-        Parses ee_id stored at database that can be either 'version:namespace.helm_id' or only
-        namespace.helm_id for backward compatibility
-        If exists helm version can be helm-v3 or helm (helm-v2 old version)
-        """
-        version, _, part_id = ee_id.rpartition(":")
-        namespace, _, helm_id = part_id.rpartition(".")
-        return version, namespace, helm_id
index 5cd5a2f..956e44f 100644 (file)
@@ -19,6 +19,7 @@
 import asyncio
 import checksumdir
 from collections import OrderedDict
+import hashlib
 import os
 import shutil
 import traceback
@@ -88,32 +89,6 @@ def get_iterable(in_dict, in_key):
     return in_dict[in_key]
 
 
-def get_paas_id_by_nsr_id(nsr_id: str, db: object) -> str:
-    """Get the PaaS account ID using NS record ID.
-    Args:
-        nsr_id (str):       NS record ID
-        db  (object):       Database Object
-
-    Returns:
-        paas_id   (str)     PaaS account ID
-    """
-    db_nsr = db.get_one("nsrs", {"_id": nsr_id})
-    return db_nsr.get("paasdatacenter")
-
-
-def get_paas_type_by_paas_id(paas_id: str, db: object) -> str:
-    """Get the PaaS type using PaaS account ID.
-    Args:
-        paas_id (str):      PaaS account ID
-        db  (object):       Database Object
-
-    Returns:
-        paas_type   (str)   Paas Orchestrator type
-    """
-    db_paas = db.get_one("paas", {"_id": paas_id})
-    return db_paas["paas_type"]
-
-
 def check_juju_bundle_existence(vnfd: dict) -> str:
     """Checks the existence of juju-bundle in the descriptor
 
@@ -187,6 +162,17 @@ def populate_dict(target_dict, key_list, value):
     target_dict[key_list[-1]] = value
 
 
+def get_ee_id_parts(ee_id):
+    """
+    Parses the ee_id stored in the database, which can be either 'version:namespace.helm_id'
+    or only 'namespace.helm_id' for backward compatibility.
+    If present, the helm version can be helm-v3 or helm (the old helm-v2).
+    """
+    version, _, part_id = ee_id.rpartition(":")
+    namespace, _, helm_id = part_id.rpartition(".")
+    return version, namespace, helm_id
+
+
 class LcmBase:
     def __init__(self, msg, logger):
         """
@@ -215,6 +201,54 @@ class LcmBase:
         # except DbException as e:
         #     self.logger.error("Updating {} _id={} with '{}'. Error: {}".format(item, _id, _desc, e))
 
+    @staticmethod
+    def calculate_charm_hash(zipped_file):
+        """Calculate the hash of charm files which ends with .charm
+
+        Args:
+            zipped_file (str): Existing charm package full path
+
+        Returns:
+            hex digest  (str): The hash of the charm file
+        """
+        filehash = hashlib.md5()
+        with open(zipped_file, mode="rb") as file:
+            contents = file.read()
+            filehash.update(contents)
+            return filehash.hexdigest()
+
+    @staticmethod
+    def compare_charm_hash(current_charm, target_charm):
+        """Compare the existing charm and the target charm if the charms
+        are given as zip files ends with .charm
+
+        Args:
+            current_charm (str): Existing charm package full path
+            target_charm  (str): Target charm package full path
+
+        Returns:
+            True/False (bool): if charm has changed it returns True
+        """
+        return LcmBase.calculate_charm_hash(
+            current_charm
+        ) != LcmBase.calculate_charm_hash(target_charm)
+
+    @staticmethod
+    def compare_charmdir_hash(current_charm_dir, target_charm_dir):
+        """Compare the existing charm and the target charm if the charms
+        are given as directories
+
+        Args:
+            current_charm_dir (str): Existing charm package directory path
+            target_charm_dir  (str): Target charm package directory path
+
+        Returns:
+            True/False (bool): if charm has changed it returns True
+        """
+        return checksumdir.dirhash(current_charm_dir) != checksumdir.dirhash(
+            target_charm_dir
+        )
+
     def check_charm_hash_changed(
         self, current_charm_path: str, target_charm_path: str
     ) -> bool:
@@ -229,25 +263,30 @@ class LcmBase:
             True/False (bool): if charm has changed it returns True
 
         """
-        # Check if the charm artifacts are available
-        if os.path.exists(self.fs.path + current_charm_path) and os.path.exists(
-            self.fs.path + target_charm_path
-        ):
-            # Compare the hash of charm folders
-            if checksumdir.dirhash(
-                self.fs.path + current_charm_path
-            ) != checksumdir.dirhash(self.fs.path + target_charm_path):
+        try:
+            # Check if the charm artifacts are available
+            current_charm = self.fs.path + current_charm_path
+            target_charm = self.fs.path + target_charm_path
 
-                return True
+            if os.path.exists(current_charm) and os.path.exists(target_charm):
 
-            return False
+                # Compare the hash of .charm files
+                if current_charm.endswith(".charm"):
+                    return LcmBase.compare_charm_hash(current_charm, target_charm)
 
-        else:
-            raise LcmException(
-                "Charm artifact {} does not exist in the VNF Package".format(
-                    self.fs.path + target_charm_path
+                # Compare the hash of charm folders
+                return LcmBase.compare_charmdir_hash(current_charm, target_charm)
+
+            else:
+                raise LcmException(
+                    "Charm artifact {} does not exist in the VNF Package".format(
+                        self.fs.path + target_charm_path
+                    )
                 )
-            )
+        except (IOError, OSError, TypeError) as error:
+            self.logger.debug(traceback.format_exc())
+            self.logger.error(f"{error} occured while checking the charm hashes")
+            raise LcmException(error)
 
     @staticmethod
     def get_charm_name(charm_metadata_file: str) -> str:
@@ -405,9 +444,9 @@ class TaskRegistry(LcmBase):
     - worker:  the worker ID for this process
     """
 
-    # NS/NSI: "services" VIM/WIM/SDN/k8scluster/vca/PaaS/k8srepo: "accounts"
+    # NS/NSI: "services" VIM/WIM/SDN: "accounts"
     topic_service_list = ["ns", "nsi"]
-    topic_account_list = ["vim", "wim", "sdn", "k8scluster", "vca", "paas", "k8srepo"]
+    topic_account_list = ["vim", "wim", "sdn", "k8scluster", "vca", "k8srepo"]
 
     # Map topic to InstanceID
     topic2instid_dict = {"ns": "nsInstanceId", "nsi": "netsliceInstanceId"}
@@ -421,7 +460,6 @@ class TaskRegistry(LcmBase):
         "sdn": "sdns",
         "k8scluster": "k8sclusters",
         "vca": "vca",
-        "paas": "paas",
         "k8srepo": "k8srepos",
     }
 
@@ -434,7 +472,6 @@ class TaskRegistry(LcmBase):
             "sdn": {},
             "k8scluster": {},
             "vca": {},
-            "paas": {},
             "k8srepo": {},
         }
         self.worker_id = worker_id
@@ -444,7 +481,7 @@ class TaskRegistry(LcmBase):
     def register(self, topic, _id, op_id, task_name, task):
         """
         Register a new task
-        :param topic: Can be "ns", "nsi", "vim_account", "sdn", "paas"
+        :param topic: Can be "ns", "nsi", "vim_account", "sdn"
         :param _id: _id of the related item
         :param op_id: id of the operation of the related item
         :param task_name: Task descriptive name, as create, instantiate, terminate. Must be unique in this op_id
@@ -616,21 +653,21 @@ class TaskRegistry(LcmBase):
         """
         Lock a task, if possible, to indicate to the HA system that
         the task will be executed in this LCM instance.
-        :param topic: Can be "ns", "nsi", "vim", "wim", "paas" or "sdn"
+        :param topic: Can be "ns", "nsi", "vim", "wim", or "sdn"
         :param op_type: Operation type, can be "nslcmops", "nsilcmops", "create", "edit", "delete"
-        :param op_id: NS, NSI: Operation ID  VIM,WIM,SDN,PaaS: Account ID + ':' + Operation Index
+        :param op_id: NS, NSI: Operation ID  VIM,WIM,SDN: Account ID + ':' + Operation Index
         :return:
         True=lock was successful => execute the task (not registered by any other LCM instance)
         False=lock failed => do NOT execute the task (already registered by another LCM instance)
 
         HA tasks and backward compatibility:
-        If topic is "account type" (VIM/WIM/SDN/PaaS) and op_id is None, 'op_id' was not provided by NBI.
+        If topic is "account type" (VIM/WIM/SDN) and op_id is None, 'op_id' was not provided by NBI.
         This means that the running NBI instance does not support HA.
         In such a case this method should always return True, to always execute
         the task in this instance of LCM, without querying the DB.
         """
 
-        # Backward compatibility for VIM/WIM/SDN/k8scluster/PaaS without op_id
+        # Backward compatibility for VIM/WIM/SDN/k8scluster without op_id
         if self._is_account_type_HA(topic) and op_id is None:
             return True
 
index 3a8002c..2256540 100644 (file)
@@ -34,9 +34,6 @@ __author__ = "Felipe Vicens, Pol Alemany, Alfonso Tierno"
 
 
 class NetsliceLcm(LcmBase):
-
-    timeout_nsi_deploy = 2 * 3600  # default global timeout for deployment a nsi
-
     def __init__(self, msg, lcm_tasks, config, loop, ns):
         """
         Init, Connect to database, filesystem storage, and messaging
@@ -48,7 +45,7 @@ class NetsliceLcm(LcmBase):
         self.loop = loop
         self.lcm_tasks = lcm_tasks
         self.ns = ns
-        self.ro_config = config["ro_config"]
+        self.ro_config = config["RO"]
         self.timeout = config["timeout"]
 
         super().__init__(msg, self.logger)
@@ -392,9 +389,7 @@ class NetsliceLcm(LcmBase):
             if nsi_params and nsi_params.get("timeout_nsi_deploy"):
                 timeout_nsi_deploy = nsi_params["timeout_nsi_deploy"]
             else:
-                timeout_nsi_deploy = self.timeout.get(
-                    "nsi_deploy", self.timeout_nsi_deploy
-                )
+                timeout_nsi_deploy = self.timeout.get("nsi_deploy")
 
             # Empty list to keep track of network service records status in the netslice
             nsir_admin = db_nsir_admin = db_nsir.get("_admin")
index a6ef52e..b7df0b6 100644 (file)
@@ -34,6 +34,7 @@ from jinja2 import (
 )
 
 from osm_lcm import ROclient
+from osm_lcm.data_utils.lcm_config import LcmCfg
 from osm_lcm.data_utils.nsr import (
     get_deployed_kdu,
     get_deployed_vca,
@@ -59,6 +60,7 @@ from osm_lcm.lcm_utils import (
     populate_dict,
     check_juju_bundle_existence,
     get_charm_artifact_path,
+    get_ee_id_parts,
 )
 from osm_lcm.data_utils.nsd import (
     get_ns_configuration_relation_list,
@@ -83,6 +85,7 @@ from osm_lcm.data_utils.vnfd import (
     get_juju_ee_ref,
     get_kdu_resource_profile,
     find_software_version,
+    check_helm_ee_in_ns,
 )
 from osm_lcm.data_utils.list_utils import find_in_list
 from osm_lcm.data_utils.vnfr import (
@@ -103,6 +106,11 @@ from osm_common.fsbase import FsException
 
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
+from osm_lcm.data_utils.wim import (
+    get_sdn_ports,
+    get_target_wim_attrs,
+    select_feasible_wim_account,
+)
 
 from n2vc.n2vc_juju_conn import N2VCJujuConnector
 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
@@ -121,27 +129,12 @@ __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
 
 
 class NsLcm(LcmBase):
-    timeout_vca_on_error = (
-        5 * 60
-    )  # Time for charm from first time at blocked,error status to mark as failed
-    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
-    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
-    timeout_ns_heal = 1800  # default global timeout for un deployment a ns
-    timeout_charm_delete = 10 * 60
-    timeout_primitive = 30 * 60  # timeout for primitive execution
-    timeout_ns_update = 30 * 60  # timeout for ns update
-    timeout_progress_primitive = (
-        10 * 60
-    )  # timeout for some progress in a primitive execution
-    timeout_migrate = 1800  # default global timeout for migrating vnfs
-    timeout_operate = 1800  # default global timeout for migrating vnfs
-    timeout_verticalscale = 1800  # default global timeout for Vertical Sclaing
     SUBOPERATION_STATUS_NOT_FOUND = -1
     SUBOPERATION_STATUS_NEW = -2
     SUBOPERATION_STATUS_SKIP = -3
     task_name_deploy_vca = "Deploying VCA"
 
-    def __init__(self, msg, lcm_tasks, config, loop):
+    def __init__(self, msg, lcm_tasks, config: LcmCfg, loop):
         """
         Init, Connect to database, filesystem storage, and messaging
         :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
@@ -153,10 +146,9 @@ class NsLcm(LcmBase):
         self.fs = Filesystem().instance.fs
         self.loop = loop
         self.lcm_tasks = lcm_tasks
-        self.timeout = config["timeout"]
-        self.ro_config = config["ro_config"]
-        self.ng_ro = config["ro_config"].get("ng")
-        self.vca_config = config["VCA"].copy()
+        self.timeout = config.timeout
+        self.ro_config = config.RO
+        self.vca_config = config.VCA
 
         # create N2VC connector
         self.n2vc = N2VCJujuConnector(
@@ -175,8 +167,8 @@ class NsLcm(LcmBase):
         )
 
         self.k8sclusterhelm2 = K8sHelmConnector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            helm_command=self.vca_config.get("helmpath"),
+            kubectl_command=self.vca_config.kubectlpath,
+            helm_command=self.vca_config.helmpath,
             log=self.logger,
             on_update_db=None,
             fs=self.fs,
@@ -184,8 +176,8 @@ class NsLcm(LcmBase):
         )
 
         self.k8sclusterhelm3 = K8sHelm3Connector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            helm_command=self.vca_config.get("helm3path"),
+            kubectl_command=self.vca_config.kubectlpath,
+            helm_command=self.vca_config.helm3path,
             fs=self.fs,
             log=self.logger,
             db=self.db,
@@ -193,8 +185,8 @@ class NsLcm(LcmBase):
         )
 
         self.k8sclusterjuju = K8sJujuConnector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            juju_command=self.vca_config.get("jujupath"),
+            kubectl_command=self.vca_config.kubectlpath,
+            juju_command=self.vca_config.jujupath,
             log=self.logger,
             loop=self.loop,
             on_update_db=self._on_update_k8s_db,
@@ -219,7 +211,7 @@ class NsLcm(LcmBase):
         }
 
         # create RO client
-        self.RO = NgRoClient(self.loop, **self.ro_config)
+        self.RO = NgRoClient(self.loop, **self.ro_config.to_dict())
 
         self.op_status_map = {
             "instantiation": self.RO.status,
@@ -855,9 +847,30 @@ class NsLcm(LcmBase):
                     target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
                         "provider-network"
                     ]["sdn-ports"]
-            if vld_params.get("wimAccountId"):
-                target_wim = "wim:{}".format(vld_params["wimAccountId"])
-                target_vld["vim_info"][target_wim] = {}
+
+            # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
+            # if wim_account_id is specified in vld_params, validate if it is feasible.
+            wim_account_id, db_wim = select_feasible_wim_account(
+                db_nsr, db_vnfrs, target_vld, vld_params, self.logger
+            )
+
+            if wim_account_id:
+                # WIM is needed and a feasible one was found, populate WIM target and SDN ports
+                self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
+                # update vld_params with correct WIM account Id
+                vld_params["wimAccountId"] = wim_account_id
+
+                target_wim = "wim:{}".format(wim_account_id)
+                target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
+                sdn_ports = get_sdn_ports(vld_params, db_wim)
+                if len(sdn_ports) > 0:
+                    target_vld["vim_info"][target_wim] = target_wim_attrs
+                    target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
+
+                self.logger.debug(
+                    "Target VLD with WIM data: {:s}".format(str(target_vld))
+                )
+
             for param in ("vim-network-name", "vim-network-id"):
                 if vld_params.get(param):
                     if isinstance(vld_params[param], dict):
@@ -889,6 +902,8 @@ class NsLcm(LcmBase):
                         None,
                     )
                     vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
+                    if not vdur:
+                        return
                     for a_index, a_vld in enumerate(target["ns"]["vld"]):
                         target_vld = find_in_list(
                             get_iterable(vdur, "interfaces"),
@@ -1450,9 +1465,7 @@ class NsLcm(LcmBase):
             if ns_params and ns_params.get("timeout_ns_deploy"):
                 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
             else:
-                timeout_ns_deploy = self.timeout.get(
-                    "ns_deploy", self.timeout_ns_deploy
-                )
+                timeout_ns_deploy = self.timeout.ns_deploy
 
             # Check for and optionally request placement optimization. Database will be updated if placement activated
             stage[2] = "Waiting for Placement."
@@ -1642,7 +1655,7 @@ class NsLcm(LcmBase):
                     ro_vm_id = "{}-{}".format(
                         db_vnfr["member-vnf-index-ref"], target_vdu_id
                     )  # TODO add vdu_index
-                    if self.ng_ro:
+                    if self.ro_config.ng:
                         target = {
                             "action": {
                                 "action": "inject_ssh_key",
@@ -1769,6 +1782,7 @@ class NsLcm(LcmBase):
         vdu_id,
         kdu_name,
         vdu_index,
+        kdu_index,
         config_descriptor,
         deploy_params,
         base_folder,
@@ -2030,13 +2044,16 @@ class NsLcm(LcmBase):
             )
 
             # add relations for this VCA (wait for other peers related with this VCA)
-            await self._add_vca_relations(
+            is_relation_added = await self._add_vca_relations(
                 logging_text=logging_text,
                 nsr_id=nsr_id,
                 vca_type=vca_type,
                 vca_index=vca_index,
             )
 
+            if not is_relation_added:
+                raise LcmException("Relations could not be added to VCA.")
+
             # if SSH access is required, then get execution environment SSH public
             # if native charm we have waited already to VM be UP
             if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
@@ -2191,6 +2208,11 @@ class NsLcm(LcmBase):
                     vnfr_id=vnfr_id,
                     nsr_id=nsr_id,
                     target_ip=rw_mgmt_ip,
+                    vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
+                    vdu_id=vdu_id,
+                    vdu_index=vdu_index,
+                    kdu_name=kdu_name,
+                    kdu_index=kdu_index,
                 )
                 if prometheus_jobs:
                     self.update_db_2(
@@ -2226,7 +2248,7 @@ class NsLcm(LcmBase):
             self._write_configuration_status(
                 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
             )
-            raise LcmException("{} {}".format(step, e)) from e
+            raise LcmException("{}. {}".format(step, e)) from e
 
     def _write_ns_status(
         self,
@@ -2480,9 +2502,7 @@ class NsLcm(LcmBase):
             if ns_params and ns_params.get("timeout_ns_deploy"):
                 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
             else:
-                timeout_ns_deploy = self.timeout.get(
-                    "ns_deploy", self.timeout_ns_deploy
-                )
+                timeout_ns_deploy = self.timeout.ns_deploy
 
             # read from db: ns
             stage[1] = "Getting nsr={} from db.".format(nsr_id)
@@ -2583,8 +2603,8 @@ class NsLcm(LcmBase):
             # feature 1429. Add n2vc public key to needed VMs
             n2vc_key = self.n2vc.get_public_key()
             n2vc_key_list = [n2vc_key]
-            if self.vca_config.get("public_key"):
-                n2vc_key_list.append(self.vca_config["public_key"])
+            if self.vca_config.public_key:
+                n2vc_key_list.append(self.vca_config.public_key)
 
             stage[1] = "Deploying NS at VIM."
             task_ro = asyncio.ensure_future(
@@ -2607,6 +2627,17 @@ class NsLcm(LcmBase):
             stage[1] = "Deploying Execution Environments."
             self.logger.debug(logging_text + stage[1])
 
+            # create namespace and certificate if any helm based EE is present in the NS
+            if check_helm_ee_in_ns(db_vnfds):
+                # TODO: create EE namespace
+                # create TLS certificates
+                await self.vca_map["helm-v3"].create_tls_certificate(
+                    secret_name="ee-tls-{}".format(nsr_id),
+                    dns_prefix="*",
+                    nsr_id=nsr_id,
+                    usage="server auth",
+                )
+
             nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
             for vnf_profile in get_vnf_profiles(nsd):
                 vnfd_id = vnf_profile["vnfd-id"]
@@ -2618,6 +2649,7 @@ class NsLcm(LcmBase):
                 vdu_index = 0
                 vdu_name = None
                 kdu_name = None
+                kdu_index = None
 
                 # Get additional parameters
                 deploy_params = {"OSM": get_osm_params(db_vnfr)}
@@ -2641,6 +2673,7 @@ class NsLcm(LcmBase):
                         kdu_name=kdu_name,
                         member_vnf_index=member_vnf_index,
                         vdu_index=vdu_index,
+                        kdu_index=kdu_index,
                         vdu_name=vdu_name,
                         deploy_params=deploy_params,
                         descriptor_config=descriptor_config,
@@ -2673,6 +2706,7 @@ class NsLcm(LcmBase):
                     if descriptor_config:
                         vdu_name = None
                         kdu_name = None
+                        kdu_index = None
                         for vdu_index in range(vdud_count):
                             # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                             self._deploy_n2vc(
@@ -2688,6 +2722,7 @@ class NsLcm(LcmBase):
                                 vnfd_id=vnfd_id,
                                 vdu_id=vdu_id,
                                 kdu_name=kdu_name,
+                                kdu_index=kdu_index,
                                 member_vnf_index=member_vnf_index,
                                 vdu_index=vdu_index,
                                 vdu_name=vdu_name,
@@ -2704,8 +2739,10 @@ class NsLcm(LcmBase):
                         vdu_id = None
                         vdu_index = 0
                         vdu_name = None
-                        kdur = next(
-                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
+                        kdu_index, kdur = next(
+                            x
+                            for x in enumerate(db_vnfr["kdur"])
+                            if x[1]["kdu-name"] == kdu_name
                         )
                         deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                         if kdur.get("additionalParams"):
@@ -2725,6 +2762,7 @@ class NsLcm(LcmBase):
                             kdu_name=kdu_name,
                             member_vnf_index=member_vnf_index,
                             vdu_index=vdu_index,
+                            kdu_index=kdu_index,
                             vdu_name=vdu_name,
                             deploy_params=deploy_params_kdu,
                             descriptor_config=descriptor_config,
@@ -2741,6 +2779,7 @@ class NsLcm(LcmBase):
                 member_vnf_index = None
                 vdu_id = None
                 kdu_name = None
+                kdu_index = None
                 vdu_index = 0
                 vdu_name = None
 
@@ -2763,6 +2802,7 @@ class NsLcm(LcmBase):
                     kdu_name=kdu_name,
                     member_vnf_index=member_vnf_index,
                     vdu_index=vdu_index,
+                    kdu_index=kdu_index,
                     vdu_name=vdu_name,
                     deploy_params=deploy_params,
                     descriptor_config=descriptor_config,
@@ -2893,9 +2933,11 @@ class NsLcm(LcmBase):
             self.logger.debug(logging_text + "Exit")
             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
 
-    def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
+    def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
         if vnfd_id not in cached_vnfds:
-            cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
+            cached_vnfds[vnfd_id] = self.db.get_one(
+                "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
+            )
         return cached_vnfds[vnfd_id]
 
     def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
@@ -2937,7 +2979,8 @@ class NsLcm(LcmBase):
         ]:
             vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
             vnfd_id = vnf_profile["vnfd-id"]
-            db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
+            project = nsd["_admin"]["projects_read"][0]
+            db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
             entity_id = (
                 vnfd_id
                 if ee_relation_level == EELevel.VNF
@@ -3010,7 +3053,8 @@ class NsLcm(LcmBase):
         vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
         vnf_profile_id = vnf_profile["id"]
         vnfd_id = vnf_profile["vnfd-id"]
-        db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
+        project = nsd["_admin"]["projects_read"][0]
+        db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
         db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
         for r in db_vnf_relations:
             provider_dict = None
@@ -3065,7 +3109,8 @@ class NsLcm(LcmBase):
             vnf_profiles,
             lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
         )["vnfd-id"]
-        db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
+        project = nsd["_admin"]["projects_read"][0]
+        db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
         kdu_resource_profile = get_kdu_resource_profile(
             db_vnfd, ee_relation.kdu_resource_profile_id
         )
@@ -3171,11 +3216,14 @@ class NsLcm(LcmBase):
                 requirer_vca_id,
                 relation.requirer.endpoint,
             )
-            await self.vca_map[vca_type].add_relation(
-                provider=provider_relation_endpoint,
-                requirer=requirer_relation_endpoint,
-            )
-            # remove entry from relations list
+            try:
+                await self.vca_map[vca_type].add_relation(
+                    provider=provider_relation_endpoint,
+                    requirer=requirer_relation_endpoint,
+                )
+            except N2VCException as exception:
+                self.logger.error(exception)
+                raise LcmException(exception)
             return True
         return False
 
@@ -3735,6 +3783,7 @@ class NsLcm(LcmBase):
         kdu_name,
         member_vnf_index,
         vdu_index,
+        kdu_index,
         vdu_name,
         deploy_params,
         descriptor_config,
@@ -3861,6 +3910,7 @@ class NsLcm(LcmBase):
                     vdu_id=vdu_id,
                     kdu_name=kdu_name,
                     vdu_index=vdu_index,
+                    kdu_index=kdu_index,
                     deploy_params=deploy_params,
                     config_descriptor=descriptor_config,
                     base_folder=base_folder,
@@ -4232,7 +4282,7 @@ class NsLcm(LcmBase):
         try:
             await self.n2vc.delete_namespace(
                 namespace=namespace,
-                total_timeout=self.timeout_charm_delete,
+                total_timeout=self.timeout.charm_delete,
                 vca_id=vca_id,
             )
         except N2VCNotFound:  # already deleted. Skip
@@ -4447,7 +4497,7 @@ class NsLcm(LcmBase):
 
         logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
         self.logger.debug(logging_text + "Enter")
-        timeout_ns_terminate = self.timeout_ns_terminate
+        timeout_ns_terminate = self.timeout.ns_terminate
         db_nsr = None
         db_nslcmop = None
         operation_params = None
@@ -4573,7 +4623,7 @@ class NsLcm(LcmBase):
                 error_list = await self._wait_for_tasks(
                     logging_text,
                     tasks_dict_info,
-                    min(self.timeout_charm_delete, timeout_ns_terminate),
+                    min(self.timeout.charm_delete, timeout_ns_terminate),
                     stage,
                     nslcmop_id,
                 )
@@ -4591,12 +4641,19 @@ class NsLcm(LcmBase):
                 task_delete_ee = asyncio.ensure_future(
                     asyncio.wait_for(
                         self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
-                        timeout=self.timeout_charm_delete,
+                        timeout=self.timeout.charm_delete,
                     )
                 )
                 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
 
+            # Delete Namespace and Certificates if necessary
+            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
+                await self.vca_map["helm-v3"].delete_tls_certificate(
+                    certificate_name=db_nslcmop["nsInstanceId"],
+                )
+                # TODO: Delete namespace
+
             # Delete from k8scluster
             stage[1] = "Deleting KDUs."
             self.logger.debug(logging_text + stage[1])
@@ -4630,7 +4687,7 @@ class NsLcm(LcmBase):
 
             # remove from RO
             stage[1] = "Deleting ns from VIM."
-            if self.ng_ro:
+            if self.ro_config.ng:
                 task_delete_ro = asyncio.ensure_future(
                     self._terminate_ng_ro(
                         logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
@@ -4993,13 +5050,13 @@ class NsLcm(LcmBase):
                             ee_id=ee_id,
                             primitive_name=primitive,
                             params_dict=primitive_params,
-                            progress_timeout=self.timeout_progress_primitive,
-                            total_timeout=self.timeout_primitive,
+                            progress_timeout=self.timeout.progress_primitive,
+                            total_timeout=self.timeout.primitive,
                             db_dict=db_dict,
                             vca_id=vca_id,
                             vca_type=vca_type,
                         ),
-                        timeout=timeout or self.timeout_primitive,
+                        timeout=timeout or self.timeout.primitive,
                     )
                     # execution was OK
                     break
@@ -5107,7 +5164,7 @@ class NsLcm(LcmBase):
             primitive = db_nslcmop["operationParams"]["primitive"]
             primitive_params = db_nslcmop["operationParams"]["primitive_params"]
             timeout_ns_action = db_nslcmop["operationParams"].get(
-                "timeout_ns_action", self.timeout_primitive
+                "timeout_ns_action", self.timeout.primitive
             )
 
             if vnf_index:
@@ -5256,12 +5313,19 @@ class NsLcm(LcmBase):
                         parts = kdu_model.split(sep=":")
                         if len(parts) == 2:
                             kdu_model = parts[0]
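+                    # Optional "kdu_atomic_upgrade" additional param lets the operator disable the atomic helm upgrade (enabled by default)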
+                    if desc_params.get("kdu_atomic_upgrade"):
+                        atomic_upgrade = desc_params.get(
+                            "kdu_atomic_upgrade"
+                        ).lower() in ("yes", "true", "1")
+                        del desc_params["kdu_atomic_upgrade"]
+                    else:
+                        atomic_upgrade = True
 
                     detailed_status = await asyncio.wait_for(
                         self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                             cluster_uuid=kdu.get("k8scluster-uuid"),
                             kdu_instance=kdu.get("kdu-instance"),
-                            atomic=True,
+                            atomic=atomic_upgrade,
                             kdu_model=kdu_model,
                             params=desc_params,
                             db_dict=db_dict,
@@ -5475,7 +5539,7 @@ class NsLcm(LcmBase):
             stage[2] = "Terminating VDUs"
             if scaling_info.get("vdu-delete"):
                 # scale_process = "RO"
-                if self.ro_config.get("ng"):
+                if self.ro_config.ng:
                     await self._scale_ng_ro(
                         logging_text,
                         db_nsr,
@@ -5653,7 +5717,7 @@ class NsLcm(LcmBase):
                         }
                     )
                 scaling_info["vdu-create"][vdud["id"]] = count_index
-            if self.ro_config.get("ng"):
+            if self.ro_config.ng:
                 self.logger.debug(
                     "New Resources to be deployed: {}".format(scaling_info)
                 )
@@ -5699,7 +5763,7 @@ class NsLcm(LcmBase):
                 path=path,
                 charm_id=charm_id,
                 charm_type=charm_type,
-                timeout=timeout or self.timeout_ns_update,
+                timeout=timeout or self.timeout.ns_update,
             )
 
             if output:
@@ -5807,7 +5871,8 @@ class NsLcm(LcmBase):
                     current_charm_artifact_path,
                     target_charm_artifact_path,
                     charm_artifact_paths,
-                ) = ([], [], [])
+                    helm_artifacts,
+                ) = ([], [], [], [])
 
                 step = "Checking if revision has changed in VNFD"
                 if current_vnf_revision != latest_vnfd_revision:
@@ -5829,24 +5894,34 @@ class NsLcm(LcmBase):
                     step = (
                         "Get the charm-type, charm-id, ee-id if there is deployed VCA"
                     )
-                    base_folder = latest_vnfd["_admin"]["storage"]
+                    current_base_folder = current_vnfd["_admin"]["storage"]
+                    latest_base_folder = latest_vnfd["_admin"]["storage"]
 
-                    for charm_index, charm_deployed in enumerate(
+                    for vca_index, vca_deployed in enumerate(
                         get_iterable(nsr_deployed, "VCA")
                     ):
                         vnf_index = db_vnfr.get("member-vnf-index-ref")
 
                         # Getting charm-id and charm-type
-                        if charm_deployed.get("member-vnf-index") == vnf_index:
-                            charm_id = self.get_vca_id(db_vnfr, db_nsr)
-                            charm_type = charm_deployed.get("type")
+                        if vca_deployed.get("member-vnf-index") == vnf_index:
+                            vca_id = self.get_vca_id(db_vnfr, db_nsr)
+                            vca_type = vca_deployed.get("type")
+                            vdu_count_index = vca_deployed.get("vdu_count_index")
 
                             # Getting ee-id
-                            ee_id = charm_deployed.get("ee_id")
+                            ee_id = vca_deployed.get("ee_id")
 
                             step = "Getting descriptor config"
+                            if current_vnfd.get("kdu"):
+
+                                search_key = "kdu_name"
+                            else:
+                                search_key = "vnfd_id"
+
+                            entity_id = vca_deployed.get(search_key)
+
                             descriptor_config = get_configuration(
-                                current_vnfd, current_vnfd["id"]
+                                current_vnfd, entity_id
                             )
 
                             if "execution-environment-list" in descriptor_config:
@@ -5866,20 +5941,52 @@ class NsLcm(LcmBase):
                                     step = "Setting Charm artifact paths"
                                     current_charm_artifact_path.append(
                                         get_charm_artifact_path(
-                                            base_folder,
+                                            current_base_folder,
                                             charm_name,
-                                            charm_type,
+                                            vca_type,
                                             current_vnf_revision,
                                         )
                                     )
                                     target_charm_artifact_path.append(
                                         get_charm_artifact_path(
-                                            base_folder,
+                                            latest_base_folder,
                                             charm_name,
-                                            charm_type,
+                                            vca_type,
                                             latest_vnfd_revision,
                                         )
                                     )
+                                elif ee_item.get("helm-chart"):
+                                    # add chart to list and all parameters
+                                    step = "Getting helm chart name"
+                                    chart_name = ee_item.get("helm-chart")
+                                    if (
+                                        ee_item.get("helm-version")
+                                        and ee_item.get("helm-version") == "v2"
+                                    ):
+                                        vca_type = "helm"
+                                    else:
+                                        vca_type = "helm-v3"
+                                    step = "Setting Helm chart artifact paths"
+
+                                    helm_artifacts.append(
+                                        {
+                                            "current_artifact_path": get_charm_artifact_path(
+                                                current_base_folder,
+                                                chart_name,
+                                                vca_type,
+                                                current_vnf_revision,
+                                            ),
+                                            "target_artifact_path": get_charm_artifact_path(
+                                                latest_base_folder,
+                                                chart_name,
+                                                vca_type,
+                                                latest_vnfd_revision,
+                                            ),
+                                            "ee_id": ee_id,
+                                            "vca_index": vca_index,
+                                            "vdu_index": vdu_count_index,
+                                        }
+                                    )
 
                             charm_artifact_paths = zip(
                                 current_charm_artifact_path, target_charm_artifact_path
@@ -5948,8 +6055,8 @@ class NsLcm(LcmBase):
                                     detailed_status,
                                 ) = await self._ns_charm_upgrade(
                                     ee_id=ee_id,
-                                    charm_id=charm_id,
-                                    charm_type=charm_type,
+                                    charm_id=vca_id,
+                                    charm_type=vca_type,
                                     path=self.fs.path + target_charm_path,
                                     timeout=timeout_seconds,
                                 )
@@ -5972,6 +6079,121 @@ class NsLcm(LcmBase):
                         detailed_status = "Done"
                         db_nslcmop_update["detailed-status"] = "Done"
 
+                    # Upgrade helm-based execution environments whose chart content has changed
+                    for item in helm_artifacts:
+                        if not (
+                            item["current_artifact_path"]
+                            and item["target_artifact_path"]
+                            and self.check_charm_hash_changed(
+                                item["current_artifact_path"],
+                                item["target_artifact_path"],
+                            )
+                        ):
+                            continue
+                        db_update_entry = "_admin.deployed.VCA.{}.".format(
+                            item["vca_index"]
+                        )
+                        vnfr_id = db_vnfr["_id"]
+                        osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
+                        db_dict = {
+                            "collection": "nsrs",
+                            "filter": {"_id": nsr_id},
+                            "path": db_update_entry,
+                        }
+                        vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
+                        await self.vca_map[vca_type].upgrade_execution_environment(
+                            namespace=namespace,
+                            helm_id=helm_id,
+                            db_dict=db_dict,
+                            config=osm_config,
+                            artifact_path=item["target_artifact_path"],
+                            vca_type=vca_type,
+                        )
+                        vnf_id = db_vnfr.get("vnfd-ref")
+                        config_descriptor = get_configuration(latest_vnfd, vnf_id)
+                        self.logger.debug("get ssh key block")
+                        rw_mgmt_ip = None
+                        if deep_get(
+                            config_descriptor,
+                            ("config-access", "ssh-access", "required"),
+                        ):
+                            # Needed to inject a ssh key
+                            user = deep_get(
+                                config_descriptor,
+                                ("config-access", "ssh-access", "default-user"),
+                            )
+                            step = (
+                                "Install configuration Software, getting public ssh key"
+                            )
+                            pub_key = await self.vca_map[
+                                vca_type
+                            ].get_ee_ssh_public__key(
+                                ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
+                            )
+
+                            step = (
+                                "Insert public key into VM user={} ssh_key={}".format(
+                                    user, pub_key
+                                )
+                            )
+                            self.logger.debug(logging_text + step)
+
+                            # wait for RO (ip-address) Insert pub_key into VM
+                            rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+                                logging_text,
+                                nsr_id,
+                                vnfr_id,
+                                None,
+                                item["vdu_index"],
+                                user=user,
+                                pub_key=pub_key,
+                            )
+
+                        initial_config_primitive_list = config_descriptor.get(
+                            "initial-config-primitive"
+                        )
+                        config_primitive = next(
+                            (
+                                p
+                                for p in initial_config_primitive_list
+                                if p["name"] == "config"
+                            ),
+                            None,
+                        )
+                        if not config_primitive:
+                            continue
+
+                        deploy_params = {"OSM": get_osm_params(db_vnfr)}
+                        if rw_mgmt_ip:
+                            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
+                        if db_vnfr.get("additionalParamsForVnf"):
+                            deploy_params.update(
+                                parse_yaml_strings(
+                                    db_vnfr["additionalParamsForVnf"].copy()
+                                )
+                            )
+                        primitive_params_ = self._map_primitive_params(
+                            config_primitive, {}, deploy_params
+                        )
+
+                        step = "execute primitive '{}' params '{}'".format(
+                            config_primitive["name"], primitive_params_
+                        )
+                        self.logger.debug(logging_text + step)
+                        await self.vca_map[vca_type].exec_primitive(
+                            ee_id=ee_id,
+                            primitive_name=config_primitive["name"],
+                            params_dict=primitive_params_,
+                            db_dict=db_dict,
+                            vca_id=vca_id,
+                            vca_type=vca_type,
+                        )
+
+                        step = "Updating policies"
+                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
+                        detailed_status = "Done"
+                        db_nslcmop_update["detailed-status"] = "Done"
+
                     #  If nslcmop_operation_state is None, so any operation is not failed.
                     if not nslcmop_operation_state:
                         nslcmop_operation_state = "COMPLETED"
@@ -6734,7 +6956,7 @@ class NsLcm(LcmBase):
                                             scaling_in=True,
                                             vca_id=vca_id,
                                         ),
-                                        timeout=self.timeout_charm_delete,
+                                        timeout=self.timeout.charm_delete,
                                     )
                                 )
                                 tasks_dict_info[task] = "Terminating VCA {}".format(
@@ -6754,7 +6976,7 @@ class NsLcm(LcmBase):
                                 logging_text,
                                 tasks_dict_info,
                                 min(
-                                    self.timeout_charm_delete, self.timeout_ns_terminate
+                                    self.timeout.charm_delete, self.timeout.ns_terminate
                                 ),
                                 stage,
                                 nslcmop_id,
@@ -6776,7 +6998,7 @@ class NsLcm(LcmBase):
             # SCALE RO - BEGIN
             if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
                 scale_process = "RO"
-                if self.ro_config.get("ng"):
+                if self.ro_config.ng:
                     await self._scale_ng_ro(
                         logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
                     )
@@ -6830,6 +7052,7 @@ class NsLcm(LcmBase):
                                 vdu_id = None
                                 vdu_name = None
                                 kdu_name = None
+                                kdu_index = None
                                 self._deploy_n2vc(
                                     logging_text=logging_text
                                     + "member_vnf_index={} ".format(member_vnf_index),
@@ -6841,6 +7064,7 @@ class NsLcm(LcmBase):
                                     vnfd_id=vnfd_id,
                                     vdu_id=vdu_id,
                                     kdu_name=kdu_name,
+                                    kdu_index=kdu_index,
                                     member_vnf_index=member_vnf_index,
                                     vdu_index=vdu_index,
                                     vdu_name=vdu_name,
@@ -6867,6 +7091,7 @@ class NsLcm(LcmBase):
                             if descriptor_config:
                                 vdu_name = None
                                 kdu_name = None
+                                kdu_index = None
                                 stage[
                                     1
                                 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
@@ -6889,6 +7114,7 @@ class NsLcm(LcmBase):
                                     kdu_name=kdu_name,
                                     member_vnf_index=member_vnf_index,
                                     vdu_index=vdu_index,
+                                    kdu_index=kdu_index,
                                     vdu_name=vdu_name,
                                     deploy_params=deploy_params_vdu,
                                     descriptor_config=descriptor_config,
@@ -7071,7 +7297,7 @@ class NsLcm(LcmBase):
                 exc = await self._wait_for_tasks(
                     logging_text,
                     tasks_dict_info,
-                    self.timeout_ns_deploy,
+                    self.timeout.ns_deploy,
                     stage,
                     nslcmop_id,
                     nsr_id=nsr_id,
@@ -7186,23 +7412,27 @@ class NsLcm(LcmBase):
                                     primitive_name=terminate_config_primitive["name"],
                                     params=primitive_params_,
                                     db_dict=db_dict,
+                                    total_timeout=self.timeout.primitive,
                                     vca_id=vca_id,
                                 ),
-                                timeout=600,
+                                timeout=self.timeout.primitive
+                                * self.timeout.primitive_outer_factor,
                             )
 
                 await asyncio.wait_for(
                     self.k8scluster_map[k8s_cluster_type].scale(
-                        kdu_instance,
-                        scale,
-                        kdu_scaling_info["resource-name"],
+                        kdu_instance=kdu_instance,
+                        scale=scale,
+                        resource_name=kdu_scaling_info["resource-name"],
+                        total_timeout=self.timeout.scale_on_error,
                         vca_id=vca_id,
                         cluster_uuid=cluster_uuid,
                         kdu_model=kdu_model,
                         atomic=True,
                         db_dict=db_dict,
                     ),
-                    timeout=self.timeout_vca_on_error,
+                    timeout=self.timeout.scale_on_error
+                    * self.timeout.scale_on_error_outer_factor,
                 )
 
                 if kdu_scaling_info["type"] == "create":
@@ -7277,7 +7507,7 @@ class NsLcm(LcmBase):
             n2vc_key_list,
             stage=stage,
             start_deploy=time(),
-            timeout_ns_deploy=self.timeout_ns_deploy,
+            timeout_ns_deploy=self.timeout.ns_deploy,
         )
         if vdu_scaling_info.get("vdu-delete"):
             self.scale_vnfr(
@@ -7285,8 +7515,42 @@ class NsLcm(LcmBase):
             )
 
     async def extract_prometheus_scrape_jobs(
-        self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
-    ):
+        self,
+        ee_id: str,
+        artifact_path: str,
+        ee_config_descriptor: dict,
+        vnfr_id: str,
+        nsr_id: str,
+        target_ip: str,
+        vnf_member_index: str = "",
+        vdu_id: str = "",
+        vdu_index: int = None,
+        kdu_name: str = "",
+        kdu_index: int = None,
+    ) -> dict:
+        """Method to extract Prometheus scrape jobs from the EE's Prometheus template job file.
+            This method waits until the corresponding VDU or KDU is fully instantiated.
+
+        Args:
+            ee_id (str): Execution Environment ID
+            artifact_path (str): Path where the EE's content is (including the Prometheus template file)
+            ee_config_descriptor (dict): Execution Environment's configuration descriptor
+            vnfr_id (str): VNFR ID where this EE applies
+            nsr_id (str): NSR ID where this EE applies
+            target_ip (str): VDU/KDU instance IP address
+            vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
+            vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
+            vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
+            kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
+            kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
+
+        Raises:
+            LcmException: When the VDU or KDU instance is not found within an hour
+
+        Returns:
+            dict: Prometheus jobs
+        """
+        self.logger.debug(f"KDU: {kdu_name}; KDU INDEX: {kdu_index}")
         # look if exist a file called 'prometheus*.j2' and
         artifact_content = self.fs.dir_ls(artifact_path)
         job_file = next(
@@ -7302,6 +7566,52 @@ class NsLcm(LcmBase):
         with self.fs.file_open((artifact_path, job_file), "r") as f:
             job_data = f.read()
 
+        vdur_name = ""
+        kdur_name = ""
+        for r in range(360):
+            db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+            if vdu_id and vdu_index is not None:
+                vdur = next(
+                    (
+                        x
+                        for x in get_iterable(db_vnfr, "vdur")
+                        if (
+                            x.get("vdu-id-ref") == vdu_id
+                            and x.get("count-index") == vdu_index
+                        )
+                    ),
+                    {},
+                )
+                if vdur.get("name"):
+                    vdur_name = vdur.get("name")
+                    break
+            if kdu_name and kdu_index is not None:
+                kdur = next(
+                    (
+                        x
+                        for x in get_iterable(db_vnfr, "kdur")
+                        if (
+                            x.get("kdu-name") == kdu_name
+                            and x.get("count-index") == kdu_index
+                        )
+                    ),
+                    {},
+                )
+                if kdur.get("name"):
+                    kdur_name = kdur.get("name")
+                    break
+
+            await asyncio.sleep(10, loop=self.loop)
+        else:
+            if vdu_id and vdu_index is not None:
+                raise LcmException(
+                    f"Timeout waiting for VDU with name={vdu_id} and index={vdu_index} to be instantiated"
+                )
+            if kdu_name and kdu_index is not None:
+                raise LcmException(
+                    f"Timeout waiting for KDU with name={kdu_name} and index={kdu_index} to be instantiated"
+                )
+
         # TODO get_service
         _, _, service = ee_id.partition(".")  # remove prefix   "namespace."
         host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
@@ -7312,6 +7622,10 @@ class NsLcm(LcmBase):
             "TARGET_IP": target_ip,
             "EXPORTER_POD_IP": host_name,
             "EXPORTER_POD_PORT": host_port,
+            "NSR_ID": nsr_id,
+            "VNF_MEMBER_INDEX": vnf_member_index,
+            "VDUR_NAME": vdur_name,
+            "KDUR_NAME": kdur_name,
         }
         job_list = parse_job(job_data, variables)
         # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
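
Note on the template variables above: parse_job() substitutes them into the VNF package's 'prometheus*.j2' job file. The snippet below is a minimal sketch of that substitution, assuming a hypothetical template and using plain jinja2 purely for illustration; it is not how parse_job() is implemented, and no real VNF package template is reproduced here.

import jinja2

# Hypothetical scrape-job template; real templates ship inside the VNF package.
job_template = (
    "- job_name: vnf_exporter\n"
    "  static_configs:\n"
    "    - targets: ['{{ EXPORTER_POD_IP }}:{{ EXPORTER_POD_PORT }}']\n"
    "      labels:\n"
    "        nsr_id: '{{ NSR_ID }}'\n"
    "        vnf_member_index: '{{ VNF_MEMBER_INDEX }}'\n"
    "        vdur_name: '{{ VDUR_NAME }}'\n"
    "        kdur_name: '{{ KDUR_NAME }}'\n"
    "        target_ip: '{{ TARGET_IP }}'\n"
)

# Placeholder values with the same keys as the "variables" dict built above.
variables = {
    "TARGET_IP": "10.0.0.5",
    "EXPORTER_POD_IP": "exporter-service",
    "EXPORTER_POD_PORT": "9100",
    "NSR_ID": "nsr-1234",
    "VNF_MEMBER_INDEX": "1",
    "VDUR_NAME": "vdu-0",
    "KDUR_NAME": "",
}
print(jinja2.Template(job_template).render(**variables))
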
@@ -7394,7 +7708,7 @@ class NsLcm(LcmBase):
                 action_id,
                 nslcmop_id,
                 start_deploy,
-                self.timeout_operate,
+                self.timeout.operate,
                 None,
                 "start_stop_rebuild",
             )
@@ -7486,7 +7800,7 @@ class NsLcm(LcmBase):
                 action_id,
                 nslcmop_id,
                 start_deploy,
-                self.timeout_migrate,
+                self.timeout.migrate,
                 operation="migrate",
             )
         except (ROclient.ROClientException, DbException, LcmException) as e:
@@ -7593,17 +7907,12 @@ class NsLcm(LcmBase):
             self.update_db_2("nsrs", nsr_id, db_nsr_update)
 
             step = "Sending heal order to VIM"
-            task_ro = asyncio.ensure_future(
-                self.heal_RO(
-                    logging_text=logging_text,
-                    nsr_id=nsr_id,
-                    db_nslcmop=db_nslcmop,
-                    stage=stage,
-                )
+            await self.heal_RO(
+                logging_text=logging_text,
+                nsr_id=nsr_id,
+                db_nslcmop=db_nslcmop,
+                stage=stage,
             )
-            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
-            tasks_dict_info[task_ro] = "Healing at VIM"
-
             # VCA tasks
             # read from db: nsd
             stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
@@ -7765,7 +8074,7 @@ class NsLcm(LcmBase):
                 exc = await self._wait_for_tasks(
                     logging_text,
                     tasks_dict_info,
-                    self.timeout_ns_deploy,
+                    self.timeout.ns_deploy,
                     stage,
                     nslcmop_id,
                     nsr_id=nsr_id,
@@ -7858,7 +8167,7 @@ class NsLcm(LcmBase):
             if ns_params and ns_params.get("timeout_ns_heal"):
                 timeout_ns_heal = ns_params["timeout_ns_heal"]
             else:
-                timeout_ns_heal = self.timeout.get("ns_heal", self.timeout_ns_heal)
+                timeout_ns_heal = self.timeout.ns_heal
 
             db_vims = {}
 
@@ -8345,7 +8654,7 @@ class NsLcm(LcmBase):
                 # n2vc_redesign STEP 5.1
                 # wait for RO (ip-address) Insert pub_key into VM
                 # IMPORTANT: We need do wait for RO to complete healing operation.
-                await self._wait_heal_ro(nsr_id, self.timeout_ns_heal)
+                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                 if vnfr_id:
                     if kdu_name:
                         rw_mgmt_ip = await self.wait_kdu_up(
@@ -8557,7 +8866,7 @@ class NsLcm(LcmBase):
                 action_id,
                 nslcmop_id,
                 start_deploy,
-                self.timeout_verticalscale,
+                self.timeout.verticalscale,
                 operation="verticalscale",
             )
         except (ROclient.ROClientException, DbException, LcmException) as e:
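
Throughout the hunks above, the flat timeout attributes (self.timeout_charm_delete, self.timeout_ns_deploy, and so on) are replaced by dotted access on a single timeout object (self.timeout.charm_delete, self.timeout.ns_deploy, ...). A minimal sketch of such an attribute container follows, assuming a dataclass-style holder; the real definition and defaults live in osm_lcm/data_utils/lcm_config.py (added by this change) and are not reproduced here, so every value below is a placeholder.

from dataclasses import dataclass


# Sketch only: attribute names are the ones referenced in this diff;
# the numeric defaults are placeholders, not the values from lcm_config.py.
@dataclass
class TimeoutsSketch:
    ns_deploy: int = 2 * 3600
    ns_terminate: int = 1800
    ns_heal: int = 1800
    charm_delete: int = 10 * 60
    primitive: int = 30 * 60
    primitive_outer_factor: float = 1.05
    scale_on_error: int = 5 * 60
    scale_on_error_outer_factor: float = 1.05
    operate: int = 10 * 60
    migrate: int = 1800
    verticalscale: int = 1800


timeouts = TimeoutsSketch()
assert timeouts.charm_delete == 600  # accessed as self.timeout.charm_delete in NsLcm
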
diff --git a/osm_lcm/paas.py b/osm_lcm/paas.py
deleted file mode 100644 (file)
index 9c3c0bf..0000000
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/python3
-
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
-
-import logging
-from osm_lcm.lcm_utils import LcmBase
-from osm_common.dbbase import DbException
-
-
-class PaasLcm(LcmBase):
-    timeout_create = 30
-
-    def __init__(self, msg, lcm_tasks, config, loop):
-        """Init, Connect to database, filesystem storage, and messaging
-        Args:
-            msg: Message object to be used to write the messages to Kafka Bus
-            lcm_tasks: Task object to register the tasks
-            config: two level dictionary with configuration. Top level should contain 'database', 'storage'
-            loop: Async event loop object
-        """
-
-        self.logger = logging.getLogger("lcm.paas")
-        self.loop = loop
-        self.lcm_tasks = lcm_tasks
-
-        super().__init__(msg, self.logger)
-
-    def _get_paas_by_id(self, paas_id: str) -> dict:
-        db_paas = self.db.get_one("paas", {"_id": paas_id})
-        self.db.encrypt_decrypt_fields(
-            db_paas,
-            "decrypt",
-            ["secret"],
-            schema_version=db_paas["schema_version"],
-            salt=db_paas["_id"],
-        )
-        return db_paas
-
-    def _register_op_and_unlock(
-        self, paas_id, db_paas_update, action, op_id, operation
-    ):
-        """
-        Args:
-            paas_id (str): ID of the PaaS to update.
-            db_paas_update (dict): content to update in DB.
-            action (str): 'create', 'edit', or 'delete'.
-            op_id (str): ID of the operation to unlock.
-            operation (dict): contains 'state' and 'details'.
-        """
-        self.update_db_2("paas", paas_id, db_paas_update)
-        self.lcm_tasks.unlock_HA(
-            "paas",
-            action,
-            op_id,
-            operationState=operation["state"],
-            detailed_status=operation["details"],
-        )
-
-    def _set_status_connectivity_ok(self, db_paas_update: dict, operation: dict):
-        db_paas_update["_admin.operationalState"] = "ENABLED"
-        db_paas_update["_admin.detailed-status"] = "Connectivity: ok"
-        operation["details"] = "PaaS validated"
-        operation["state"] = "COMPLETED"
-
-    def _set_status_exception_raised(
-        self, db_paas_update: dict, operation: dict, error_msg: str
-    ):
-        db_paas_update["_admin.operationalState"] = "ERROR"
-        db_paas_update["_admin.detailed-status"] = error_msg
-        operation["state"] = "FAILED"
-        operation["details"] = error_msg
-
-    async def create(self, paas_content, order_id):
-        """HA tasks and backward compatibility:
-        If "paas_content" does not include "op_id", we a running a legacy NBI version.
-        In such a case, HA is not supported by NBI, "op_id" is None, and lock_HA() will do nothing.
-        Args:
-            paas_content (dict): Contains "op_id" and paas id ("_id")
-            order_id (str): Of the task
-        """
-        op_id = paas_content.pop("op_id", None)
-        if not self.lcm_tasks.lock_HA("paas", "create", op_id):
-            return
-
-        paas_id = paas_content["_id"]
-        self.logger.debug("Task paas_create={} {}".format(paas_id, "Enter"))
-        db_paas_update = {}
-        operation = {}
-
-        try:
-            self._get_paas_by_id(paas_id)
-            self._set_status_connectivity_ok(db_paas_update, operation)
-            msg = "Task paas_create={} Done. Result: {}".format(
-                paas_id, operation["state"]
-            )
-            self.logger.debug(msg)
-
-        except Exception as e:
-            error_msg = "Failed with exception: {}".format(e)
-            self._set_status_exception_raised(db_paas_update, operation, error_msg)
-            self.logger.error("Task paas_create={} {}".format(paas_id, error_msg))
-        finally:
-            try:
-                self._register_op_and_unlock(
-                    paas_id, db_paas_update, "create", op_id, operation
-                )
-            except DbException as e:
-                msg = "Task paas_create={} Cannot update database:{}".format(paas_id, e)
-                self.logger.error(msg)
-            self.lcm_tasks.remove("paas", paas_id, order_id)
-
-    async def edit(self, paas_content, order_id):
-        """HA tasks and backward compatibility:
-        If "paas_content" does not include "op_id", we a running a legacy NBI version.
-        In such a case, HA is not supported by NBI, "op_id" is None, and lock_HA() will do nothing.
-        Args:
-            paas_content (dict): Contains "op_id" and paas id ("_id")
-            order_id (str): Of the task
-        """
-
-        op_id = paas_content.pop("op_id", None)
-        if not self.lcm_tasks.lock_HA("paas", "edit", op_id):
-            return
-
-        paas_id = paas_content["_id"]
-        self.logger.debug("Task paas_edit={} {}".format(paas_id, "Enter"))
-        db_paas_update = {}
-        operation = {}
-
-        try:
-            self._get_paas_by_id(paas_id)
-            self._set_status_connectivity_ok(db_paas_update, operation)
-            msg = "Task paas_edit={} Done. Result: {}".format(
-                paas_id, operation["state"]
-            )
-            self.logger.debug(msg)
-
-        except Exception as e:
-            error_msg = "Failed with exception: {}".format(e)
-            self._set_status_exception_raised(db_paas_update, operation, error_msg)
-            self.logger.error("Task paas_edit={} {}".format(paas_id, error_msg))
-        finally:
-            try:
-                self._register_op_and_unlock(
-                    paas_id, db_paas_update, "edit", op_id, operation
-                )
-            except DbException as e:
-                msg = "Task paas_edit={} Cannot update database:{}".format(paas_id, e)
-                self.logger.error(msg)
-            self.lcm_tasks.remove("paas", paas_id, order_id)
-
-    async def delete(self, paas_content, order_id):
-        """HA tasks and backward compatibility:
-        If "paas_content" does not include "op_id", we a running a legacy NBI version.
-        In such a case, HA is not supported by NBI, "op_id" is None, and lock_HA() will do nothing.
-        Args:
-            paas_content (dict): Contains "op_id" and paas id ("_id")
-            order_id (str): Of the task
-        """
-        op_id = paas_content.pop("op_id", None)
-        if not self.lcm_tasks.lock_HA("paas", "delete", op_id):
-            return
-
-        paas_id = paas_content["_id"]
-        db_paas_update = {}
-        operation = {}
-
-        try:
-            msg = "Task paas_delete={}: Deleting paas from db".format(paas_id)
-            self.logger.debug(msg)
-            self.db.del_one("paas", {"_id": paas_id})
-            db_paas_update = None
-            operation["state"] = "COMPLETED"
-            operation["details"] = "deleted"
-            msg = "Task paas_delete={}: Done. Result: {}".format(
-                paas_id, operation["state"]
-            )
-            self.logger.debug(msg)
-        except Exception as e:
-            error_msg = "Failed with exception: {}".format(e)
-            self.logger.error("Task paas_delete={} {}".format(paas_id, error_msg))
-            self._set_status_exception_raised(db_paas_update, operation, error_msg)
-        finally:
-            try:
-                self._register_op_and_unlock(
-                    paas_id, db_paas_update, "delete", op_id, operation
-                )
-            except DbException as e:
-                msg = "Task paas_delete={}: Cannot update database:{}".format(
-                    paas_id, e
-                )
-                self.logger.error(msg)
-            self.lcm_tasks.remove("paas", paas_id, order_id)
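
For context on the removed PaasLcm class: create(), edit() and delete() each receive the NBI-provided document (carrying "_id" and, on newer NBI versions, "op_id") plus the task order id, take the HA lock, update the "paas" collection, and finally unlock and deregister the task. The sketch below shows how such a coroutine could be scheduled; it is hypothetical wiring, not the dispatch code that existed in osm_lcm/lcm.py, and paas_lcm, lcm_tasks and order_id are placeholder names.

import asyncio


async def handle_paas_create(paas_lcm, lcm_tasks, msg_params):
    """Hypothetical handler sketch for the removed PaasLcm.create()."""
    order_id = "order-1"  # placeholder task order id
    # msg_params is the payload written by NBI: it contains the PaaS "_id"
    # and, when HA is supported, the "op_id" consumed by lock_HA().
    task = asyncio.ensure_future(paas_lcm.create(msg_params, order_id))
    lcm_tasks.register("paas", msg_params["_id"], order_id, "paas_create", task)
    await task
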
diff --git a/osm_lcm/paas_conn.py b/osm_lcm/paas_conn.py
deleted file mode 100644 (file)
index 78638b3..0000000
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/python3
-#
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
-
-import abc
-import asyncio
-import logging
-
-
-def paas_connector_factory(
-    uuid: str,
-    name: str,
-    db: object,
-    fs: object,
-    loop: object,
-    log: object,
-    config: dict,
-    paas_type="juju",
-):
-    """Factory Method to create the paas_connector objects according to PaaS Type.
-    Args:
-        uuid    (str):              Internal id of PaaS account
-        name    (str):              name assigned to PaaS account, can be used for logging
-        db  (object):               Database object to write current operation status
-        fs  (object):               Filesystem object to use during operations
-        loop    (object):           Async event loop object
-        log (object):               Logger for tracing
-        config  (dict):             Dictionary with extra PaaS information.
-        paas_type   (str):          Identifier to create paas_connector object using correct PaaS Connector Class
-
-    Returns:
-        paas_connector  (object):   paas_connector objects created according to given PaaS Type
-
-    Raises:
-        PaasConnException
-    """
-    connectors = {
-        "juju": JujuPaasConnector,
-    }
-    if paas_type not in connectors.keys():
-        raise PaasConnException(f"PaaS type: {paas_type} is not available.")
-
-    return connectors[paas_type](uuid, name, db, fs, loop, log, config)
-
-
-class PaasConnException(Exception):
-    """PaaS Connector Exception Base Class"""
-
-    def __init__(self, message: str = ""):
-        """Constructor of PaaS Connector Exception
-        Args:
-            message (str):  error message to be raised
-        """
-        Exception.__init__(self, message)
-        self.message = message
-
-    def __str__(self):
-        return self.message
-
-    def __repr__(self):
-        return "{}({})".format(type(self), self.message)
-
-
-class JujuPaasConnException(PaasConnException):
-    """Juju PaaS Connector Exception Class"""
-
-
-class AbstractPaasConnector(abc.ABC):
-    """Abstract PaaS Connector class to perform operations using PaaS Orchestrator."""
-
-    def __init__(
-        self,
-        uuid=None,
-        name=None,
-        db=None,
-        fs=None,
-        logger=None,
-        loop=None,
-        config=None,
-    ):
-        """Constructor of PaaS Connector.
-        Args:
-            uuid    (str):      internal id of PaaS account
-            name    (str):      name assigned to this account, can be used for logging
-            db  (object):       database object to write current operation status
-            fs  (object):       Filesystem object to use during operations
-            logger (object):    Logger for tracing
-            loop    (object):   Async event loop object
-            config  (dict):     Dictionary with extra PaaS information.
-        """
-        self.id = uuid
-        self.name = name
-        self.db = db
-        self.fs = fs
-        self.config = config or {}
-        self.logger = logger
-
-    @abc.abstractmethod
-    async def connect(self, endpoints: str, user: str = None, secret: str = None):
-        """Abstract method to connect PaaS account using endpoints, user and secret.
-        Args:
-            endpoints   (str):     Endpoint/URL to connect PaaS account
-            user    (str):         User which is used to connect PaaS account
-            secret  (str):         Used for authentication
-        """
-
-    @abc.abstractmethod
-    async def instantiate(self, nsr_id: str, nslcmop_id: str):
-        """Abstract method to perform PaaS Service instantiation.
-        Args:
-            nsr_id   (str):     NS service record to be used
-            nslcmop_id  (str):  NS LCM operation id
-        """
-
-    @abc.abstractmethod
-    async def terminate(self, nsr_id: str, nslcmop_id: str):
-        """Abstract method to perform PaaS Service termination.
-        Args:
-            nsr_id   (str):     NS service record to be used
-            nslcmop_id  (str):  NS LCM operation id
-        """
-
-    @abc.abstractmethod
-    async def action(self, nsr_id: str, nslcmop_id: str):
-        """Abstract method to perform action on PaaS Service.
-        Args:
-            nsr_id   (str):     NS service record to be used
-            nslcmop_id  (str):  NS LCM operation id
-        """
-
-
-class JujuPaasConnector(AbstractPaasConnector):
-    """Concrete PaaS Connector class to perform operations using the Juju PaaS Orchestrator."""
-
-    def __init__(
-        self,
-        uuid=None,
-        name=None,
-        db=None,
-        fs=None,
-        logger=None,
-        loop=None,
-        config=None,
-    ):
-        self.logger = logging.getLogger("lcm.juju_paas_connector")
-        super(JujuPaasConnector, self).__init__(logger=self.logger)
-
-    async def connect(self, endpoints: str, user: str = None, secret: str = None):
-        """Connect Juju PaaS account using endpoints, user and secret.
-        Args:
-            endpoints   (str):     Endpoint/URL to connect PaaS account
-            user    (str):         User which is used to connect PaaS account
-            secret  (str):         Used for authentication
-
-        Raises:
-            NotImplementedError
-        """
-        raise NotImplementedError(
-            "Juju Paas Connector connect method is not implemented"
-        )
-
-    async def instantiate(self, nsr_id: str, nslcmop_id: str):
-        """Perform Service instantiation.
-        Args:
-            nsr_id   (str):     NS service record to be used
-            nslcmop_id  (str):  NS LCM operation id
-
-        Raises:
-            JujuPaasConnException
-        """
-        # This is not the real implementation
-        # Sample code blocks to validate method execution
-        await asyncio.sleep(1)
-        self.logger.debug("Juju Paas Connector instantiate method is called")
-
-    async def terminate(self, nsr_id: str, nslcmop_id: str):
-        """Perform PaaS Service termination.
-        Args:
-            nsr_id   (str):     NS service record to be used
-            nslcmop_id  (str):  NS LCM operation id
-
-        Raises:
-            JujuPaasConnException
-        """
-        # This is not the real implementation
-        # Sample code blocks to validate method execution
-        await asyncio.sleep(1)
-        self.logger.debug("Juju Paas Connector terminate method is called")
-
-    async def action(self, nsr_id: str, nslcmop_id: str):
-        """Perform action on PaaS Service.
-        Args:
-            nsr_id   (str):     NS service record to be used
-            nslcmop_id  (str):  NS LCM operation id
-
-        Raises:
-            NotImplementedError
-        """
-        raise NotImplementedError(
-            "Juju Paas Connector instantiate method is not implemented"
-        )
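
The removed paas_connector_factory() selected the connector class by paas_type ("juju" being the only registered type) and raised PaasConnException for anything else. A hypothetical usage sketch follows; the argument values are placeholders, and it refers to the module deleted above, so it is illustrative only.

import logging


async def example_connector_usage():
    # Placeholder arguments; the JujuPaasConnector above ignores most of them.
    connector = paas_connector_factory(
        uuid="paas-account-uuid",
        name="my-juju-paas",
        db=None,
        fs=None,
        loop=None,
        log=logging.getLogger("lcm.paas"),
        config={},
        paas_type="juju",
    )
    # instantiate()/terminate() were stubs that only logged and slept;
    # connect() and action() raised NotImplementedError.
    await connector.instantiate(nsr_id="nsr-uuid", nslcmop_id="op-uuid")
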
diff --git a/osm_lcm/paas_service.py b/osm_lcm/paas_service.py
deleted file mode 100644 (file)
index 0a01157..0000000
+++ /dev/null
@@ -1,847 +0,0 @@
-# !/usr/bin/python3
-#
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
-
-import asyncio
-import logging
-import traceback
-
-from osm_common.dbbase import DbException
-from osm_common.msgbase import MsgException
-from osm_lcm.lcm_utils import LcmBase
-from osm_lcm.lcm_utils import LcmException
-from osm_lcm.paas_conn import JujuPaasConnException, paas_connector_factory
-
-from time import time
-
-
-def paas_service_factory(
-    msg: object,
-    lcm_tasks: object,
-    db: object,
-    fs: object,
-    log: object,
-    loop: object,
-    config: dict,
-    paas_type="juju",
-) -> object:
-    """Factory Method to create the paas_service objects according to PaaS Type.
-    Args:
-        msg (object):           Message object to be used to write the messages to Kafka Bus
-        lcm_tasks   (object):   Task object to register the tasks
-        db  (object):           Database object to write current operation status
-        fs  (object):           Filesystem object to use during operations
-        log (object)            Logger for tracing
-        loop (object)           Async event loop object
-        config  (dict):         Dictionary with extra PaaS Service information.
-        paas_type   (str):      Identifier to create paas_service object using correct PaaS Service Class
-
-    Returns:
-        paas_service  (object):   paas_service objects created according to given PaaS Type
-
-    Raises:
-        PaasServiceException
-    """
-    orchestrators = {
-        "juju": JujuPaasService,
-    }
-
-    if paas_type not in orchestrators.keys():
-        raise PaasServiceException(f"PaaS type: {paas_type} is not available.")
-
-    return orchestrators[paas_type](
-        msg=msg, lcm_tasks=lcm_tasks, db=db, fs=fs, loop=loop, logger=log, config=config
-    )
-
-
-class PaasServiceException(Exception):
-    """PaaS Service Exception Base Class"""
-
-    def __init__(self, message: str = ""):
-        """Constructor of PaaS Service Exception
-        Args:
-            message (str):  error message to be raised
-        """
-        Exception.__init__(self, message)
-        self.message = message
-
-    def __str__(self):
-        return self.message
-
-    def __repr__(self):
-        return "{}({})".format(type(self), self.message)
-
-
-class JujuPaasServiceException(PaasServiceException):
-    """Juju PaaS Service exception class"""
-
-
-class JujuPaasService(LcmBase):
-    """Juju PaaS Service class to handle ns operations such as instantiate, terminate, action etc."""
-
-    timeout_ns_deploy = 3600
-
-    def __init__(
-        self,
-        msg: object,
-        lcm_tasks: object,
-        db: object,
-        fs: object,
-        loop: object,
-        logger: object,
-        config: dict,
-    ):
-        """
-        Args:
-            msg (object):           Message object to be used to write the messages to Kafka Bus
-            lcm_tasks   (object):   Task object to register the tasks
-            db  (object):           Database object to write current operation status
-            fs  (object):           Filesystem object to use during operations
-            loop (object)           Async event loop object
-            logger (object):        Logger for tracing
-            config  (dict):         Dictionary with extra PaaS Service information.
-        """
-        self.logger = logging.getLogger("lcm.juju_paas_service")
-        self.loop = loop
-        self.lcm_tasks = lcm_tasks
-        self.config = config
-        super(JujuPaasService, self).__init__(msg=msg, logger=self.logger)
-
-        self.paas_connector = paas_connector_factory(
-            self.msg,
-            self.lcm_tasks,
-            self.db,
-            self.fs,
-            self.loop,
-            self.logger,
-            self.config,
-            "juju",
-        )
-
-    def _lock_ha_task(self, nslcmop_id: str, nsr_id: str, keyword: str) -> bool:
-        """Lock the task.
-        Args:
-            nslcmop_id  (str):          NS LCM operation id
-            nsr_id   (str):             NS service record to be used
-            keyword (str):              Word which indicates action such as instantiate, terminate
-
-        Returns:
-            task_is_locked_by_me (Boolean): True if task_is_locked_by_me else False
-        """
-        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
-        if not task_is_locked_by_me:
-            self.logger.debug(
-                f"{keyword}() task is not locked by me, ns={nsr_id}, exiting."
-            )
-        return task_is_locked_by_me
-
-    def _write_ns_status(
-        self,
-        nsr_id: str,
-        ns_state: str,
-        current_operation: str,
-        current_operation_id: str,
-        error_description: str = None,
-        error_detail: str = None,
-        other_update: dict = None,
-    ) -> None:
-        """Update NS record.
-        Args:
-            nsr_id  (str):                      NS service record to be used
-            ns_state    (str):                  NS state
-            current_operation   (str):          Current operation name
-            current_operation_id    (str):      Current operation ID
-            error_description   (str):          Error description
-            error_detail        (str):          Details of error
-            other_update: (dict):               Other required changes at database if provided
-
-        Raises:
-            DbException
-        """
-        try:
-            db_dict = other_update or {}
-            db_update_dict = {
-                "_admin.nslcmop": current_operation_id,
-                "_admin.current-operation": current_operation_id,
-                "_admin.operation-type": current_operation
-                if current_operation != "IDLE"
-                else None,
-                "currentOperation": current_operation,
-                "currentOperationID": current_operation_id,
-                "errorDescription": error_description,
-                "errorDetail": error_detail,
-            }
-            db_dict.update(db_update_dict)
-
-            if ns_state:
-                db_dict["nsState"] = ns_state
-            self.update_db_2("nsrs", nsr_id, db_dict)
-
-        except DbException as e:
-            error = f"Error writing NS status, ns={nsr_id}: {e}"
-            self.logger.error(error)
-            raise JujuPaasServiceException(error)
-
-    def _write_op_status(
-        self,
-        op_id: str,
-        stage: str = None,
-        error_message: str = None,
-        queue_position: int = 0,
-        operation_state: str = None,
-        other_update: dict = None,
-    ) -> None:
-        """Update NS LCM Operation Status.
-        Args:
-            op_id  (str):                       Operation ID
-            stage    (str):                     Indicates the stage of operations
-            error_message   (str):              Error description
-            queue_position    (int):            Operation position in the queue
-            operation_state   (str):            State of operation
-            other_update: (dict):               Other required changes at database if provided
-
-        Raises:
-            DbException
-        """
-        try:
-            db_dict = other_update or {}
-            db_dict["queuePosition"] = queue_position
-            if stage:
-                db_dict["stage"] = str(stage)
-            if error_message:
-                db_dict["errorMessage"] = error_message
-            if operation_state:
-                db_dict["operationState"] = operation_state
-                db_dict["statusEnteredTime"] = time()
-            self.update_db_2("nslcmops", op_id, db_dict)
-
-        except DbException as e:
-            error = f"Error writing OPERATION status for op_id: {op_id} -> {e}"
-            self.logger.error(error)
-            raise JujuPaasServiceException(error)
-
-    def _update_nsr_error_desc(
-        self,
-        stage: str,
-        new_error: str,
-        error_list: list,
-        error_detail_list: list,
-        nsr_id: str,
-    ) -> None:
-        """Update error description in NS record.
-        Args:
-            stage   (str):          Indicates the stage of operations
-            new_error   (str):      New detected error
-            error_list  (str):      Updated error list
-            error_detail_list:      Updated detailed error list
-            nsr_id  (str):          NS service record to be used
-
-        Raises:
-            DbException
-        """
-        if new_error:
-            stage += " Errors: " + ". ".join(error_detail_list) + "."
-            if nsr_id:
-                try:
-                    # Update nsr
-                    self.update_db_2(
-                        "nsrs",
-                        nsr_id,
-                        {
-                            "errorDescription": "Error at: " + ", ".join(error_list),
-                            "errorDetail": ". ".join(error_detail_list),
-                        },
-                    )
-
-                except DbException as e:
-                    error = f"Error updating NSR error description for nsr_id: {nsr_id} -> {e}"
-                    self.logger.error(error)
-                    raise JujuPaasServiceException(error)
-
-    def _check_tasks_in_done(
-        self,
-        completed_tasks_list: list,
-        created_tasks_info: dict,
-        error_list: list,
-        error_detail_list: list,
-        logging_text: str,
-    ) -> (str, str, str):
-        """Check the completed tasks to detect errors
-        Args:
-            completed_tasks_list    (list):         List of completed tasks
-            created_tasks_info:     Dictionary which includes the tasks
-            error_list:             List of errors
-            error_detail_list:      List includes details of errors
-            logging_text:           Main log message
-
-        Returns:
-            new_error   (str):      New detected error
-            error_list  (str):      Updated error list
-            error_detail_list:      Updated detailed error list
-        """
-        new_error = ""
-        for task in completed_tasks_list:
-            if task.cancelled():
-                exc = "Cancelled"
-            else:
-                exc = task.exception()
-            if exc:
-                if isinstance(exc, asyncio.TimeoutError):
-                    exc = "Timeout"
-                new_error = created_tasks_info[task] + ": {}".format(exc)
-                error_list.append(created_tasks_info[task])
-                error_detail_list.append(new_error)
-                if isinstance(
-                    exc,
-                    (
-                        str,
-                        DbException,
-                        LcmException,
-                        JujuPaasConnException,
-                        JujuPaasServiceException,
-                    ),
-                ):
-                    self.logger.error(logging_text + new_error)
-                else:
-                    exc_traceback = "".join(
-                        traceback.format_exception(None, exc, exc.__traceback__)
-                    )
-                    self.logger.error(
-                        logging_text + created_tasks_info[task] + " " + exc_traceback
-                    )
-            else:
-                self.logger.debug(logging_text + created_tasks_info[task] + ": Done")
-
-        return new_error, error_list, error_detail_list
-
-    async def _wait_for_tasks(
-        self,
-        logging_text: str,
-        created_tasks_info: dict,
-        timeout: int,
-        stage: str,
-        nslcmop_id: str,
-        nsr_id: str,
-    ) -> None:
-        """Wait for tasks to be completed.
-        Args:
-            logging_text  (str):                Log message
-            created_tasks_info    (dict):       Dictionary which includes the tasks
-            timeout   (inst):                   Timeout in seconds
-            stage   (str):                      Indicates the stage of operations
-            nslcmop_id   (str):                 NS LCM Operation ID
-            nsr_id        (str):                NS service record to be used
-        """
-        time_start = time()
-        error_detail_list, error_list = [], []
-        pending_tasks = list(created_tasks_info.keys())
-        num_tasks = len(pending_tasks)
-        num_done = 0
-
-        self._write_op_status(nslcmop_id, stage=f"{stage}: {num_done}/{num_tasks}")
-
-        while pending_tasks:
-            _timeout = timeout + time_start - time()
-            done, pending_tasks = await asyncio.wait(
-                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
-            )
-            num_done += len(done)
-            if not done:
-                # Timeout error
-                for task in pending_tasks:
-                    new_error = created_tasks_info[task] + ": Timeout"
-                    error_detail_list.append(new_error)
-                    error_list.append(new_error)
-                break
-            # Find out the errors in completed tasks
-            new_error, error_list, error_detail_list = self._check_tasks_in_done(
-                completed_tasks_list=done,
-                created_tasks_info=created_tasks_info,
-                error_detail_list=error_detail_list,
-                error_list=error_list,
-                logging_text=logging_text,
-            )
-
-            self._update_nsr_error_desc(
-                stage=f"{stage}: {num_done}/{num_tasks}",
-                new_error=new_error,
-                error_list=error_list,
-                error_detail_list=error_detail_list,
-                nsr_id=nsr_id,
-            )
-
-            self._write_op_status(nslcmop_id, stage=f"{stage}: {num_done}/{num_tasks}")
-
-        return error_detail_list
-
-    def _prepare_db_before_operation(
-        self,
-        db_nsr_update: dict,
-        nsr_id: str,
-        nslcmop_id: str,
-        detailed: str = None,
-        operational: str = None,
-        ns_state: str = None,
-        current_op: str = None,
-        stage: str = None,
-    ) -> None:
-        """Update DB before performing NS operations
-        Args:
-            db_nsr_update   (dict):  NS record update dictionary
-            nsr_id  (str):          NS record ID
-            nslcmop_id  (str):      NS LCM Operation ID
-            detailed:   (str):      Detailed status
-            operational     (str):  Operational status
-            ns_state    (str):      NS state
-            current_op  (str):      Current operation name
-            stage   (str):          Indicates the stage of operations
-        """
-        db_nsr_update["detailed-status"] = detailed
-        db_nsr_update["operational-status"] = operational
-
-        self._write_ns_status(
-            nsr_id=nsr_id,
-            ns_state=ns_state,
-            current_operation=current_op,
-            current_operation_id=nslcmop_id,
-            other_update=db_nsr_update,
-        )
-        self._write_op_status(op_id=nslcmop_id, stage=stage, queue_position=0)
-
-    async def _report_to_kafka(
-        self,
-        nsr_id: str,
-        nslcmop_id: str,
-        nslcmop_operation_state: str,
-        logging_text: str,
-        message: str,
-        autoremove="False",
-    ) -> None:
-        """Report operation status to Kafka.
-        Args:
-            nsr_id  (str):                  NS record ID
-            nslcmop_id  (str):              NS LCM Operation ID
-            nslcmop_operation_state (str):  NS LCM Operation status
-            logging_text    (str):          Common log message
-            message (str):                  Message which is sent through Kafka
-            autoremove  (Boolean):          True/False If True NBI deletes NS from DB
-
-        Raises:
-            PaasServiceException
-        """
-        if nslcmop_operation_state:
-            update_dict = {
-                "nsr_id": nsr_id,
-                "nslcmop_id": nslcmop_id,
-                "operationState": nslcmop_operation_state,
-            }
-            if message == "terminated":
-                update_dict["autoremove"] = autoremove
-            try:
-                await self.msg.aiowrite(
-                    "ns",
-                    message,
-                    update_dict,
-                    loop=self.loop,
-                )
-            except MsgException as e:
-                error = logging_text + f"kafka_write notification Exception: {e}"
-                self.logger.error(error)
-                raise PaasServiceException(error)
-
-    def _update_ns_state(self, nsr_id: str, db_nsr_update: dict, ns_state: str) -> None:
-        """Update NS state in NSR and VNFRs
-        Args:
-            nsr_id  (str):              NS record ID
-            db_nsr_update   (dict):     NS record dictionary
-            ns_state    (str):          NS status
-        """
-        db_nsr_update["_admin.nsState"] = ns_state
-        self.update_db_2("nsrs", nsr_id, db_nsr_update)
-        self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": ns_state})
-
-    def _update_db_ns_state_after_operation(
-        self,
-        error_list: list,
-        operation_type: str,
-        nslcmop_id: str,
-        db_nsr_update: dict,
-        db_nsr: dict,
-        nsr_id: str,
-    ) -> None:
-        """Update NS status at database after performing operations
-        Args:
-            error_list  (list):             List of errors
-            operation_type  (str):          Type of operation such as instantiate/terminate
-            nslcmop_id  (str):              NS LCM Operation ID
-            db_nsr_update   (dict):         NSR update dictionary
-            db_nsr  (dict):                 NS record dictionary
-            nsr_id  (str):                  NS record ID
-        """
-        ns_state = ""
-        if error_list:
-            error_detail = ". ".join(error_list)
-            error_description_nsr = "Operation: {}.{}".format(
-                operation_type, nslcmop_id
-            )
-            db_nsr_update["detailed-status"] = (
-                error_description_nsr + " Detail: " + error_detail
-            )
-            ns_state = "BROKEN"
-
-        else:
-            error_detail = None
-            error_description_nsr = None
-            db_nsr_update["detailed-status"] = "Done"
-            if operation_type == "instantiate":
-                ns_state = "READY"
-            elif operation_type == "terminate":
-                ns_state = "NOT_INSTANTIATED"
-                db_nsr_update["operational-status"] = "terminated"
-                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
-
-        if db_nsr:
-            self._write_ns_status(
-                nsr_id=nsr_id,
-                ns_state=ns_state,
-                current_operation="IDLE",
-                current_operation_id=None,
-                error_description=error_description_nsr,
-                error_detail=error_detail,
-                other_update=db_nsr_update,
-            )
-
-        if ns_state == "NOT_INSTANTIATED":
-            self.db.set_list(
-                "vnfrs",
-                {"nsr-id-ref": nsr_id},
-                {"_admin.nsState": "NOT_INSTANTIATED"},
-            )
-
-    def _update_db_nslcmop_status_after_operation(
-        self, error_list: list, db_nslcmop_update: dict, nslcmop_id: str
-    ) -> str:
-        """Update NS LCM operation status at database after performing operation
-        Args
-            error_list  (list):             List of errors
-            db_nslcmop_update   (dict):     NS LCM operation update dictionary
-            nslcmop_id  (str):              NS LCM Operation ID
-
-        Returns:
-            nslcmop_operation_state (str):  State of NS LCM operation
-        """
-        if error_list:
-            error_detail = ". ".join(error_list)
-            error_description_nslcmop = "Detail: {}".format(error_detail)
-            db_nslcmop_update["detailed-status"] = error_detail
-            nslcmop_operation_state = "FAILED"
-
-        else:
-            error_description_nslcmop = None
-            db_nslcmop_update["detailed-status"] = "Done"
-            nslcmop_operation_state = "COMPLETED"
-
-        self._write_op_status(
-            op_id=nslcmop_id,
-            stage=nslcmop_operation_state,
-            error_message=error_description_nslcmop,
-            operation_state=nslcmop_operation_state,
-            other_update=db_nslcmop_update,
-        )
-
-        return nslcmop_operation_state
-
-    def _update_db_after_operation(
-        self,
-        nslcmop_id: str,
-        db_nsr: str,
-        nsr_id: str,
-        db_nslcmop_update: dict = None,
-        db_nsr_update: dict = None,
-        error_list: list = None,
-        operation_type: str = None,
-    ) -> str:
-        """Update database after operation is performed.
-        Args:
-            nslcmop_id  (str):              NS LCM Operation ID
-            db_nsr  (dict):                 NS record dictionary
-            nsr_id  (str):                  NS record ID
-            db_nslcmop_update   (dict):     NS LCM operation update dictionary
-            db_nsr_update   (dict):         NSR update dictionary
-            error_list  (list):             List of errors
-            operation_type  (str):          Type of operation such as instantiate/terminate
-
-        Returns:
-            nslcmop_operation_state (str):  State of NS LCM operation
-        """
-        # Update NS state
-        self._update_db_ns_state_after_operation(
-            error_list=error_list,
-            operation_type=operation_type,
-            nslcmop_id=nslcmop_id,
-            db_nsr_update=db_nsr_update,
-            db_nsr=db_nsr,
-            nsr_id=nsr_id,
-        )
-
-        # Update NS LCM Operation State
-        nslcmop_operation_state = self._update_db_nslcmop_status_after_operation(
-            error_list, db_nslcmop_update, nslcmop_id
-        )
-        return nslcmop_operation_state
-
-    async def instantiate(self, nsr_id: str, nslcmop_id: str) -> None:
-        """Perform PaaS Service instantiation.
-        Args:
-            nsr_id   (str):     NS service record to be used
-            nslcmop_id  (str):  NS LCM operation id
-        """
-        # Locking HA task
-        if not self._lock_ha_task(nslcmop_id, nsr_id, keyword="instantiate"):
-            return
-
-        logging_text = f"Task ns={nsr_id} instantiate={nslcmop_id} "
-        self.logger.debug(logging_text + "Enter")
-
-        # Required containers
-        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
-        db_nsr_update, db_nslcmop_update, tasks_dict_info = {}, {}, {}
-        exc = None
-        error_list = []
-
-        try:
-            # Wait for any previous tasks in process
-            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
-            # Update nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
-            self._prepare_db_before_operation(
-                db_nsr_update,
-                nsr_id,
-                nslcmop_id,
-                detailed="creating",
-                operational="init",
-                ns_state="BUILDING",
-                current_op="INSTANTIATING",
-                stage="Building",
-            )
-
-            # Perform PaaS Service Deployment using PaaS Connector
-            self.logger.debug(logging_text + "Creating instantiate task")
-            task_instantiate = asyncio.ensure_future(
-                self.paas_connector.instantiate(nsr_id, nslcmop_id)
-            )
-            self.lcm_tasks.register(
-                "ns",
-                nsr_id,
-                nslcmop_id,
-                "instantiate_juju_paas_service",
-                task_instantiate,
-            )
-            tasks_dict_info[task_instantiate] = "Instantiate juju PaaS Service"
-
-            # Update nsState="INSTANTIATED"
-            self.logger.debug(logging_text + "INSTANTIATED")
-            self._update_ns_state(nsr_id, db_nsr_update, "INSTANTIATED")
-
-        except (
-            DbException,
-            LcmException,
-            JujuPaasConnException,
-            JujuPaasServiceException,
-        ) as e:
-            self.logger.error(logging_text + "Exit Exception: {}".format(e))
-            exc = e
-        except asyncio.CancelledError:
-            self.logger.error(logging_text + "Cancelled Exception")
-            exc = "Operation was cancelled"
-
-        finally:
-            if exc:
-                error_list.append(str(exc))
-            try:
-                if tasks_dict_info:
-                    # Wait for pending tasks
-                    stage = "Waiting for instantiate pending tasks."
-                    self.logger.debug(logging_text + stage)
-                    error_list += await self._wait_for_tasks(
-                        logging_text,
-                        tasks_dict_info,
-                        self.timeout_ns_deploy,
-                        stage,
-                        nslcmop_id,
-                        nsr_id=nsr_id,
-                    )
-            except asyncio.CancelledError:
-                error_list.append("Cancelled")
-            except Exception as exc:
-                error_list.append(str(exc))
-
-            # Update operational-status
-            self.logger.debug("updating operational status")
-            db_nsr_update["operational-status"] = "running"
-
-            # Update status at database after operation
-            self.logger.debug(logging_text + "Updating DB after operation")
-            nslcmop_operation_state = self._update_db_after_operation(
-                nslcmop_id,
-                db_nsr,
-                nsr_id,
-                db_nslcmop_update=db_nslcmop_update,
-                db_nsr_update=db_nsr_update,
-                error_list=error_list,
-                operation_type="instantiate",
-            )
-
-            # Write to Kafka bus to report the operation status
-            await self._report_to_kafka(
-                nsr_id,
-                nslcmop_id,
-                nslcmop_operation_state,
-                logging_text,
-                "instantiated",
-            )
-            self.logger.debug(logging_text + "Exit")
-
-            # Remove task
-            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
-
-    async def terminate(self, nsr_id: str, nslcmop_id: str) -> None:
-        """Perform PaaS Service termination.
-        Args:
-            nsr_id   (str):     NS service record to be used
-            nslcmop_id  (str):  NS LCM operation id
-        """
-        # Locking HA task
-        if not self._lock_ha_task(nslcmop_id, nsr_id, keyword="terminate"):
-            return
-
-        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
-        self.logger.debug(logging_text + "Enter")
-
-        # Update ns termination timeout
-        timeout_ns_terminate = self.timeout_ns_deploy
-        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
-        db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
-        operation_params = db_nslcmop.get("operationParams") or {}
-
-        if operation_params.get("timeout_ns_terminate"):
-            timeout_ns_terminate = operation_params["timeout_ns_terminate"]
-
-        # Required containers
-        autoremove = False
-        db_nsr_update, db_nslcmop_update, tasks_dict_info = {}, {}, {}
-        exc = None
-        error_list = []
-
-        try:
-            # Wait for any previous tasks in process
-            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
-
-            # Update nsState="TERMINATING", currentOperation="TERMINATING", currentOperationID=nslcmop_id
-            self._prepare_db_before_operation(
-                db_nsr_update,
-                nsr_id,
-                nslcmop_id,
-                detailed="terminating",
-                operational="terminate",
-                ns_state="TERMINATING",
-                current_op="TERMINATING",
-                stage="terminating",
-            )
-
-            # Perform PaaS Service deletion using PaaS Connector
-            self.logger.debug(logging_text + "Creating terminate task")
-            task_terminate = asyncio.ensure_future(
-                self.paas_connector.terminate(nsr_id, nslcmop_id)
-            )
-            self.lcm_tasks.register(
-                "ns", nsr_id, nslcmop_id, "terminate_juju_paas_service", task_terminate
-            )
-            tasks_dict_info[task_terminate] = "Terminate juju PaaS Service"
-
-            # Update nsState="TERMINATED"
-            self.logger.debug(logging_text + "TERMINATED")
-            self._update_ns_state(nsr_id, db_nsr_update, "TERMINATED")
-
-        except (
-            DbException,
-            LcmException,
-            JujuPaasConnException,
-            JujuPaasServiceException,
-        ) as e:
-            self.logger.error(logging_text + "Exit Exception: {}".format(e))
-            exc = e
-        except asyncio.CancelledError:
-            self.logger.error(logging_text + "Cancelled Exception")
-            exc = "Operation was cancelled"
-
-        finally:
-            if exc:
-                error_list.append(str(exc))
-            try:
-                if tasks_dict_info:
-                    # Wait for pending tasks
-                    stage = "Waiting for pending tasks for termination."
-                    self.logger.debug(logging_text + stage)
-                    error_list += await self._wait_for_tasks(
-                        logging_text,
-                        tasks_dict_info,
-                        min(self.timeout_ns_deploy, timeout_ns_terminate),
-                        stage,
-                        nslcmop_id,
-                        nsr_id=nsr_id,
-                    )
-            except asyncio.CancelledError:
-                error_list.append("Cancelled")
-            except Exception as exc:
-                error_list.append(str(exc))
-
-            # Update status at database
-            nslcmop_operation_state = self._update_db_after_operation(
-                nslcmop_id,
-                db_nsr,
-                nsr_id,
-                db_nslcmop_update=db_nslcmop_update,
-                db_nsr_update=db_nsr_update,
-                error_list=error_list,
-                operation_type="terminate",
-            )
-
-            # Write to Kafka bus to report the operation status
-            if operation_params:
-                autoremove = operation_params.get("autoremove", False)
-
-            await self._report_to_kafka(
-                nsr_id,
-                nslcmop_id,
-                nslcmop_operation_state,
-                logging_text,
-                "terminated",
-                autoremove=autoremove,
-            )
-            self.logger.debug(logging_text + "Exit")
-
-            # Remove task
-            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
-
-    async def action(self, nsr_id: str, nslcmop_id: str):
-        """Perform action on PaaS service.
-        Args:
-            nsr_id   (str):     NS service record to be used
-            nslcmop_id  (str):  NS LCM operation id
-
-        Raises:
-            NotImplementedError
-        """
-        raise NotImplementedError("Juju Paas Service action method is not implemented")
index d02b011..7f47231 100644 (file)
@@ -529,10 +529,11 @@ db_nsrs_text = """
                 type: lxc_proxy_charm
                 vnfd_id: hackfest3charmed-vnf
             -   application: alf-c-ab
-                ee_id: f48163a6-c807-47bc-9682-f72caef5af85.alf-c-ab
+                ee_id: "model_name.application_name.machine_id"
+                ee_descriptor_id: f48163a6-c807-47bc-9682-f72caef5af85.alf-c-ab
                 needed_terminate: True
                 detailed-status: Ready!
-                member-vnf-index: '2'
+                member-vnf-index: hackfest_vnf1
                 model: f48163a6-c807-47bc-9682-f72caef5af85
                 operational-status: active
                 primitive_id: null
@@ -543,6 +544,7 @@ db_nsrs_text = """
                 vdu_name: null
                 type: lxc_proxy_charm
                 vnfd_id: hackfest3charmed-vnf
+                config_sw_installed: true
             VCA-model-name: f48163a6-c807-47bc-9682-f72caef5af85
         modified: 1566823354.3716335
         nsState: INSTANTIATED
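The fixture change above adopts the dotted "model_name.application_name.machine_id" form for ee_id. Purely as an illustration (not part of this change-set), such an identifier splits back into its three components:

    ee_id = "model_name.application_name.machine_id"
    model_name, application_name, machine_id = ee_id.split(".")
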
index 50b8d7a..cc18c37 100644 (file)
@@ -16,7 +16,7 @@ import os
 import re
 import tempfile
 from unittest import TestCase
-from unittest.mock import Mock, patch
+from unittest.mock import Mock
 
 from osm_lcm.lcm import Lcm
 from osm_lcm.data_utils.database.database import Database
@@ -62,7 +62,7 @@ def check_file_content(health_check_file: str) -> str:
         return contents
 
 
-class TestLcmBase(TestCase):
+class TestLcm(TestCase):
     def setUp(self):
         self.config_file = os.getcwd() + "/osm_lcm/tests/test_lcm_config_file.yaml"
         self.config_file_without_storage_path = tempfile.mkstemp()[1]
@@ -77,18 +77,20 @@ class TestLcmBase(TestCase):
         self.fs.path = "/"
         self.my_lcm = Lcm(config_file=self.config_file)
 
-
-class TestLcm(TestLcmBase):
     def test_get_health_check_file_from_config_file(self):
         self.assertEqual(self.my_lcm.health_check_file, "/tmp/storage/time_last_ping")
 
-    def test_health_check_file_not_in_config_file(self):
-        create_lcm_config(self.config_file, self.config_file_without_storage_path, 38)
-        with self.assertRaises(LcmException):
-            Lcm(config_file=self.config_file_without_storage_path)
+    def test_health_check_file_not_in_config_file(self):
+        create_lcm_config(self.config_file, self.config_file_without_storage_path, 38)
+        with self.assertRaises(LcmException):
+            Lcm(config_file=self.config_file_without_storage_path)
 
     def test_kafka_admin_topic_ping_command(self):
-        params = {"to": "lcm", "from": "lcm", "worker_id": self.my_lcm.worker_id}
+        params = {
+            "to": "lcm",
+            "from": "lcm",
+            "worker_id": self.my_lcm.worker_id,
+        }
         self.my_lcm.health_check_file = tempfile.mkstemp()[1]
         self.my_lcm.kafka_read_callback("admin", "ping", params)
         pattern = "[0-9]{10}.[0-9]{5,8}"
@@ -97,7 +99,11 @@ class TestLcm(TestLcmBase):
         self.assertTrue(result)
 
     def test_kafka_wrong_topic_ping_command(self):
-        params = {"to": "lcm", "from": "lcm", "worker_id": self.my_lcm.worker_id}
+        params = {
+            "to": "lcm",
+            "from": "lcm",
+            "worker_id": self.my_lcm.worker_id,
+        }
         self.my_lcm.health_check_file = tempfile.mkstemp()[1]
         self.my_lcm.kafka_read_callback("kafka", "ping", params)
         pattern = "[0-9]{10}.[0-9]{5,8}"
@@ -106,154 +112,14 @@ class TestLcm(TestLcmBase):
         self.assertFalse(result)
 
     def test_kafka_admin_topic_ping_command_wrong_worker_id(self):
-        params = {"to": "lcm", "from": "lcm", "worker_id": 5}
+        params = {
+            "to": "lcm",
+            "from": "lcm",
+            "worker_id": 5,
+        }
         self.my_lcm.health_check_file = tempfile.mkstemp()[1]
         self.my_lcm.kafka_read_callback("admin", "ping", params)
         pattern = "[0-9]{10}.[0-9]{5,8}"
         # Health check file is empty.
         result = re.findall(pattern, check_file_content(self.my_lcm.health_check_file))
         self.assertFalse(result)
-
-
-@patch("osm_lcm.lcm.asyncio.ensure_future")
-class TestPaasKafkaRead(TestLcmBase):
-    def setUp(self):
-        super().setUp()
-        self.params = {"_id": "paas_id", "name": "paas_name", "type": "juju"}
-        self.order_id = 2
-        self.my_lcm.paas = Mock()
-        self.my_lcm.lcm_tasks = Mock()
-        self.task = {}
-
-    def test_kafka_read_paas_create(self, ensure_future):
-        ensure_future.return_value = self.task
-        self.my_lcm.kafka_read_callback("paas", "created", self.params)
-        self.my_lcm.lcm_tasks.register.assert_called_with(
-            "paas", "paas_id", self.order_id, "paas_create", self.task
-        )
-        ensure_future.assert_called_once_with(self.my_lcm.paas.create())
-
-    def test_kafka_read_paas_update(self, ensure_future):
-        ensure_future.return_value = self.task
-        self.my_lcm.kafka_read_callback("paas", "edited", self.params)
-        self.my_lcm.lcm_tasks.register.assert_called_with(
-            "paas", "paas_id", self.order_id, "paas_edit", self.task
-        )
-        ensure_future.assert_called_once_with(self.my_lcm.paas.edit())
-
-    def test_kafka_read_paas_delete(self, ensure_future):
-        ensure_future.return_value = self.task
-        self.my_lcm.kafka_read_callback("paas", "delete", self.params)
-        self.my_lcm.lcm_tasks.register.assert_called_with(
-            "paas", "paas_id", self.order_id, "paas_delete", self.task
-        )
-        ensure_future.assert_called_once_with(self.my_lcm.paas.delete())
-
-    def test_kafka_read_paas_delete_force(self, ensure_future):
-        ensure_future.return_value = self.task
-        self.my_lcm.kafka_read_callback("paas", "deleted", self.params)
-        self.my_lcm.lcm_tasks.register.assert_not_called()
-        ensure_future.assert_not_called()
-
-    def test_kafka_read_paas_wrong_command(self, ensure_future):
-        ensure_future.return_value = self.task
-        self.my_lcm.kafka_read_callback("paas", "invalid", self.params)
-        self.my_lcm.lcm_tasks.register.assert_not_called()
-        ensure_future.assert_not_called()
-
-
-@patch("osm_lcm.lcm.asyncio.ensure_future")
-class TestNsKafkaRead(TestLcmBase):
-    def setUp(self):
-        super().setUp()
-        self.task = {}
-        self.paas_params = {
-            "_id": "nslcmop_id",
-            "nsInstanceId": "nsr_id",
-            "operationParams": {"paasAccountId": "paas_id"},
-        }
-
-        self.vim_params = {
-            "_id": "nslcmop_id",
-            "nsInstanceId": "nsr_id",
-            "operationParams": {},
-        }
-
-        self.my_lcm.ns = Mock()
-        self.my_lcm.lcm_tasks = Mock()
-        self.my_lcm.juju_paas = Mock()
-        self.my_lcm.paas_service = {"juju": self.my_lcm.juju_paas}
-
-    def test_kafka_read_ns_instantiate_vim_account(self, mock_ensure_future):
-        mock_ensure_future.return_value = self.task
-        self.my_lcm.kafka_read_callback("ns", "instantiate", self.vim_params)
-        mock_ensure_future.assert_called_once_with(self.my_lcm.ns.instantiate())
-        self.my_lcm.lcm_tasks.register.assert_called_with(
-            "ns", "nsr_id", "nslcmop_id", "ns_instantiate", self.task
-        )
-
-    @patch("osm_lcm.lcm.get_paas_id_by_nsr_id")
-    def test_kafka_read_ns_terminate_vim_account(self, get_paas_id, mock_ensure_future):
-        mock_ensure_future.return_value = self.task
-        get_paas_id.return_value = None
-        self.my_lcm.kafka_read_callback("ns", "terminate", self.vim_params)
-        self.my_lcm.lcm_tasks.cancel.assert_called_with("ns", "nsr_id")
-        mock_ensure_future.assert_called_once_with(self.my_lcm.ns.terminate())
-        self.my_lcm.lcm_tasks.register.assert_called_with(
-            "ns", "nsr_id", "nslcmop_id", "ns_terminate", self.task
-        )
-
-    @patch("osm_lcm.lcm.get_paas_id_by_nsr_id")
-    def test_kafka_read_ns_action_vim_account(self, get_paas_id, mock_ensure_future):
-        mock_ensure_future.return_value = self.task
-        get_paas_id.return_value = None
-        self.my_lcm.kafka_read_callback("ns", "action", self.vim_params)
-        mock_ensure_future.assert_called_once_with(self.my_lcm.ns.action())
-        self.my_lcm.lcm_tasks.register.assert_called_with(
-            "ns", "nsr_id", "nslcmop_id", "ns_action", self.task
-        )
-
-    @patch("osm_lcm.lcm.get_paas_type_by_paas_id")
-    def test_kafka_read_ns_instantiate_paas_account(
-        self, get_paas_type, mock_ensure_future
-    ):
-        mock_ensure_future.return_value = self.task
-        get_paas_type.return_value = "juju"
-        self.my_lcm.kafka_read_callback("ns", "instantiate", self.paas_params)
-        mock_ensure_future.assert_called_once_with(self.my_lcm.juju_paas.instantiate())
-        self.my_lcm.lcm_tasks.register.assert_called_with(
-            "ns", "nsr_id", "nslcmop_id", "ns_instantiate", self.task
-        )
-        get_paas_type.assert_called_with("paas_id", self.my_lcm.db)
-
-    @patch("osm_lcm.lcm.get_paas_type_by_paas_id")
-    @patch("osm_lcm.lcm.get_paas_id_by_nsr_id")
-    def test_kafka_read_ns_terminate_paas_account(
-        self, get_paas_id, get_paas_type, mock_ensure_future
-    ):
-        mock_ensure_future.return_value = self.task
-        get_paas_id.return_value = "paas_id"
-        get_paas_type.return_value = "juju"
-        self.my_lcm.kafka_read_callback("ns", "terminate", self.paas_params)
-        mock_ensure_future.assert_called_once_with(self.my_lcm.juju_paas.terminate())
-        self.my_lcm.lcm_tasks.register.assert_called_with(
-            "ns", "nsr_id", "nslcmop_id", "ns_terminate", self.task
-        )
-        get_paas_id.assert_called_with("nsr_id", self.my_lcm.db)
-        get_paas_type.assert_called_with("paas_id", self.my_lcm.db)
-
-    @patch("osm_lcm.lcm.get_paas_type_by_paas_id")
-    @patch("osm_lcm.lcm.get_paas_id_by_nsr_id")
-    def test_kafka_read_ns_action_paas_account(
-        self, get_paas_id, get_paas_type, mock_ensure_future
-    ):
-        mock_ensure_future.return_value = self.task
-        get_paas_id.return_value = "paas_id"
-        get_paas_type.return_value = "juju"
-        self.my_lcm.kafka_read_callback("ns", "action", self.paas_params)
-        mock_ensure_future.assert_called_once_with(self.my_lcm.juju_paas.action())
-        self.my_lcm.lcm_tasks.register.assert_called_with(
-            "ns", "nsr_id", "nslcmop_id", "ns_action", self.task
-        )
-        get_paas_id.assert_called_with("nsr_id", self.my_lcm.db)
-        get_paas_type.assert_called_with("paas_id", self.my_lcm.db)
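The ping tests kept above match the health-check file contents against a Unix-timestamp pattern. A minimal, self-contained illustration of that pattern (the sample value is an assumption, not taken from the tests):

    import re

    pattern = "[0-9]{10}.[0-9]{5,8}"
    sample = "1736187000.123456"  # shape of str(time.time())
    assert re.findall(pattern, sample)
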
index 3730162..2ea9ae8 100644 (file)
@@ -23,6 +23,7 @@ from osm_lcm.lcm_helm_conn import LCMHelmConn
 from asynctest.mock import Mock
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
+from osm_lcm.data_utils.lcm_config import VcaConfig
 
 __author__ = "Isabel Lloret <illoret@indra.es>"
 
@@ -54,6 +55,7 @@ class TestLcmHelmConn(asynctest.TestCase):
         lcm_helm_conn.K8sHelm3Connector = asynctest.Mock(
             lcm_helm_conn.K8sHelm3Connector
         )
+        vca_config = VcaConfig(vca_config)
         self.helm_conn = LCMHelmConn(
             loop=self.loop, vca_config=vca_config, log=self.logger
         )
@@ -104,7 +106,6 @@ class TestLcmHelmConn(asynctest.TestCase):
     async def test_get_ee_ssh_public__key(self):
         ee_id = "osm.helm_sample_charm_0001"
         db_dict = {}
-        lcm_helm_conn.socket.gethostbyname = asynctest.Mock()
         mock_pub_key = "ssh-rsapubkey"
         self.db.get_one.return_value = {"_admin": {"helm-chart": {"id": "myk8s_id"}}}
         self.helm_conn._get_ssh_key = asynctest.CoroutineMock(return_value=mock_pub_key)
@@ -115,7 +116,6 @@ class TestLcmHelmConn(asynctest.TestCase):
 
     @asynctest.fail_on(active_handles=True)
     async def test_execute_primitive(self):
-        lcm_helm_conn.socket.gethostbyname = asynctest.Mock()
         ee_id = "osm.helm_sample_charm_0001"
         primitive_name = "sleep"
         params = {}
@@ -129,7 +129,6 @@ class TestLcmHelmConn(asynctest.TestCase):
     @asynctest.fail_on(active_handles=True)
     async def test_execute_config_primitive(self):
         self.logger.debug("Execute config primitive")
-        lcm_helm_conn.socket.gethostbyname = asynctest.Mock()
         ee_id = "osm.helm_sample_charm_0001"
         primitive_name = "config"
         params = {"ssh-host-name": "host1"}
index 5155dcf..71ebab1 100644 (file)
 # contact: alfonso.tiernosepulveda@telefonica.com
 ##
 import logging
-from unittest.mock import Mock, patch, MagicMock
+import tempfile
+from unittest.mock import Mock, patch, MagicMock, mock_open
 from unittest import TestCase
 
 from osm_common.msgkafka import MsgKafka
 from osm_common import fslocal
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
-from osm_lcm.lcm_utils import (
-    LcmBase,
-    LcmException,
-    get_paas_id_by_nsr_id,
-    get_paas_type_by_paas_id,
-)
+from osm_lcm.lcm_utils import LcmBase, LcmException
 from osm_lcm.tests import test_db_descriptors as descriptors
 import yaml
 from zipfile import BadZipfile
 
 
+tmpdir = tempfile.mkdtemp()
+tmpfile = tempfile.mkstemp()[1]
+
+
 class TestLcmBase(TestCase):
 
     test_nsr_id = "f48163a6-c807-47bc-9682-f72caef5af85"
@@ -57,6 +57,7 @@ class TestLcmBase(TestCase):
         self.my_ns = LcmBase(self.msg, self.logger)
         self.my_ns.fs = self.fs
         self.my_ns.db = self.db
+        self.hexdigest = "031edd7d41651593c5fe5c006f"
 
     def test_get_charm_name_successfully(self):
         instance = self.my_ns
@@ -251,23 +252,195 @@ class TestLcmBase(TestCase):
                 self.assertEqual(mock_metadata_file.call_count, 1)
                 self.assertEqual(mock_charm_name.call_count, 1)
 
-
-class TestPaasUtils(TestCase):
-    def setUp(self):
-        self.db = Mock()
-
-    def test_get_paas_id_by_nsr_id(self):
-        nsr_id = "nsr_id"
-        nsr_filter = {"_id": nsr_id}
-        self.db.get_one.return_value = {"paasdatacenter": "paas_id"}
-        result = get_paas_id_by_nsr_id(nsr_id, self.db)
-        self.db.get_one.expect_called_with("nsrs", nsr_filter)
-        assert result == "paas_id"
-
-    def test_get_paas_type_by_paas_id(self):
-        paas_id = "paas_id"
-        paas_filter = {"_id": paas_id}
-        self.db.get_one.return_value = {"paas_type": "juju"}
-        result = get_paas_type_by_paas_id(paas_id, self.db)
-        self.db.get_one.expect_called_with("paas", paas_filter)
-        assert result == "juju"
+    @patch("builtins.open", new_callable=mock_open(read_data="charm content"))
+    @patch("osm_lcm.lcm_utils.hashlib")
+    def test_calculate_charm_hash_successfully(self, mock_hashlib, mocking_open):
+        """Calculate charm hash successfully."""
+        charm = tmpfile
+        hexdigest = self.hexdigest
+        mock_file_hash = MagicMock()
+        mock_hashlib.md5.return_value = mock_file_hash
+        mock_file_hash.hexdigest.return_value = hexdigest
+        result = LcmBase.calculate_charm_hash(charm)
+        self.assertEqual(result, hexdigest)
+        self.assertEqual(mocking_open.call_count, 1)
+        self.assertEqual(mock_file_hash.update.call_count, 1)
+        self.assertEqual(mock_file_hash.hexdigest.call_count, 1)
+        self.assertEqual(mock_hashlib.md5.call_count, 1)
+
+    @patch("builtins.open", new_callable=mock_open(read_data="charm content"))
+    @patch("osm_lcm.lcm_utils.hashlib")
+    def test_calculate_charm_hash_open_raises(self, mock_hashlib, mocking_open):
+        """builtins.open raises exception."""
+        charm = tmpfile
+        hexdigest = self.hexdigest
+        mock_file_hash = MagicMock()
+        mock_hashlib.md5.return_value = mock_file_hash
+        mock_file_hash.hexdigest.return_value = hexdigest
+        mocking_open.side_effect = IOError
+        with self.assertRaises(IOError):
+            LcmBase.calculate_charm_hash(charm)
+        self.assertEqual(mocking_open.call_count, 1)
+        mock_file_hash.update.assert_not_called()
+        mock_file_hash.hexdigest.assert_not_called()
+        self.assertEqual(mock_hashlib.md5.call_count, 1)
+
+    @patch("builtins.open", new_callable=mock_open(read_data="charm content"))
+    @patch("osm_lcm.lcm_utils.hashlib")
+    def test_calculate_charm_filehash_update_raises(self, mock_hashlib, mocking_open):
+        """Filehash update raises exception."""
+        charm = tmpfile
+        hexdigest = self.hexdigest
+        mock_file_hash = MagicMock()
+        mock_file_hash.update.side_effect = Exception
+        mock_hashlib.md5.return_value = mock_file_hash
+        mock_file_hash.hexdigest.return_value = hexdigest
+        with self.assertRaises(Exception):
+            LcmBase.calculate_charm_hash(charm)
+        self.assertEqual(mocking_open.call_count, 1)
+        self.assertEqual(mock_file_hash.update.call_count, 1)
+        mock_file_hash.hexdigest.assert_not_called()
+        self.assertEqual(mock_hashlib.md5.call_count, 1)
+
+    @patch("builtins.open", new_callable=mock_open(read_data="charm content"))
+    @patch("osm_lcm.lcm_utils.hashlib")
+    def test_calculate_charm_filehash_hexdigest_raises(
+        self, mock_hashlib, mocking_open
+    ):
+        """Filehash hexdigest raises exception."""
+        charm = tmpfile
+        mock_file_hash = MagicMock()
+        mock_hashlib.md5.return_value = mock_file_hash
+        mock_file_hash.hexdigest.side_effect = Exception
+        with self.assertRaises(Exception):
+            LcmBase.calculate_charm_hash(charm)
+        self.assertEqual(mocking_open.call_count, 1)
+        self.assertEqual(mock_file_hash.update.call_count, 1)
+        mock_file_hash.hexdigest.assert_called_once()
+        self.assertEqual(mock_hashlib.md5.call_count, 1)
+        mock_file_hash.update.assert_called_once()
+
+    @patch("builtins.open", new_callable=mock_open(read_data="charm content"))
+    @patch("osm_lcm.lcm_utils.hashlib")
+    def test_calculate_charm_filehash_hashlib_md5_raises(
+        self, mock_hashlib, mocking_open
+    ):
+        """Filehash hashlib md5 raises exception."""
+        charm = tmpfile
+        mock_hashlib.md5.side_effect = Exception
+        with self.assertRaises(Exception):
+            LcmBase.calculate_charm_hash(charm)
+        self.assertEqual(mock_hashlib.md5.call_count, 1)
+        mocking_open.assert_not_called()
+
+    @patch("builtins.open", new_callable=mock_open(read_data="charm content"))
+    @patch("osm_lcm.lcm_utils.hashlib")
+    def test_calculate_charm_hash_file_does_not_exist(self, mock_hashlib, mocking_open):
+        """Calculate charm hash, charm file does not exist."""
+        file = None
+        mock_file_hash = MagicMock()
+        mock_hashlib.md5.return_value = mock_file_hash
+        mocking_open.side_effect = FileNotFoundError
+        with self.assertRaises(FileNotFoundError):
+            LcmBase.calculate_charm_hash(file)
+        self.assertEqual(mocking_open.call_count, 1)
+        mock_file_hash.update.assert_not_called()
+        mock_file_hash.hexdigest.assert_not_called()
+        self.assertEqual(mock_hashlib.md5.call_count, 1)
+
+    @patch("osm_lcm.lcm_utils.LcmBase.calculate_charm_hash")
+    def test_compare_charm_hash_charm_changed(self, mock_calculate_charm_hash):
+        """Compare charm hash, charm files are different."""
+        output = True
+        charm1, charm2 = tmpfile, tmpfile
+        mock_calculate_charm_hash.side_effect = [
+            self.hexdigest,
+            "0dd7d4173747593c5fe5c006f",
+        ]
+        result = LcmBase.compare_charm_hash(charm1, charm2)
+        self.assertEqual(output, result)
+        self.assertEqual(mock_calculate_charm_hash.call_count, 2)
+
+    @patch("osm_lcm.lcm_utils.LcmBase.calculate_charm_hash")
+    def test_compare_charm_hash_charm_is_same(self, mock_calculate_charm_hash):
+        """Compare charm hash, charm files are same."""
+        output = False
+        charm1 = charm2 = tmpfile
+        mock_calculate_charm_hash.side_effect = [
+            self.hexdigest,
+            self.hexdigest,
+        ]
+        result = LcmBase.compare_charm_hash(charm1, charm2)
+        self.assertEqual(output, result)
+        self.assertEqual(mock_calculate_charm_hash.call_count, 2)
+
+    @patch("osm_lcm.lcm_utils.LcmBase.calculate_charm_hash")
+    def test_compare_charm_hash_one_charm_is_not_valid(self, mock_calculate_charm_hash):
+        """Compare charm hash, one charm file is not valid."""
+        charm1, charm2 = tmpdir, None
+        mock_calculate_charm_hash.side_effect = [
+            self.hexdigest,
+            FileNotFoundError,
+        ]
+
+        with self.assertRaises(FileNotFoundError):
+            LcmBase.compare_charm_hash(charm1, charm2)
+        self.assertEqual(mock_calculate_charm_hash.call_count, 2)
+
+    @patch("osm_lcm.lcm_utils.LcmBase.calculate_charm_hash")
+    def test_compare_charm_hash_both_charms_are_not_valid(
+        self, mock_calculate_charm_hash
+    ):
+        """Compare charm hash, both charm files are not valid."""
+        charm1, charm2 = None, None
+        mock_calculate_charm_hash.side_effect = [IOError, IOError]
+        with self.assertRaises(IOError):
+            LcmBase.compare_charm_hash(charm1, charm2)
+        self.assertEqual(mock_calculate_charm_hash.call_count, 1)
+
+    @patch("osm_lcm.lcm_utils.checksumdir")
+    def test_compare_charmdir_charm_changed(self, mock_checksum):
+        """Compare charm directory hash, charms are changed."""
+        expected_output = True
+        charm_dir1, charm_dir2 = tmpdir, tmpdir
+        mock_checksum.dirhash.side_effect = [
+            self.hexdigest,
+            "031eddtrtr651593c5fe5c006f",
+        ]
+        result = LcmBase.compare_charmdir_hash(charm_dir1, charm_dir2)
+        self.assertEqual(expected_output, result)
+        self.assertEqual(mock_checksum.dirhash.call_count, 2)
+
+    @patch("osm_lcm.lcm_utils.checksumdir")
+    def test_compare_charmdir_charm_is_same(self, mock_checksum):
+        """Compare charm directory hash, charms are same."""
+        expected_output = False
+        charm_dir1 = charm_dir2 = tmpdir
+        mock_checksum.dirhash.side_effect = [
+            self.hexdigest,
+            self.hexdigest,
+        ]
+        result = LcmBase.compare_charmdir_hash(charm_dir1, charm_dir2)
+        self.assertEqual(expected_output, result)
+        self.assertEqual(mock_checksum.dirhash.call_count, 2)
+
+    @patch("osm_lcm.lcm_utils.checksumdir")
+    def test_compare_charmdir_one_charmdir_is_not_valid(self, mock_checksum):
+        """Compare charm directory hash, one charm directory is not valid."""
+        charm_dir1, charm_dir2 = tmpdir, None
+        mock_checksum.dirhash.side_effect = [
+            self.hexdigest,
+            FileNotFoundError,
+        ]
+        with self.assertRaises(FileNotFoundError):
+            LcmBase.compare_charmdir_hash(charm_dir1, charm_dir2)
+        self.assertEqual(mock_checksum.dirhash.call_count, 2)
+
+    @patch("osm_lcm.lcm_utils.checksumdir")
+    def test_compare_charmdir_both_charmdirs_are_not_valid(self, mock_checksum):
+        """Compare charm directory hash, both charm directories are not valid."""
+        charm_dir1, charm_dir2 = None, None
+        mock_checksum.dirhash.side_effect = [FileNotFoundError, FileNotFoundError]
+        with self.assertRaises(FileNotFoundError):
+            LcmBase.compare_charmdir_hash(charm_dir1, charm_dir2)
+        self.assertEqual(mock_checksum.dirhash.call_count, 1)
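The tests added above drive LcmBase.calculate_charm_hash, compare_charm_hash and compare_charmdir_hash through mocked hashlib and checksumdir. A minimal sketch of helpers consistent with that mocking (hypothetical, not the committed implementation):

    import hashlib

    import checksumdir


    def calculate_charm_hash(charm_path):
        # The md5 object is created before the file is opened; I/O errors propagate.
        file_hash = hashlib.md5()
        with open(charm_path, "rb") as charm_file:
            file_hash.update(charm_file.read())
        return file_hash.hexdigest()


    def compare_charm_hash(charm1, charm2):
        # True when the two charm files differ.
        return calculate_charm_hash(charm1) != calculate_charm_hash(charm2)


    def compare_charmdir_hash(charm_dir1, charm_dir2):
        # True when the two charm directories differ.
        return checksumdir.dirhash(charm_dir1) != checksumdir.dirhash(charm_dir2)
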
index 2004701..f44dbf6 100644 (file)
 
 import asynctest  # pip3 install asynctest --user
 import asyncio
+from copy import deepcopy
 import yaml
 import copy
+from n2vc.exceptions import N2VCException
 from os import getenv
 from osm_lcm import ns
 from osm_common.msgkafka import MsgKafka
+
+from osm_lcm.data_utils.lcm_config import LcmCfg
 from osm_lcm.lcm_utils import TaskRegistry
 from osm_lcm.ng_ro import NgRoClient
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
+from osm_lcm.data_utils.vca import Relation, EERelation
 from osm_lcm.data_utils.vnfd import find_software_version
 from osm_lcm.lcm_utils import check_juju_bundle_existence, get_charm_artifact_path
 from osm_lcm.lcm_utils import LcmException
@@ -53,7 +58,7 @@ It allows, if some testing ENV are supplied, testing without mocking some extern
     OSMLCM_RO_XXX: configuration of RO
 """
 
-lcm_config = {
+lcm_config_dict = {
     "global": {"loglevel": "DEBUG"},
     "timeout": {},
     "VCA": {  # TODO replace with os.get_env to get other configurations
@@ -65,7 +70,7 @@ lcm_config = {
         "ca_cert": getenv("OSMLCM_VCA_CACERT", None),
         "apiproxy": getenv("OSMLCM_VCA_APIPROXY", "192.168.1.1"),
     },
-    "ro_config": {
+    "RO": {
         "uri": "http://{}:{}/openmano".format(
             getenv("OSMLCM_RO_HOST", "ro"), getenv("OSMLCM_RO_PORT", "9090")
         ),
@@ -76,6 +81,23 @@ lcm_config = {
     },
 }
 
+lcm_config = LcmCfg()
+lcm_config.set_from_dict(lcm_config_dict)
+lcm_config.transform()
+
+nsr_id = descriptors.test_ids["TEST-A"]["ns"]
+nslcmop_id = descriptors.test_ids["TEST-A"]["update"]
+vnfr_id = "6421c7c9-d865-4fb4-9a13-d4275d243e01"
+vnfd_id = "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77"
+update_fs = Mock(autospec=True)
+update_fs.path.__add__ = Mock()
+update_fs.path.side_effect = ["/", "/", "/", "/"]
+update_fs.sync.side_effect = [None, None]
+
+
+def callable(a):
+    return a
+
 
 class TestMyNS(asynctest.TestCase):
     async def _n2vc_DeployCharms(
@@ -87,7 +109,7 @@ class TestMyNS(asynctest.TestCase):
         params={},
         machine_spec={},
         callback=None,
-        *callback_args
+        *callback_args,
     ):
         if callback:
             for status, message in (
@@ -165,8 +187,17 @@ class TestMyNS(asynctest.TestCase):
         return str(uuid4())
 
     async def setUp(self):
-
-        # Mock DB
+        self.mock_db()
+        self.mock_kafka()
+        self.mock_filesystem()
+        self.mock_task_registry()
+        self.mock_vca_k8s()
+        self.create_nslcm_class()
+        self.mock_logging()
+        self.mock_vca_n2vc()
+        self.mock_ro()
+
+    def mock_db(self):
         if not getenv("OSMLCMTEST_DB_NOMOCK"):
             # Cleanup singleton Database instance
             Database.instance = None
@@ -193,10 +224,10 @@ class TestMyNS(asynctest.TestCase):
             self.db.create_list("vnfrs", yaml.safe_load(descriptors.db_vnfrs_text))
             self.db_vim_accounts = yaml.safe_load(descriptors.db_vim_accounts_text)
 
-        # Mock kafka
+    def mock_kafka(self):
         self.msg = asynctest.Mock(MsgKafka())
 
-        # Mock filesystem
+    def mock_filesystem(self):
         if not getenv("OSMLCMTEST_FS_NOMOCK"):
             self.fs = asynctest.Mock(
                 Filesystem({"storage": {"driver": "local", "path": "/"}}).instance.fs
@@ -208,13 +239,13 @@ class TestMyNS(asynctest.TestCase):
             # self.fs.file_open.return_value.__enter__.return_value = asynctest.MagicMock()  # called on a python "with"
             # self.fs.file_open.return_value.__enter__.return_value.read.return_value = ""   # empty file
 
-        # Mock TaskRegistry
+    def mock_task_registry(self):
         self.lcm_tasks = asynctest.Mock(TaskRegistry())
         self.lcm_tasks.lock_HA.return_value = True
         self.lcm_tasks.waitfor_related_HA.return_value = None
         self.lcm_tasks.lookfor_related.return_value = ("", [])
 
-        # Mock VCA - K8s
+    def mock_vca_k8s(self):
         if not getenv("OSMLCMTEST_VCA_K8s_NOMOCK"):
             ns.K8sJujuConnector = asynctest.MagicMock(ns.K8sJujuConnector)
             ns.K8sHelmConnector = asynctest.MagicMock(ns.K8sHelmConnector)
@@ -224,17 +255,17 @@ class TestMyNS(asynctest.TestCase):
             ns.N2VCJujuConnector = asynctest.MagicMock(ns.N2VCJujuConnector)
             ns.LCMHelmConn = asynctest.MagicMock(ns.LCMHelmConn)
 
-        # Create NsLCM class
+    def create_nslcm_class(self):
         self.my_ns = ns.NsLcm(self.msg, self.lcm_tasks, lcm_config, self.loop)
         self.my_ns.fs = self.fs
         self.my_ns.db = self.db
         self.my_ns._wait_dependent_n2vc = asynctest.CoroutineMock()
 
-        # Mock logging
+    def mock_logging(self):
         if not getenv("OSMLCMTEST_LOGGING_NOMOCK"):
             self.my_ns.logger = asynctest.Mock(self.my_ns.logger)
 
-        # Mock VCA - N2VC
+    def mock_vca_n2vc(self):
         if not getenv("OSMLCMTEST_VCA_NOMOCK"):
             pub_key = getenv("OSMLCMTEST_NS_PUBKEY", "ssh-rsa test-pub-key t@osm.com")
             # self.my_ns.n2vc = asynctest.Mock(N2VC())
@@ -278,11 +309,14 @@ class TestMyNS(asynctest.TestCase):
             self.my_ns.n2vc.delete_namespace = asynctest.CoroutineMock(
                 return_value=None
             )
+            self.my_ns.n2vc.register_execution_environment = asynctest.CoroutineMock(
+                return_value="model-name.application-name.k8s"
+            )
 
-        # Mock RO
+    def mock_ro(self):
         if not getenv("OSMLCMTEST_RO_NOMOCK"):
             self.my_ns.RO = asynctest.Mock(
-                NgRoClient(self.loop, **lcm_config["ro_config"])
+                NgRoClient(self.loop, **lcm_config.RO.to_dict())
             )
             # TODO first time should be empty list, following should return a dict
             # self.my_ns.RO.get_list = asynctest.CoroutineMock(self.my_ns.RO.get_list, return_value=[])
@@ -943,747 +977,671 @@ class TestMyNS(asynctest.TestCase):
     #     self.assertEqual(db_nsr.get("errorDescription "), None, "errorDescription different than None")
     #     self.assertEqual(db_nsr.get("errorDetail"), None, "errorDetail different than None")
 
-    # Test update method
-
-    async def test_update(self):
-
-        nsr_id = descriptors.test_ids["TEST-A"]["ns"]
-        nslcmop_id = descriptors.test_ids["TEST-A"]["update"]
-        vnfr_id = "6421c7c9-d865-4fb4-9a13-d4275d243e01"
-        vnfd_id = "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77"
-
-        def mock_reset():
-            mock_charm_hash.reset_mock()
-            mock_juju_bundle.reset_mock()
-            fs.sync.reset_mock()
-            mock_charm_upgrade.reset_mock()
-            mock_software_version.reset_mock()
-
-        with self.subTest(
-            i=1,
-            t="Update type: CHANGE_VNFPKG, latest_vnfd revision changed,"
-            "Charm package changed, sw-version is not changed.",
-        ):
-
-            self.db.set_one(
-                "vnfds",
-                q_filter={"_id": vnfd_id},
-                update_dict={"_admin.revision": 3, "kdu": []},
-            )
-
-            self.db.set_one(
-                "vnfds_revisions",
-                q_filter={"_id": vnfd_id + ":1"},
-                update_dict={"_admin.revision": 1, "kdu": []},
-            )
-
-            self.db.set_one(
-                "vnfrs", q_filter={"_id": vnfr_id}, update_dict={"revision": 1}
-            )
-
-            mock_charm_hash = Mock(autospec=True)
-            mock_charm_hash.return_value = True
-
-            mock_juju_bundle = Mock(return_value=None)
-
-            mock_software_version = Mock(autospec=True)
-            mock_software_version.side_effect = ["1.0", "1.0"]
-
-            mock_charm_upgrade = asynctest.Mock(autospec=True)
-            task = asyncio.Future()
-            task.set_result(("COMPLETED", "some_output"))
-            mock_charm_upgrade.return_value = task
-
-            fs = Mock(autospec=True)
-            fs.path.__add__ = Mock()
-            fs.path.side_effect = ["/", "/", "/", "/"]
-            fs.sync.side_effect = [None, None]
-
-            instance = self.my_ns
-
-            expected_operation_state = "COMPLETED"
-            expected_operation_error = ""
-            expected_vnfr_revision = 3
-            expected_ns_state = "INSTANTIATED"
-            expected_ns_operational_state = "running"
-
-            with patch.object(instance, "fs", fs), patch(
-                "osm_lcm.lcm_utils.LcmBase.check_charm_hash_changed", mock_charm_hash
-            ), patch("osm_lcm.ns.NsLcm._ns_charm_upgrade", mock_charm_upgrade), patch(
-                "osm_lcm.data_utils.vnfd.find_software_version", mock_software_version
-            ), patch(
-                "osm_lcm.lcm_utils.check_juju_bundle_existence", mock_juju_bundle
-            ):
-
-                await instance.update(nsr_id, nslcmop_id)
-                return_operation_state = self.db.get_one(
-                    "nslcmops", {"_id": nslcmop_id}
-                ).get("operationState")
-                return_operation_error = self.db.get_one(
-                    "nslcmops", {"_id": nslcmop_id}
-                ).get("errorMessage")
-                return_ns_operational_state = self.db.get_one(
-                    "nsrs", {"_id": nsr_id}
-                ).get("operational-status")
-
-                return_vnfr_revision = self.db.get_one("vnfrs", {"_id": vnfr_id}).get(
-                    "revision"
-                )
-
-                return_ns_state = self.db.get_one("nsrs", {"_id": nsr_id}).get(
-                    "nsState"
-                )
-
-                mock_charm_hash.assert_called_with(
-                    "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77:1/hackfest_3charmed_vnfd/charms/simple",
-                    "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77:3/hackfest_3charmed_vnfd/charms/simple",
-                )
-
-                self.assertEqual(fs.sync.call_count, 2)
-                self.assertEqual(return_ns_state, expected_ns_state)
-                self.assertEqual(return_operation_state, expected_operation_state)
-                self.assertEqual(return_operation_error, expected_operation_error)
-                self.assertEqual(
-                    return_ns_operational_state, expected_ns_operational_state
-                )
-                self.assertEqual(return_vnfr_revision, expected_vnfr_revision)
-
-            mock_reset()
-
-        with self.subTest(
-            i=2, t="Update type: CHANGE_VNFPKG, latest_vnfd revision not changed"
-        ):
-
-            self.db.set_one(
-                "vnfds", q_filter={"_id": vnfd_id}, update_dict={"_admin.revision": 1}
-            )
-
-            self.db.set_one(
-                "vnfrs", q_filter={"_id": vnfr_id}, update_dict={"revision": 1}
-            )
-
-            mock_charm_hash = Mock(autospec=True)
-            mock_charm_hash.return_value = True
-
-            mock_juju_bundle = Mock(return_value=None)
-            mock_software_version = Mock(autospec=True)
-
-            mock_charm_upgrade = asynctest.Mock(autospec=True)
-            task = asyncio.Future()
-            task.set_result(("COMPLETED", "some_output"))
-            mock_charm_upgrade.return_value = task
-
-            fs = Mock(autospec=True)
-            fs.path.__add__ = Mock()
-            fs.path.side_effect = ["/", "/", "/", "/"]
-            fs.sync.side_effect = [None, None]
-
-            instance = self.my_ns
-
-            expected_operation_state = "COMPLETED"
-            expected_operation_error = ""
-            expected_vnfr_revision = 1
-            expected_ns_state = "INSTANTIATED"
-            expected_ns_operational_state = "running"
-
-            with patch.object(instance, "fs", fs), patch(
-                "osm_lcm.lcm_utils.LcmBase.check_charm_hash_changed", mock_charm_hash
-            ), patch("osm_lcm.ns.NsLcm._ns_charm_upgrade", mock_charm_upgrade), patch(
-                "osm_lcm.lcm_utils.check_juju_bundle_existence", mock_juju_bundle
-            ):
-
-                await instance.update(nsr_id, nslcmop_id)
-
-                return_operation_state = self.db.get_one(
-                    "nslcmops", {"_id": nslcmop_id}
-                ).get("operationState")
-
-                return_operation_error = self.db.get_one(
-                    "nslcmops", {"_id": nslcmop_id}
-                ).get("errorMessage")
-
-                return_ns_operational_state = self.db.get_one(
-                    "nsrs", {"_id": nsr_id}
-                ).get("operational-status")
-
-                return_ns_state = self.db.get_one("nsrs", {"_id": nsr_id}).get(
-                    "nsState"
-                )
-
-                return_vnfr_revision = self.db.get_one("vnfrs", {"_id": vnfr_id}).get(
-                    "revision"
-                )
-
-                mock_charm_hash.assert_not_called()
-                mock_software_version.assert_not_called()
-                mock_juju_bundle.assert_not_called()
-                mock_charm_upgrade.assert_not_called()
-                fs.sync.assert_not_called()
-
-                self.assertEqual(return_ns_state, expected_ns_state)
-                self.assertEqual(return_operation_state, expected_operation_state)
-                self.assertEqual(return_operation_error, expected_operation_error)
-                self.assertEqual(
-                    return_ns_operational_state, expected_ns_operational_state
-                )
-                self.assertEqual(return_vnfr_revision, expected_vnfr_revision)
-
-            mock_reset()
-
-        with self.subTest(
-            i=3,
-            t="Update type: CHANGE_VNFPKG, latest_vnfd revision changed, "
-            "Charm package is not changed, sw-version is not changed.",
-        ):
-
-            self.db.set_one(
-                "vnfds", q_filter={"_id": vnfd_id}, update_dict={"_admin.revision": 3}
-            )
-
-            self.db.set_one(
-                "vnfds_revisions",
-                q_filter={"_id": vnfd_id + ":1"},
-                update_dict={"_admin.revision": 1},
-            )
-
-            self.db.set_one(
-                "vnfrs", q_filter={"_id": vnfr_id}, update_dict={"revision": 1}
-            )
-
-            mock_charm_hash = Mock(autospec=True)
-            mock_charm_hash.return_value = False
-
-            mock_juju_bundle = Mock(return_value=None)
-
-            mock_software_version = Mock(autospec=True)
-
-            mock_charm_upgrade = asynctest.Mock(autospec=True)
-            task = asyncio.Future()
-            task.set_result(("COMPLETED", "some_output"))
-            mock_charm_upgrade.return_value = task
-            mock_software_version.side_effect = ["1.0", "1.0"]
-
-            fs = Mock(autospec=True)
-            fs.path.__add__ = Mock()
-            fs.path.side_effect = ["/", "/", "/", "/"]
-            fs.sync.side_effect = [None, None]
-
-            instance = self.my_ns
-
-            expected_operation_state = "COMPLETED"
-            expected_operation_error = ""
-            expected_vnfr_revision = 3
-            expected_ns_state = "INSTANTIATED"
-            expected_ns_operational_state = "running"
-
-            with patch.object(instance, "fs", fs), patch(
-                "osm_lcm.lcm_utils.LcmBase.check_charm_hash_changed", mock_charm_hash
-            ), patch("osm_lcm.ns.NsLcm._ns_charm_upgrade", mock_charm_upgrade), patch(
-                "osm_lcm.lcm_utils.check_juju_bundle_existence", mock_juju_bundle
-            ):
-
-                await instance.update(nsr_id, nslcmop_id)
-
-                return_operation_state = self.db.get_one(
-                    "nslcmops", {"_id": nslcmop_id}
-                ).get("operationState")
-
-                return_operation_error = self.db.get_one(
-                    "nslcmops", {"_id": nslcmop_id}
-                ).get("errorMessage")
-
-                return_ns_operational_state = self.db.get_one(
-                    "nsrs", {"_id": nsr_id}
-                ).get("operational-status")
-
-                return_vnfr_revision = self.db.get_one("vnfrs", {"_id": vnfr_id}).get(
-                    "revision"
-                )
-
-                return_ns_state = self.db.get_one("nsrs", {"_id": nsr_id}).get(
-                    "nsState"
-                )
-
-                mock_charm_hash.assert_called_with(
-                    "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77:1/hackfest_3charmed_vnfd/charms/simple",
-                    "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77:3/hackfest_3charmed_vnfd/charms/simple",
-                )
-
-                self.assertEqual(fs.sync.call_count, 2)
-                self.assertEqual(mock_charm_hash.call_count, 1)
-
-                mock_juju_bundle.assert_not_called()
-                mock_charm_upgrade.assert_not_called()
-
-                self.assertEqual(return_ns_state, expected_ns_state)
-                self.assertEqual(return_operation_state, expected_operation_state)
-                self.assertEqual(return_operation_error, expected_operation_error)
-                self.assertEqual(
-                    return_ns_operational_state, expected_ns_operational_state
-                )
-                self.assertEqual(return_vnfr_revision, expected_vnfr_revision)
-
-            mock_reset()
-
-        with self.subTest(
-            i=4,
-            t="Update type: CHANGE_VNFPKG, latest_vnfd revision changed, "
-            "Charm package exists, sw-version changed.",
-        ):
-
-            self.db.set_one(
-                "vnfds",
-                q_filter={"_id": vnfd_id},
-                update_dict={"_admin.revision": 3, "software-version": "3.0"},
-            )
-
-            self.db.set_one(
-                "vnfds_revisions",
-                q_filter={"_id": vnfd_id + ":1"},
-                update_dict={"_admin.revision": 1},
-            )
-
-            self.db.set_one(
-                "vnfrs",
-                q_filter={"_id": vnfr_id},
-                update_dict={"revision": 1},
-            )
-
-            mock_charm_hash = Mock(autospec=True)
-            mock_charm_hash.return_value = False
-
-            mock_juju_bundle = Mock(return_value=None)
-
-            mock_charm_upgrade = asynctest.Mock(autospec=True)
-            task = asyncio.Future()
-            task.set_result(("COMPLETED", "some_output"))
-            mock_charm_upgrade.return_value = task
-
-            mock_charm_artifact = Mock(autospec=True)
-            mock_charm_artifact.side_effect = [
-                "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77:1/hackfest_3charmed_vnfd/charms/simple",
-                "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77/hackfest_3charmed_vnfd/charms/simple",
-            ]
-
-            fs = Mock(autospec=True)
-            fs.path.__add__ = Mock()
-            fs.path.side_effect = ["/", "/", "/", "/"]
-            fs.sync.side_effect = [None, None]
-
-            instance = self.my_ns
+    @patch("osm_lcm.lcm_utils.LcmBase.check_charm_hash_changed")
+    @patch(
+        "osm_lcm.ns.NsLcm._ns_charm_upgrade", new_callable=asynctest.Mock(autospec=True)
+    )
+    @patch("osm_lcm.data_utils.vnfd.find_software_version")
+    @patch("osm_lcm.lcm_utils.check_juju_bundle_existence")
+    async def test_update_change_vnfpkg_sw_version_not_changed(
+        self,
+        mock_juju_bundle,
+        mock_software_version,
+        mock_charm_upgrade,
+        mock_charm_hash,
+    ):
+        """Update type: CHANGE_VNFPKG, latest_vnfd revision changed,
+        Charm package changed, sw-version is not changed"""
+        self.db.set_one(
+            "vnfds",
+            q_filter={"_id": vnfd_id},
+            update_dict={"_admin.revision": 3, "kdu": []},
+        )
 
-            expected_operation_state = "FAILED"
-            expected_operation_error = "FAILED Checking if existing VNF has charm: Software version change is not supported as VNF instance 6421c7c9-d865-4fb4-9a13-d4275d243e01 has charm."
-            expected_vnfr_revision = 1
-            expected_ns_state = "INSTANTIATED"
-            expected_ns_operational_state = "running"
+        self.db.set_one(
+            "vnfds_revisions",
+            q_filter={"_id": vnfd_id + ":1"},
+            update_dict={"_admin.revision": 1, "kdu": []},
+        )
 
-            with patch.object(instance, "fs", fs), patch(
-                "osm_lcm.lcm_utils.LcmBase.check_charm_hash_changed", mock_charm_hash
-            ), patch("osm_lcm.ns.NsLcm._ns_charm_upgrade", mock_charm_upgrade), patch(
-                "osm_lcm.lcm_utils.get_charm_artifact_path", mock_charm_artifact
-            ):
+        self.db.set_one("vnfrs", q_filter={"_id": vnfr_id}, update_dict={"revision": 1})
 
-                await instance.update(nsr_id, nslcmop_id)
-
-                return_operation_state = self.db.get_one(
-                    "nslcmops", {"_id": nslcmop_id}
-                ).get("operationState")
-
-                return_operation_error = self.db.get_one(
-                    "nslcmops", {"_id": nslcmop_id}
-                ).get("errorMessage")
-
-                return_ns_operational_state = self.db.get_one(
-                    "nsrs", {"_id": nsr_id}
-                ).get("operational-status")
-
-                return_vnfr_revision = self.db.get_one("vnfrs", {"_id": vnfr_id}).get(
-                    "revision"
-                )
-
-                return_ns_state = self.db.get_one("nsrs", {"_id": nsr_id}).get(
-                    "nsState"
-                )
-
-                self.assertEqual(fs.sync.call_count, 2)
-                mock_charm_hash.assert_not_called()
-
-                mock_juju_bundle.assert_not_called()
-                mock_charm_upgrade.assert_not_called()
-
-                self.assertEqual(return_ns_state, expected_ns_state)
-                self.assertEqual(return_operation_state, expected_operation_state)
-                self.assertEqual(return_operation_error, expected_operation_error)
-                self.assertEqual(
-                    return_ns_operational_state, expected_ns_operational_state
-                )
-                self.assertEqual(return_vnfr_revision, expected_vnfr_revision)
-
-            mock_reset()
-
-        with self.subTest(
-            i=5,
-            t="Update type: CHANGE_VNFPKG, latest_vnfd revision changed,"
-            "Charm package exists, sw-version not changed, juju-bundle exists",
-        ):
-
-            self.db.set_one(
-                "vnfds",
-                q_filter={"_id": vnfd_id},
-                update_dict={
-                    "_admin.revision": 3,
-                    "software-version": "1.0",
-                    "kdu.0.juju-bundle": "stable/native-kdu",
-                },
-            )
+        mock_charm_hash.return_value = True
+        mock_software_version.side_effect = ["1.0", "1.0"]
 
-            self.db.set_one(
-                "vnfds_revisions",
-                q_filter={"_id": vnfd_id + ":1"},
-                update_dict={
-                    "_admin.revision": 1,
-                    "software-version": "1.0",
-                    "kdu.0.juju-bundle": "stable/native-kdu",
-                },
-            )
+        task = asyncio.Future()
+        task.set_result(("COMPLETED", "some_output"))
+        mock_charm_upgrade.return_value = task
 
-            self.db.set_one(
-                "vnfrs", q_filter={"_id": vnfr_id}, update_dict={"revision": 1}
-            )
+        instance = self.my_ns
+        fs = deepcopy(update_fs)
+        instance.fs = fs
 
-            mock_charm_hash = Mock(autospec=True)
-            mock_charm_hash.return_value = True
-
-            mock_charm_artifact = Mock(autospec=True)
-            mock_charm_artifact.side_effect = [
-                "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77:1/hackfest_3charmed_vnfd/charms/simple",
-                "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77/hackfest_3charmed_vnfd/charms/simple",
-            ]
-
-            fs = Mock(autospec=True)
-            fs.path.__add__ = Mock()
-            fs.path.side_effect = ["/", "/", "/", "/"]
-            fs.sync.side_effect = [None, None]
-
-            instance = self.my_ns
-
-            expected_operation_state = "FAILED"
-            expected_operation_error = "FAILED Checking whether VNF uses juju bundle: Charm upgrade is not supported for the instance which uses juju-bundle: stable/native-kdu"
-            expected_vnfr_revision = 1
-            expected_ns_state = "INSTANTIATED"
-            expected_ns_operational_state = "running"
-
-            with patch.object(instance, "fs", fs), patch(
-                "osm_lcm.lcm_utils.LcmBase.check_charm_hash_changed", mock_charm_hash
-            ), patch("osm_lcm.lcm_utils.get_charm_artifact_path", mock_charm_artifact):
-
-                await instance.update(nsr_id, nslcmop_id)
-
-                return_operation_state = self.db.get_one(
-                    "nslcmops", {"_id": nslcmop_id}
-                ).get("operationState")
-
-                return_operation_error = self.db.get_one(
-                    "nslcmops", {"_id": nslcmop_id}
-                ).get("errorMessage")
-
-                return_ns_operational_state = self.db.get_one(
-                    "nsrs", {"_id": nsr_id}
-                ).get("operational-status")
-
-                return_vnfr_revision = self.db.get_one("vnfrs", {"_id": vnfr_id}).get(
-                    "revision"
-                )
-
-                return_ns_state = self.db.get_one("nsrs", {"_id": nsr_id}).get(
-                    "nsState"
-                )
-
-                self.assertEqual(fs.sync.call_count, 2)
-                self.assertEqual(mock_charm_hash.call_count, 1)
-                self.assertEqual(mock_charm_hash.call_count, 1)
-
-                mock_charm_upgrade.assert_not_called()
-
-                self.assertEqual(return_ns_state, expected_ns_state)
-                self.assertEqual(return_operation_state, expected_operation_state)
-                self.assertEqual(return_operation_error, expected_operation_error)
-                self.assertEqual(
-                    return_ns_operational_state, expected_ns_operational_state
-                )
-                self.assertEqual(return_vnfr_revision, expected_vnfr_revision)
-
-            mock_reset()
-
-        with self.subTest(
-            i=6,
-            t="Update type: CHANGE_VNFPKG, latest_vnfd revision changed,"
-            "Charm package exists, sw-version not changed, charm-upgrade failed",
-        ):
-
-            self.db.set_one(
-                "vnfds",
-                q_filter={"_id": vnfd_id},
-                update_dict={
-                    "_admin.revision": 3,
-                    "software-version": "1.0",
-                    "kdu": [],
-                },
-            )
+        expected_operation_state = "COMPLETED"
+        expected_operation_error = ""
+        expected_vnfr_revision = 3
+        expected_ns_state = "INSTANTIATED"
+        expected_ns_operational_state = "running"
 
-            self.db.set_one(
-                "vnfds_revisions",
-                q_filter={"_id": vnfd_id + ":1"},
-                update_dict={
-                    "_admin.revision": 1,
-                    "software-version": "1.0",
-                    "kdu": [],
-                },
-            )
+        await instance.update(nsr_id, nslcmop_id)
+        return_operation_state = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get(
+            "operationState"
+        )
+        return_operation_error = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get(
+            "errorMessage"
+        )
+        return_ns_operational_state = self.db.get_one("nsrs", {"_id": nsr_id}).get(
+            "operational-status"
+        )
+        return_vnfr_revision = self.db.get_one("vnfrs", {"_id": vnfr_id}).get(
+            "revision"
+        )
+        return_ns_state = self.db.get_one("nsrs", {"_id": nsr_id}).get("nsState")
+        mock_charm_hash.assert_called_with(
+            f"{vnfd_id}:1/hackfest_3charmed_vnfd/charms/simple",
+            f"{vnfd_id}:3/hackfest_3charmed_vnfd/charms/simple",
+        )
+        self.assertEqual(fs.sync.call_count, 2)
+        self.assertEqual(return_ns_state, expected_ns_state)
+        self.assertEqual(return_operation_state, expected_operation_state)
+        self.assertEqual(return_operation_error, expected_operation_error)
+        self.assertEqual(return_ns_operational_state, expected_ns_operational_state)
+        self.assertEqual(return_vnfr_revision, expected_vnfr_revision)
+
+    @patch("osm_lcm.lcm_utils.LcmBase.check_charm_hash_changed")
+    @patch(
+        "osm_lcm.ns.NsLcm._ns_charm_upgrade", new_callable=asynctest.Mock(autospec=True)
+    )
+    @patch("osm_lcm.data_utils.vnfd.find_software_version")
+    @patch("osm_lcm.lcm_utils.check_juju_bundle_existence")
+    async def test_update_change_vnfpkg_vnfd_revision_not_changed(
+        self,
+        mock_juju_bundle,
+        mock_software_version,
+        mock_charm_upgrade,
+        mock_charm_hash,
+    ):
+        """Update type: CHANGE_VNFPKG, latest_vnfd revision not changed"""
+        self.db.set_one(
+            "vnfds", q_filter={"_id": vnfd_id}, update_dict={"_admin.revision": 1}
+        )
+        self.db.set_one("vnfrs", q_filter={"_id": vnfr_id}, update_dict={"revision": 1})
 
-            self.db.set_one(
-                "vnfrs", q_filter={"_id": vnfr_id}, update_dict={"revision": 1}
-            )
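+        # Revision is already 1 in both the VNFD and the VNFR, so the update should skip hash checks and charm upgrade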
+        mock_charm_hash.return_value = True
 
-            mock_charm_hash = Mock(autospec=True)
-            mock_charm_hash.return_value = True
-
-            mock_charm_upgrade = asynctest.Mock(autospec=True)
-            task = asyncio.Future()
-            task.set_result(("FAILED", "some_error"))
-            mock_charm_upgrade.return_value = task
-
-            mock_charm_artifact = Mock(autospec=True)
-            mock_charm_artifact.side_effect = [
-                "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77:1/hackfest_3charmed_vnfd/charms/simple",
-                "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77/hackfest_3charmed_vnfd/charms/simple",
-            ]
-
-            fs = Mock(autospec=True)
-            fs.path.__add__ = Mock()
-            fs.path.side_effect = ["/", "/", "/", "/"]
-            fs.sync.side_effect = [None, None]
-
-            instance = self.my_ns
-
-            expected_operation_state = "FAILED"
-            expected_operation_error = "some_error"
-            expected_vnfr_revision = 1
-            expected_ns_state = "INSTANTIATED"
-            expected_ns_operational_state = "running"
-
-            with patch.object(instance, "fs", fs), patch(
-                "osm_lcm.lcm_utils.LcmBase.check_charm_hash_changed", mock_charm_hash
-            ), patch("osm_lcm.ns.NsLcm._ns_charm_upgrade", mock_charm_upgrade):
-
-                await instance.update(nsr_id, nslcmop_id)
-
-                return_operation_state = self.db.get_one(
-                    "nslcmops", {"_id": nslcmop_id}
-                ).get("operationState")
-
-                return_operation_error = self.db.get_one(
-                    "nslcmops", {"_id": nslcmop_id}
-                ).get("errorMessage")
-
-                return_ns_operational_state = self.db.get_one(
-                    "nsrs", {"_id": nsr_id}
-                ).get("operational-status")
-
-                return_vnfr_revision = self.db.get_one("vnfrs", {"_id": vnfr_id}).get(
-                    "revision"
-                )
-
-                return_ns_state = self.db.get_one("nsrs", {"_id": nsr_id}).get(
-                    "nsState"
-                )
-
-                self.assertEqual(fs.sync.call_count, 2)
-                self.assertEqual(mock_charm_hash.call_count, 1)
-                self.assertEqual(mock_charm_upgrade.call_count, 1)
-
-                self.assertEqual(return_ns_state, expected_ns_state)
-                self.assertEqual(return_operation_state, expected_operation_state)
-                self.assertEqual(return_operation_error, expected_operation_error)
-                self.assertEqual(
-                    return_ns_operational_state, expected_ns_operational_state
-                )
-                self.assertEqual(return_vnfr_revision, expected_vnfr_revision)
-
-            mock_reset()
-
-    def test_ns_update_helper_methods(self):
-        def mock_reset():
-            fs.mock_reset()
-            mock_path.mock_reset()
-            mock_checksumdir.mock_reset()
-
-        with self.subTest(
-            i=1, t="Find software version, VNFD does not have have software version"
-        ):
-            # Testing method find_software_version
-
-            db_vnfd = self.db.get_one(
-                "vnfds", {"_id": "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77"}
-            )
-            expected_result = "1.0"
-            result = find_software_version(db_vnfd)
-            self.assertEqual(
-                result, expected_result, "Default sw version should be 1.0"
-            )
+        task = asyncio.Future()
+        task.set_result(("COMPLETED", "some_output"))
+        mock_charm_upgrade.return_value = task
 
-        with self.subTest(
-            i=2, t="Find software version, VNFD includes software version"
-        ):
-            # Testing method find_software_version
+        instance = self.my_ns
 
-            db_vnfd = self.db.get_one(
-                "vnfds", {"_id": "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77"}
-            )
-            db_vnfd["software-version"] = "3.1"
-            expected_result = "3.1"
-            result = find_software_version(db_vnfd)
-            self.assertEqual(result, expected_result, "VNFD software version is wrong")
+        expected_operation_state = "COMPLETED"
+        expected_operation_error = ""
+        expected_vnfr_revision = 1
+        expected_ns_state = "INSTANTIATED"
+        expected_ns_operational_state = "running"
 
-        with self.subTest(i=3, t="Check charm hash, Hash did not change"):
-            # Testing method check_charm_hash_changed
+        await instance.update(nsr_id, nslcmop_id)
+        return_operation_state = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get(
+            "operationState"
+        )
+        return_operation_error = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get(
+            "errorMessage"
+        )
+        return_ns_operational_state = self.db.get_one("nsrs", {"_id": nsr_id}).get(
+            "operational-status"
+        )
+        return_ns_state = self.db.get_one("nsrs", {"_id": nsr_id}).get("nsState")
+        return_vnfr_revision = self.db.get_one("vnfrs", {"_id": vnfr_id}).get(
+            "revision"
+        )
+        mock_charm_hash.assert_not_called()
+        mock_software_version.assert_not_called()
+        mock_juju_bundle.assert_not_called()
+        mock_charm_upgrade.assert_not_called()
+        update_fs.sync.assert_not_called()
+        self.assertEqual(return_ns_state, expected_ns_state)
+        self.assertEqual(return_operation_state, expected_operation_state)
+        self.assertEqual(return_operation_error, expected_operation_error)
+        self.assertEqual(return_ns_operational_state, expected_ns_operational_state)
+        self.assertEqual(return_vnfr_revision, expected_vnfr_revision)
+
+    @patch("osm_lcm.lcm_utils.LcmBase.check_charm_hash_changed")
+    @patch(
+        "osm_lcm.ns.NsLcm._ns_charm_upgrade", new_callable=asynctest.Mock(autospec=True)
+    )
+    @patch("osm_lcm.data_utils.vnfd.find_software_version")
+    @patch("osm_lcm.lcm_utils.check_juju_bundle_existence")
+    async def test_update_change_vnfpkg_charm_is_not_changed(
+        self,
+        mock_juju_bundle,
+        mock_software_version,
+        mock_charm_upgrade,
+        mock_charm_hash,
+    ):
+        """Update type: CHANGE_VNFPKG, latest_vnfd revision changed
+        Charm package is not changed, sw-version is not changed"""
+        self.db.set_one(
+            "vnfds",
+            q_filter={"_id": vnfd_id},
+            update_dict={"_admin.revision": 3, "kdu": []},
+        )
+        self.db.set_one(
+            "vnfds_revisions",
+            q_filter={"_id": vnfd_id + ":1"},
+            update_dict={"_admin.revision": 1, "kdu": []},
+        )
+        self.db.set_one("vnfrs", q_filter={"_id": vnfr_id}, update_dict={"revision": 1})
+
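+        # Charm hash is unchanged and the software version stays at 1.0, so no charm upgrade should be triggered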
+        mock_charm_hash.return_value = False
+        mock_software_version.side_effect = ["1.0", "1.0"]
+
+        instance = self.my_ns
+        fs = deepcopy(update_fs)
+        instance.fs = fs
+        expected_operation_state = "COMPLETED"
+        expected_operation_error = ""
+        expected_vnfr_revision = 3
+        expected_ns_state = "INSTANTIATED"
+        expected_ns_operational_state = "running"
+
+        await instance.update(nsr_id, nslcmop_id)
+        return_operation_state = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get(
+            "operationState"
+        )
+        return_operation_error = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get(
+            "errorMessage"
+        )
+        return_ns_operational_state = self.db.get_one("nsrs", {"_id": nsr_id}).get(
+            "operational-status"
+        )
+        return_vnfr_revision = self.db.get_one("vnfrs", {"_id": vnfr_id}).get(
+            "revision"
+        )
+        return_ns_state = self.db.get_one("nsrs", {"_id": nsr_id}).get("nsState")
+        mock_charm_hash.assert_called_with(
+            f"{vnfd_id}:1/hackfest_3charmed_vnfd/charms/simple",
+            f"{vnfd_id}:3/hackfest_3charmed_vnfd/charms/simple",
+        )
+        self.assertEqual(fs.sync.call_count, 2)
+        self.assertEqual(mock_charm_hash.call_count, 1)
+        mock_juju_bundle.assert_not_called()
+        mock_charm_upgrade.assert_not_called()
+        self.assertEqual(return_ns_state, expected_ns_state)
+        self.assertEqual(return_operation_state, expected_operation_state)
+        self.assertEqual(return_operation_error, expected_operation_error)
+        self.assertEqual(return_ns_operational_state, expected_ns_operational_state)
+        self.assertEqual(return_vnfr_revision, expected_vnfr_revision)
+
+    @patch("osm_lcm.lcm_utils.check_juju_bundle_existence")
+    @patch("osm_lcm.lcm_utils.LcmBase.check_charm_hash_changed")
+    @patch(
+        "osm_lcm.ns.NsLcm._ns_charm_upgrade", new_callable=asynctest.Mock(autospec=True)
+    )
+    @patch("osm_lcm.lcm_utils.get_charm_artifact_path")
+    async def test_update_change_vnfpkg_sw_version_changed(
+        self, mock_charm_artifact, mock_charm_upgrade, mock_charm_hash, mock_juju_bundle
+    ):
+        """Update type: CHANGE_VNFPKG, latest_vnfd revision changed
+        Charm package exists, sw-version changed."""
+        self.db.set_one(
+            "vnfds",
+            q_filter={"_id": vnfd_id},
+            update_dict={"_admin.revision": 3, "software-version": "3.0", "kdu": []},
+        )
+        self.db.set_one(
+            "vnfds_revisions",
+            q_filter={"_id": vnfd_id + ":1"},
+            update_dict={"_admin.revision": 1, "kdu": []},
+        )
+        self.db.set_one(
+            "vnfrs",
+            q_filter={"_id": vnfr_id},
+            update_dict={"revision": 1},
+        )
+        mock_charm_hash.return_value = False
 
-            current_path, target_path = "/tmp/charm1", "/tmp/charm1"
-            fs = Mock(autospec=True)
-            fs.path.__add__ = Mock()
-            fs.path.side_effect = ["/", "/", "/", "/"]
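+        # Charm artifact paths returned for the current (revision 1) and target (revision 3) VNFD packages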
+        mock_charm_artifact.side_effect = [
+            f"{vnfd_id}:1/hackfest_3charmed_vnfd/charms/simple",
+            f"{vnfd_id}:3/hackfest_3charmed_vnfd/charms/simple",
+        ]
 
-            mock_path = Mock(autospec=True)
-            mock_path.exists.side_effect = [True, True]
+        instance = self.my_ns
+        fs = deepcopy(update_fs)
+        instance.fs = fs
+        expected_operation_state = "FAILED"
+        expected_operation_error = "FAILED Checking if existing VNF has charm: Software version change is not supported as VNF instance 6421c7c9-d865-4fb4-9a13-d4275d243e01 has charm."
+        expected_vnfr_revision = 1
+        expected_ns_state = "INSTANTIATED"
+        expected_ns_operational_state = "running"
+
+        await instance.update(nsr_id, nslcmop_id)
+        return_operation_state = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get(
+            "operationState"
+        )
+        return_operation_error = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get(
+            "errorMessage"
+        )
+        return_ns_operational_state = self.db.get_one("nsrs", {"_id": nsr_id}).get(
+            "operational-status"
+        )
+        return_vnfr_revision = self.db.get_one("vnfrs", {"_id": vnfr_id}).get(
+            "revision"
+        )
+        return_ns_state = self.db.get_one("nsrs", {"_id": nsr_id}).get("nsState")
+        self.assertEqual(fs.sync.call_count, 2)
+        mock_charm_hash.assert_not_called()
+        mock_juju_bundle.assert_not_called()
+        mock_charm_upgrade.assert_not_called()
+        self.assertEqual(return_ns_state, expected_ns_state)
+        self.assertEqual(return_operation_state, expected_operation_state)
+        self.assertEqual(return_operation_error, expected_operation_error)
+        self.assertEqual(return_ns_operational_state, expected_ns_operational_state)
+        self.assertEqual(return_vnfr_revision, expected_vnfr_revision)
+
+    @patch("osm_lcm.lcm_utils.check_juju_bundle_existence")
+    @patch("osm_lcm.lcm_utils.LcmBase.check_charm_hash_changed")
+    @patch(
+        "osm_lcm.ns.NsLcm._ns_charm_upgrade", new_callable=asynctest.Mock(autospec=True)
+    )
+    @patch("osm_lcm.data_utils.vnfd.find_software_version")
+    async def test_update_change_vnfpkg_juju_bundle_exists(
+        self,
+        mock_software_version,
+        mock_charm_upgrade,
+        mock_charm_hash,
+        mock_juju_bundle,
+    ):
+        """Update type: CHANGE_VNFPKG, latest_vnfd revision changed
+        Charm package exists, sw-version not changed, juju-bundle exists"""
+        # Upgrade is not allowed with juju bundles, this will cause TypeError
+        self.db.set_one(
+            "vnfds",
+            q_filter={"_id": vnfd_id},
+            update_dict={
+                "_admin.revision": 5,
+                "software-version": "1.0",
+                "kdu": [{"kdu_name": "native-kdu", "juju-bundle": "stable/native-kdu"}],
+            },
+        )
+        self.db.set_one(
+            "vnfds_revisions",
+            q_filter={"_id": vnfd_id + ":1"},
+            update_dict={
+                "_admin.revision": 1,
+                "software-version": "1.0",
+                "kdu": [{"kdu_name": "native-kdu", "juju-bundle": "stable/native-kdu"}],
+            },
+        )
+        self.db.set_one(
+            "nsrs",
+            q_filter={"_id": nsr_id},
+            update_dict={
+                "_admin.deployed.VCA.0.kdu_name": "native-kdu",
+            },
+        )
+        self.db.set_one("vnfrs", q_filter={"_id": vnfr_id}, update_dict={"revision": 1})
+
+        mock_charm_hash.side_effect = [True]
+        mock_software_version.side_effect = ["1.0", "1.0"]
+        mock_juju_bundle.return_value = True
+        instance = self.my_ns
+        fs = deepcopy(update_fs)
+        instance.fs = fs
+
+        expected_vnfr_revision = 1
+        expected_ns_state = "INSTANTIATED"
+        expected_ns_operational_state = "running"
+
+        await instance.update(nsr_id, nslcmop_id)
+        return_ns_operational_state = self.db.get_one("nsrs", {"_id": nsr_id}).get(
+            "operational-status"
+        )
+        return_vnfr_revision = self.db.get_one("vnfrs", {"_id": vnfr_id}).get(
+            "revision"
+        )
+        return_ns_state = self.db.get_one("nsrs", {"_id": nsr_id}).get("nsState")
+        self.assertEqual(fs.sync.call_count, 2)
+        mock_charm_upgrade.assert_not_called()
+        mock_charm_hash.assert_not_called()
+        self.assertEqual(return_ns_state, expected_ns_state)
+        self.assertEqual(return_ns_operational_state, expected_ns_operational_state)
+        self.assertEqual(return_vnfr_revision, expected_vnfr_revision)
+
+    @patch("osm_lcm.lcm_utils.LcmBase.check_charm_hash_changed")
+    @patch(
+        "osm_lcm.ns.NsLcm._ns_charm_upgrade", new_callable=asynctest.Mock(autospec=True)
+    )
+    async def test_update_change_vnfpkg_charm_upgrade_failed(
+        self, mock_charm_upgrade, mock_charm_hash
+    ):
+        """ "Update type: CHANGE_VNFPKG, latest_vnfd revision changed"
+        Charm package exists, sw-version not changed, charm-upgrade failed"""
+        self.db.set_one(
+            "vnfds",
+            q_filter={"_id": vnfd_id},
+            update_dict={
+                "_admin.revision": 3,
+                "software-version": "1.0",
+                "kdu": [],
+            },
+        )
+        self.db.set_one(
+            "vnfds_revisions",
+            q_filter={"_id": vnfd_id + ":1"},
+            update_dict={
+                "_admin.revision": 1,
+                "software-version": "1.0",
+                "kdu": [],
+            },
+        )
+        self.db.set_one("vnfrs", q_filter={"_id": vnfr_id}, update_dict={"revision": 1})
 
-            mock_checksumdir = Mock(autospec=True)
-            mock_checksumdir.dirhash.side_effect = ["hash_value", "hash_value"]
+        mock_charm_hash.return_value = True
 
-            instance = self.my_ns
-            expected_result = False
+        task = asyncio.Future()
+        task.set_result(("FAILED", "some_error"))
+        mock_charm_upgrade.return_value = task
 
-            with patch.object(instance, "fs", fs), patch(
-                "checksumdir.dirhash", mock_checksumdir.dirhash
-            ), patch("os.path.exists", mock_path.exists):
+        instance = self.my_ns
+        fs = deepcopy(update_fs)
+        instance.fs = fs
+        expected_operation_state = "FAILED"
+        expected_operation_error = "some_error"
+        expected_vnfr_revision = 1
+        expected_ns_state = "INSTANTIATED"
+        expected_ns_operational_state = "running"
 
-                result = instance.check_charm_hash_changed(current_path, target_path)
-                self.assertEqual(
-                    result, expected_result, "Wrong charm hash control value"
-                )
-                self.assertEqual(mock_path.exists.call_count, 2)
-                self.assertEqual(mock_checksumdir.dirhash.call_count, 2)
+        await instance.update(nsr_id, nslcmop_id)
+        return_operation_state = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get(
+            "operationState"
+        )
+        return_operation_error = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get(
+            "errorMessage"
+        )
+        return_ns_operational_state = self.db.get_one("nsrs", {"_id": nsr_id}).get(
+            "operational-status"
+        )
+        return_vnfr_revision = self.db.get_one("vnfrs", {"_id": vnfr_id}).get(
+            "revision"
+        )
+        return_ns_state = self.db.get_one("nsrs", {"_id": nsr_id}).get("nsState")
+        self.assertEqual(fs.sync.call_count, 2)
+        self.assertEqual(mock_charm_hash.call_count, 1)
+        self.assertEqual(mock_charm_upgrade.call_count, 1)
+        self.assertEqual(return_ns_state, expected_ns_state)
+        self.assertEqual(return_operation_state, expected_operation_state)
+        self.assertEqual(return_operation_error, expected_operation_error)
+        self.assertEqual(return_ns_operational_state, expected_ns_operational_state)
+        self.assertEqual(return_vnfr_revision, expected_vnfr_revision)
+
+    def test_ns_update_find_sw_version_vnfd_not_includes(self):
+        """Find software version, VNFD does not have software version"""
+
+        db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+        expected_result = "1.0"
+        result = find_software_version(db_vnfd)
+        self.assertEqual(result, expected_result, "Default sw version should be 1.0")
+
+    def test_ns_update_find_sw_version_vnfd_includes(self):
+        """Find software version, VNFD includes software version"""
+
+        db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+        db_vnfd["software-version"] = "3.1"
+        expected_result = "3.1"
+        result = find_software_version(db_vnfd)
+        self.assertEqual(result, expected_result, "VNFD software version is wrong")
+
+    @patch("os.path.exists")
+    @patch("osm_lcm.lcm_utils.LcmBase.compare_charmdir_hash")
+    @patch("osm_lcm.lcm_utils.LcmBase.compare_charm_hash")
+    def test_ns_update_check_charm_hash_not_changed(
+        self, mock_compare_charm_hash, mock_compare_charmdir_hash, mock_path_exists
+    ):
+        """Check charm hash, Hash did not change"""
 
-            mock_reset()
+        current_path, target_path = "/tmp/charm1", "/tmp/charm1"
 
-        with self.subTest(i=4, t="Check charm hash, Hash has changed"):
-            # Testing method check_charm_hash_changed
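+        # Mock the filesystem so both charm paths resolve to the same existing directory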
+        fs = Mock()
+        fs.path.__add__ = Mock()
+        fs.path.side_effect = [current_path, target_path]
+        fs.path.__add__.side_effect = [current_path, target_path]
 
-            current_path, target_path = "/tmp/charm1", "/tmp/charm2"
-            fs = Mock(autospec=True)
-            fs.path.__add__ = Mock()
-            fs.path.side_effect = ["/", "/", "/", "/"]
+        mock_path_exists.side_effect = [True, True]
 
-            mock_path = Mock(autospec=True)
-            mock_path.exists.side_effect = [True, True]
+        mock_compare_charmdir_hash.return_value = callable(False)
+        mock_compare_charm_hash.return_value = callable(False)
 
-            mock_checksumdir = Mock(autospec=True)
-            mock_checksumdir.dirhash.side_effect = ["hash_value", "another_hash_value"]
+        instance = self.my_ns
+        instance.fs = fs
+        expected_result = False
 
-            instance = self.my_ns
-            expected_result = True
+        result = instance.check_charm_hash_changed(current_path, target_path)
+        self.assertEqual(result, expected_result, "Wrong charm hash control value")
+        self.assertEqual(mock_path_exists.call_count, 2)
+        self.assertEqual(mock_compare_charmdir_hash.call_count, 1)
+        self.assertEqual(mock_compare_charm_hash.call_count, 0)
 
-            with patch.object(instance, "fs", fs), patch(
-                "checksumdir.dirhash", mock_checksumdir.dirhash
-            ), patch("os.path.exists", mock_path.exists):
+    @patch("os.path.exists")
+    @patch("osm_lcm.lcm_utils.LcmBase.compare_charmdir_hash")
+    @patch("osm_lcm.lcm_utils.LcmBase.compare_charm_hash")
+    def test_ns_update_check_charm_hash_changed(
+        self, mock_compare_charm_hash, mock_compare_charmdir_hash, mock_path_exists
+    ):
+        """Check charm hash, Hash has changed"""
+
+        current_path, target_path = "/tmp/charm1", "/tmp/charm2"
+
+        fs = Mock()
+        fs.path.__add__ = Mock()
+        fs.path.side_effect = [current_path, target_path, current_path, target_path]
+        fs.path.__add__.side_effect = [
+            current_path,
+            target_path,
+            current_path,
+            target_path,
+        ]
 
-                result = instance.check_charm_hash_changed(current_path, target_path)
-                self.assertEqual(
-                    result, expected_result, "Wrong charm hash control value"
-                )
-                self.assertEqual(mock_path.exists.call_count, 2)
-                self.assertEqual(mock_checksumdir.dirhash.call_count, 2)
+        mock_path_exists.side_effect = [True, True]
+        mock_compare_charmdir_hash.return_value = callable(True)
+        mock_compare_charm_hash.return_value = callable(True)
+
+        instance = self.my_ns
+        instance.fs = fs
+        expected_result = True
+
+        result = instance.check_charm_hash_changed(current_path, target_path)
+        self.assertEqual(result, expected_result, "Wrong charm hash control value")
+        self.assertEqual(mock_path_exists.call_count, 2)
+        self.assertEqual(mock_compare_charmdir_hash.call_count, 1)
+        self.assertEqual(mock_compare_charm_hash.call_count, 0)
+
+    @patch("os.path.exists")
+    @patch("osm_lcm.lcm_utils.LcmBase.compare_charmdir_hash")
+    @patch("osm_lcm.lcm_utils.LcmBase.compare_charm_hash")
+    def test_ns_update_check_no_charm_path(
+        self, mock_compare_charm_hash, mock_compare_charmdir_hash, mock_path_exists
+    ):
+        """Check charm hash, Charm path does not exist"""
+
+        current_path, target_path = "/tmp/charm1", "/tmp/charm2"
+
+        fs = Mock()
+        fs.path.__add__ = Mock()
+        fs.path.side_effect = [current_path, target_path, current_path, target_path]
+        fs.path.__add__.side_effect = [
+            current_path,
+            target_path,
+            current_path,
+            target_path,
+        ]
 
-            mock_reset()
+        mock_path_exists.side_effect = [True, False]
 
-        with self.subTest(i=5, t="Check charm hash, Charm path does not exists"):
-            # Testing method check_charm_hash_changed
+        mock_compare_charmdir_hash.return_value = callable(False)
+        mock_compare_charm_hash.return_value = callable(False)
+        instance = self.my_ns
+        instance.fs = fs
 
-            current_path, target_path = "/tmp/charm1", "/tmp/charm2"
-            fs = Mock(autospec=True)
-            fs.path.__add__ = Mock()
-            fs.path.side_effect = ["/", "/", "/", "/"]
+        with self.assertRaises(LcmException):
+            instance.check_charm_hash_changed(current_path, target_path)
+            self.assertEqual(mock_path_exists.call_count, 2)
+            self.assertEqual(mock_compare_charmdir_hash.call_count, 0)
+            self.assertEqual(mock_compare_charm_hash.call_count, 0)
 
-            mock_path = Mock(autospec=True)
-            mock_path.exists.side_effect = [True, False]
+    def test_ns_update_check_juju_bundle_existence_bundle_exists(self):
+        """Check juju bundle existence"""
+        test_vnfd2 = self.db.get_one(
+            "vnfds", {"_id": "d96b1cdf-5ad6-49f7-bf65-907ada989293"}
+        )
+        expected_result = "stable/native-kdu"
+        result = check_juju_bundle_existence(test_vnfd2)
+        self.assertEqual(result, expected_result, "Wrong juju bundle name")
+
+    def test_ns_update_check_juju_bundle_existence_bundle_does_not_exist(self):
+        """Check juju bundle existence"""
+        test_vnfd1 = self.db.get_one("vnfds", {"_id": vnfd_id})
+        expected_result = None
+        result = check_juju_bundle_existence(test_vnfd1)
+        self.assertEqual(result, expected_result, "Wrong juju bundle name")
+
+    def test_ns_update_check_juju_bundle_existence_empty_vnfd(self):
+        """Check juju bundle existence"""
+        test_vnfd1 = {}
+        expected_result = None
+        result = check_juju_bundle_existence(test_vnfd1)
+        self.assertEqual(result, expected_result, "Wrong juju bundle name")
+
+    def test_ns_update_check_juju_bundle_existence_invalid_vnfd(self):
+        """Check juju bundle existence"""
+        test_vnfd1 = [{"_id": vnfd_id}]
+        with self.assertRaises(AttributeError):
+            check_juju_bundle_existence(test_vnfd1)
+
+    def test_ns_update_check_juju_charm_artifacts_base_folder_with_pkgdir(self):
+        """Check charm artifacts"""
+        base_folder = {
+            "folder": vnfd_id,
+            "pkg-dir": "hackfest_3charmed_vnfd",
+        }
+        charm_name = "simple"
+        charm_type = "lxc_proxy_charm"
+        revision = 3
+        expected_result = f"{vnfd_id}:3/hackfest_3charmed_vnfd/charms/simple"
+        result = get_charm_artifact_path(base_folder, charm_name, charm_type, revision)
+        self.assertEqual(result, expected_result, "Wrong charm artifact path")
+
+    def test_ns_update_check_juju_charm_artifacts_base_folder_without_pkgdir(self):
+        """Check charm artifacts, SOL004 packages"""
+        base_folder = {
+            "folder": vnfd_id,
+        }
+        charm_name = "basic"
+        charm_type, revision = "", ""
+        expected_result = f"{vnfd_id}/Scripts/helm-charts/basic"
+        result = get_charm_artifact_path(base_folder, charm_name, charm_type, revision)
+        self.assertEqual(result, expected_result, "Wrong charm artifact path")
 
-            mock_checksumdir = Mock(autospec=True)
-            mock_checksumdir.dirhash.side_effect = ["hash_value", "hash_value"]
 
-            instance = self.my_ns
+class TestInstantiateN2VC(TestMyNS):
+    async def setUp(self):
+        await super().setUp()
+        self.db_nsr = yaml.safe_load(descriptors.db_nsrs_text)[0]
+        self.db_vnfr = yaml.safe_load(descriptors.db_vnfrs_text)[0]
+        self.vca_index = 1
+        self.my_ns._write_configuration_status = Mock()
+
+    async def call_instantiate_N2VC(self):
+        logging_text = "N2VC Instantiation"
+        config_descriptor = {"config-access": {"ssh-access": {"default-user": "admin"}}}
+        base_folder = {"pkg-dir": "", "folder": "~"}
+        stage = ["Stage", "Message"]
+
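+        # Invoke instantiate_N2VC with a minimal native-charm configuration and no VDU/KDU context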
+        await self.my_ns.instantiate_N2VC(
+            logging_text=logging_text,
+            vca_index=self.vca_index,
+            nsi_id="nsi_id",
+            db_nsr=self.db_nsr,
+            db_vnfr=self.db_vnfr,
+            vdu_id=None,
+            kdu_name=None,
+            vdu_index=None,
+            config_descriptor=config_descriptor,
+            deploy_params={},
+            base_folder=base_folder,
+            nslcmop_id="nslcmop_id",
+            stage=stage,
+            vca_type="native_charm",
+            vca_name="vca_name",
+            ee_config_descriptor={},
+        )
 
-            with patch.object(instance, "fs", fs), patch(
-                "checksumdir.dirhash", mock_checksumdir.dirhash
-            ), patch("os.path.exists", mock_path.exists):
+    def check_config_status(self, expected_status):
+        self.my_ns._write_configuration_status.assert_called_with(
+            nsr_id=self.db_nsr["_id"], vca_index=self.vca_index, status=expected_status
+        )
 
-                with self.assertRaises(LcmException):
+    async def call_ns_add_relation(self):
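+        # Build a VNF-level EE relation referencing the execution environment of hackfest_vnf1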
+        ee_relation = EERelation(
+            {
+                "nsr-id": self.db_nsr["_id"],
+                "vdu-profile-id": None,
+                "kdu-resource-profile-id": None,
+                "vnf-profile-id": "hackfest_vnf1",
+                "execution-environment-ref": "f48163a6-c807-47bc-9682-f72caef5af85.alf-c-ab",
+                "endpoint": "127.0.0.1",
+            }
+        )
 
-                    instance.check_charm_hash_changed(current_path, target_path)
-                    self.assertEqual(mock_path.exists.call_count, 2)
-                    self.assertEqual(mock_checksumdir.dirhash.call_count, 0)
+        relation = Relation("relation-name", ee_relation, ee_relation)
+        cached_vnfrs = {"hackfest_vnf1": self.db_vnfr}
 
-            mock_reset()
+        return await self.my_ns._add_relation(
+            relation=relation,
+            vca_type="native_charm",
+            db_nsr=self.db_nsr,
+            cached_vnfds={},
+            cached_vnfrs=cached_vnfrs,
+        )
 
-        with self.subTest(i=6, t="Check juju bundle existence"):
-            # Testing method check_juju_bundle_existence
+    async def test_add_relation_ok(self):
+        await self.call_instantiate_N2VC()
+        self.check_config_status(expected_status="READY")
 
-            test_vnfd1 = self.db.get_one(
-                "vnfds", {"_id": "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77"}
-            )
-            test_vnfd2 = self.db.get_one(
-                "vnfds", {"_id": "d96b1cdf-5ad6-49f7-bf65-907ada989293"}
-            )
+    async def test_add_relation_returns_false_raises_exception(self):
+        self.my_ns._add_vca_relations = asynctest.CoroutineMock(return_value=False)
 
-            expected_result = None
-            result = check_juju_bundle_existence(test_vnfd1)
-            self.assertEqual(result, expected_result, "Wrong juju bundle name")
+        with self.assertRaises(LcmException) as exception:
+            await self.call_instantiate_N2VC()
 
-            expected_result = "stable/native-kdu"
-            result = check_juju_bundle_existence(test_vnfd2)
-            self.assertEqual(result, expected_result, "Wrong juju bundle name")
+        exception_msg = "Relations could not be added to VCA."
+        self.assertTrue(exception_msg in str(exception.exception))
+        self.check_config_status(expected_status="BROKEN")
 
-        with self.subTest(i=7, t="Check charm artifacts"):
-            # Testing method check_juju_bundle_existence
+    async def test_add_relation_raises_lcm_exception(self):
+        exception_msg = "Relations FAILED"
+        self.my_ns._add_vca_relations = asynctest.CoroutineMock(
+            side_effect=LcmException(exception_msg)
+        )
 
-            base_folder = {
-                "folder": "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77",
-                "pkg-dir": "hackfest_3charmed_vnfd",
-            }
-            charm_name = "simple"
-            charm_type = "lxc_proxy_charm"
-            revision = 3
+        with self.assertRaises(LcmException) as exception:
+            await self.call_instantiate_N2VC()
 
-            expected_result = "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77:3/hackfest_3charmed_vnfd/charms/simple"
-            result = get_charm_artifact_path(
-                base_folder, charm_name, charm_type, revision
-            )
-            self.assertEqual(result, expected_result, "Wrong charm artifact path")
+        self.assertTrue(exception_msg in str(exception.exception))
+        self.check_config_status(expected_status="BROKEN")
 
-            # SOL004 packages
-            base_folder = {
-                "folder": "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77",
-            }
-            charm_name = "basic"
-            charm_type = ""
-            revision = ""
+    async def test_n2vc_add_relation_fails_raises_exception(self):
+        exception_msg = "N2VC failed to add relations"
+        self.my_ns.n2vc.add_relation = asynctest.CoroutineMock(
+            side_effect=N2VCException(exception_msg)
+        )
+        with self.assertRaises(LcmException) as exception:
+            await self.call_ns_add_relation()
+        self.assertTrue(exception_msg in str(exception.exception))
 
-            expected_result = (
-                "7637bcf8-cf14-42dc-ad70-c66fcf1e6e77/Scripts/helm-charts/basic"
-            )
-            result = get_charm_artifact_path(
-                base_folder, charm_name, charm_type, revision
-            )
-            self.assertEqual(result, expected_result, "Wrong charm artifact path")
+    async def test_n2vc_add_relation_ok_returns_true(self):
+        self.my_ns.n2vc.add_relation = asynctest.CoroutineMock(return_value=None)
+        self.assertTrue(await self.call_ns_add_relation())
 
 
 if __name__ == "__main__":
diff --git a/osm_lcm/tests/test_paas.py b/osm_lcm/tests/test_paas.py
deleted file mode 100644 (file)
index 10da9a5..0000000
+++ /dev/null
@@ -1,303 +0,0 @@
-#!/usr/bin/python3
-
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
-
-import asyncio
-from unittest import TestCase
-from unittest.mock import Mock, patch
-from osm_common import msgbase
-from osm_common.dbbase import DbException
-from osm_lcm.paas import PaasLcm
-
-
-class TestPaasLcm(TestCase):
-    @patch("osm_lcm.lcm_utils.Database")
-    @patch("osm_lcm.lcm_utils.Filesystem")
-    def setUp(self, mock_filesystem, mock_database):
-        self.loop = asyncio.get_event_loop()
-        self.msg = Mock(msgbase.MsgBase())
-        self.lcm_tasks = Mock()
-        self.lcm_tasks.lock_HA.return_value = True
-        self.config = {"database": {"driver": "mongo"}}
-        self.paas_lcm = PaasLcm(self.msg, self.lcm_tasks, self.config, self.loop)
-        self.paas_lcm.db = Mock()
-        self.paas_lcm.fs = Mock()
-        self.paas_lcm.update_db_2 = Mock()
-        self.op_id = "op-id"
-        self.paas_id = "paas-id"
-        self.order_id = "order-id"
-        self.paas_content = {"op_id": self.op_id, "_id": self.paas_id}
-        self.db_paas = {
-            "_id": "_id",
-            "name": "paas-name",
-            "secret": "secret",
-            "schema_version": "1.11",
-        }
-
-    def check_assert_not_called_when_legacy_nbi(self):
-        self.paas_lcm.db.get_one.assert_not_called()
-        self.paas_lcm.db.encrypt_decrypt_fields.assert_not_called()
-        self.paas_lcm.update_db_2.assert_not_called()
-        self.lcm_tasks.unlock_HA.assert_not_called()
-        self.lcm_tasks.remove.assert_not_called()
-
-    def check_db_update_when_successful_connectivity(self):
-        self.paas_lcm.update_db_2.assert_called_with(
-            "paas",
-            self.paas_id,
-            {
-                "_admin.operationalState": "ENABLED",
-                "_admin.detailed-status": "Connectivity: ok",
-            },
-        )
-
-    def check_db_update_when_db_exception(self):
-        self.paas_lcm.update_db_2.assert_called_with(
-            "paas",
-            self.paas_id,
-            {
-                "_admin.operationalState": "ERROR",
-                "_admin.detailed-status": "Failed with exception: database exception failed",
-            },
-        )
-
-    def test_paas_lcm_create_legacy_nbi(self):
-        self.lcm_tasks.lock_HA.return_value = False
-        self.loop.run_until_complete(
-            self.paas_lcm.create(self.paas_content, self.order_id)
-        )
-        self.check_assert_not_called_when_legacy_nbi()
-
-    def test_paas_lcm_create(self):
-        self.paas_lcm.db.get_one.return_value = self.db_paas
-        self.loop.run_until_complete(
-            self.paas_lcm.create(self.paas_content, self.order_id)
-        )
-
-        self.lcm_tasks.lock_HA.assert_called_with("paas", "create", self.op_id)
-        self.paas_lcm.db.encrypt_decrypt_fields.assert_called_with(
-            self.db_paas, "decrypt", ["secret"], schema_version="1.11", salt="_id"
-        )
-        self.check_db_update_when_successful_connectivity()
-        self.lcm_tasks.unlock_HA.assert_called_with(
-            "paas",
-            "create",
-            self.op_id,
-            operationState="COMPLETED",
-            detailed_status="PaaS validated",
-        )
-        self.lcm_tasks.remove.assert_called_with("paas", self.paas_id, self.order_id)
-
-    def test_paas_lcm_create_exception_getting_and_updating_db(self):
-        self.paas_lcm.db.get_one.side_effect = DbException("failed")
-        self.paas_lcm.update_db_2.side_effect = DbException("failed")
-        self.loop.run_until_complete(
-            self.paas_lcm.create(self.paas_content, self.order_id)
-        )
-
-        self.lcm_tasks.lock_HA.assert_called_with("paas", "create", self.op_id)
-        self.paas_lcm.db.encrypt_decrypt_fields.assert_not_called()
-
-        self.check_db_update_when_db_exception()
-        self.lcm_tasks.unlock_HA.assert_not_called()
-        self.lcm_tasks.remove.assert_called_with("paas", self.paas_id, self.order_id)
-
-    def test_paas_lcm_create_exception_updating_db(self):
-
-        self.paas_lcm.db.get_one.return_value = self.db_paas
-        self.paas_lcm.update_db_2.side_effect = DbException("failed")
-        self.loop.run_until_complete(
-            self.paas_lcm.create(self.paas_content, self.order_id)
-        )
-
-        self.lcm_tasks.lock_HA.assert_called_with("paas", "create", self.op_id)
-        self.paas_lcm.db.encrypt_decrypt_fields.assert_called_with(
-            self.db_paas, "decrypt", ["secret"], schema_version="1.11", salt="_id"
-        )
-        self.check_db_update_when_successful_connectivity()
-        self.lcm_tasks.unlock_HA.assert_not_called()
-        self.lcm_tasks.remove.assert_called_with("paas", self.paas_id, self.order_id)
-
-    def test_paas_lcm_create_exception_getting_from_db(self):
-        self.paas_lcm.db.get_one.side_effect = DbException("failed")
-        self.loop.run_until_complete(
-            self.paas_lcm.create(self.paas_content, self.order_id)
-        )
-        self.lcm_tasks.lock_HA.assert_called_with("paas", "create", self.op_id)
-        self.paas_lcm.db.encrypt_decrypt_fields.assert_not_called()
-        self.check_db_update_when_db_exception()
-        self.lcm_tasks.unlock_HA.assert_called_with(
-            "paas",
-            "create",
-            self.op_id,
-            operationState="FAILED",
-            detailed_status="Failed with exception: database exception failed",
-        )
-        self.lcm_tasks.remove.assert_called_with("paas", self.paas_id, self.order_id)
-
-    def test_paas_lcm_edit_legacy_nbi(self):
-        self.lcm_tasks.lock_HA.return_value = False
-        self.loop.run_until_complete(
-            self.paas_lcm.edit(self.paas_content, self.order_id)
-        )
-        self.check_assert_not_called_when_legacy_nbi()
-
-    def test_paas_lcm_edit(self):
-
-        self.paas_lcm.db.get_one.return_value = self.db_paas
-        self.loop.run_until_complete(
-            self.paas_lcm.edit(self.paas_content, self.order_id)
-        )
-
-        self.lcm_tasks.lock_HA.assert_called_with("paas", "edit", self.op_id)
-        self.paas_lcm.db.encrypt_decrypt_fields.assert_called_with(
-            self.db_paas, "decrypt", ["secret"], schema_version="1.11", salt="_id"
-        )
-        self.check_db_update_when_successful_connectivity()
-        self.lcm_tasks.unlock_HA.assert_called_with(
-            "paas",
-            "edit",
-            self.op_id,
-            operationState="COMPLETED",
-            detailed_status="PaaS validated",
-        )
-        self.lcm_tasks.remove.assert_called_with("paas", self.paas_id, self.order_id)
-
-    def test_paas_lcm_edit_exception_getting_and_updating_db(self):
-        self.paas_lcm.db.get_one.side_effect = DbException("failed")
-        self.paas_lcm.update_db_2.side_effect = DbException("failed")
-        self.loop.run_until_complete(
-            self.paas_lcm.edit(self.paas_content, self.order_id)
-        )
-
-        self.lcm_tasks.lock_HA.assert_called_with("paas", "edit", self.op_id)
-        self.paas_lcm.db.encrypt_decrypt_fields.assert_not_called()
-
-        self.check_db_update_when_db_exception()
-        self.lcm_tasks.unlock_HA.assert_not_called()
-        self.lcm_tasks.remove.assert_called_with("paas", self.paas_id, self.order_id)
-
-    def test_paas_lcm_edit_exception_updating_db(self):
-        self.paas_lcm.db.get_one.return_value = self.db_paas
-        self.paas_lcm.update_db_2.side_effect = DbException("failed")
-        self.loop.run_until_complete(
-            self.paas_lcm.edit(self.paas_content, self.order_id)
-        )
-
-        self.lcm_tasks.lock_HA.assert_called_with("paas", "edit", self.op_id)
-        self.paas_lcm.db.encrypt_decrypt_fields.assert_called_with(
-            self.db_paas, "decrypt", ["secret"], schema_version="1.11", salt="_id"
-        )
-        self.check_db_update_when_successful_connectivity()
-        self.lcm_tasks.unlock_HA.assert_not_called()
-        self.lcm_tasks.remove.assert_called_with("paas", self.paas_id, self.order_id)
-
-    def test_paas_lcm_edit_exception_getting_from_db(self):
-        self.paas_lcm.db.get_one.side_effect = DbException("failed")
-        self.loop.run_until_complete(
-            self.paas_lcm.edit(self.paas_content, self.order_id)
-        )
-        self.lcm_tasks.lock_HA.assert_called_with("paas", "edit", self.op_id)
-        self.paas_lcm.db.encrypt_decrypt_fields.assert_not_called()
-        self.check_db_update_when_db_exception()
-        self.lcm_tasks.unlock_HA.assert_called_with(
-            "paas",
-            "edit",
-            self.op_id,
-            operationState="FAILED",
-            detailed_status="Failed with exception: database exception failed",
-        )
-        self.lcm_tasks.remove.assert_called_with("paas", self.paas_id, self.order_id)
-
-    def test_paas_lcm_delete_legacy_nbi(self):
-        self.lcm_tasks.lock_HA.return_value = False
-        self.loop.run_until_complete(
-            self.paas_lcm.delete(self.paas_content, self.order_id)
-        )
-        self.check_assert_not_called_when_legacy_nbi()
-
-    def test_paas_lcm_delete(self):
-        self.loop.run_until_complete(
-            self.paas_lcm.delete(self.paas_content, self.order_id)
-        )
-
-        self.lcm_tasks.lock_HA.assert_called_with("paas", "delete", self.op_id)
-        self.paas_lcm.db.del_one.assert_called_with("paas", {"_id": self.paas_id})
-        self.paas_lcm.update_db_2.assert_called_with("paas", self.paas_id, None)
-        self.lcm_tasks.unlock_HA.assert_called_with(
-            "paas",
-            "delete",
-            self.op_id,
-            operationState="COMPLETED",
-            detailed_status="deleted",
-        )
-        self.lcm_tasks.remove.assert_called_with("paas", self.paas_id, self.order_id)
-
-    def test_paas_lcm_delete_exception_deleting_from_db(self):
-        self.paas_lcm.db.del_one.side_effect = Exception("failed deleting")
-        self.loop.run_until_complete(
-            self.paas_lcm.delete(self.paas_content, self.order_id)
-        )
-
-        self.lcm_tasks.lock_HA.assert_called_with("paas", "delete", self.op_id)
-        self.paas_lcm.db.del_one.assert_called_with("paas", {"_id": self.paas_id})
-        self.paas_lcm.update_db_2.assert_called_with(
-            "paas",
-            self.paas_id,
-            {
-                "_admin.operationalState": "ERROR",
-                "_admin.detailed-status": "Failed with exception: failed deleting",
-            },
-        )
-        self.lcm_tasks.unlock_HA.assert_called_with(
-            "paas",
-            "delete",
-            self.op_id,
-            operationState="FAILED",
-            detailed_status="Failed with exception: failed deleting",
-        )
-        self.lcm_tasks.remove.assert_called_with("paas", self.paas_id, self.order_id)
-
-    def test_paas_lcm_delete_exception_updating_db(self):
-        self.loop.run_until_complete(
-            self.paas_lcm.delete(self.paas_content, self.order_id)
-        )
-
-        self.lcm_tasks.lock_HA.assert_called_with("paas", "delete", self.op_id)
-        self.paas_lcm.db.del_one.assert_called_with("paas", {"_id": self.paas_id})
-        self.paas_lcm.update_db_2.assert_called_with("paas", self.paas_id, None)
-        self.lcm_tasks.unlock_HA.not_called()
-        self.lcm_tasks.remove.assert_called_with("paas", self.paas_id, self.order_id)
-
-    def test_paas_lcm_delete_exception_deleting_and_updating_db(self):
-        self.paas_lcm.db.del_one.side_effect = Exception("failed deleting")
-        self.paas_lcm.update_db_2.side_effect = DbException("failed")
-
-        self.loop.run_until_complete(
-            self.paas_lcm.delete(self.paas_content, self.order_id)
-        )
-
-        self.lcm_tasks.lock_HA.assert_called_with("paas", "delete", self.op_id)
-        self.paas_lcm.db.del_one.assert_called_with("paas", {"_id": self.paas_id})
-        self.paas_lcm.update_db_2.assert_called_with(
-            "paas",
-            self.paas_id,
-            {
-                "_admin.operationalState": "ERROR",
-                "_admin.detailed-status": "Failed with exception: failed deleting",
-            },
-        )
-        self.lcm_tasks.unlock_HA.not_called()
-        self.lcm_tasks.remove.assert_called_with("paas", self.paas_id, self.order_id)
diff --git a/osm_lcm/tests/test_paas_conn.py b/osm_lcm/tests/test_paas_conn.py
deleted file mode 100644 (file)
index 3d463aa..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
-
-import asyncio
-from unittest import TestCase
-from unittest.mock import Mock
-from osm_lcm.paas_conn import (
-    paas_connector_factory,
-    JujuPaasConnector,
-    PaasConnException,
-)
-
-
-class TestPaasConnectorFactory(TestCase):
-    def test_paas_connector_factory_default_param_is_juju(self):
-        connector = paas_connector_factory(
-            "uuid", "name", Mock(), Mock(), Mock(), Mock(), Mock()
-        )
-        assert isinstance(connector, JujuPaasConnector)
-
-    def test_paas_connector_factory(self):
-        connector = paas_connector_factory(
-            "uuid", "name", Mock(), Mock(), Mock(), Mock(), Mock(), "juju"
-        )
-        assert isinstance(connector, JujuPaasConnector)
-
-    def test_paas_connector_factory_not_existing_type(self):
-        with self.assertRaises(PaasConnException):
-            paas_connector_factory(
-                "uuid", "name", Mock(), Mock(), Mock(), Mock(), Mock(), "other"
-            )
-
-
-class TestJujuPaasConnector(TestCase):
-    def setUp(self):
-        self.juju_paas_connector = JujuPaasConnector("uuid", "name")
-
-    def test_connect(self):
-        with self.assertRaises(NotImplementedError):
-            asyncio.run(self.juju_paas_connector.connect("endpoint", "user", "secret"))
-
-    def test_instantiate(self):
-        asyncio.run(self.juju_paas_connector.instantiate("nsr_id", "nslcmop_id"))
-
-    def test_terminate(self):
-        asyncio.run(self.juju_paas_connector.terminate("nsr_id", "nslcmop_id"))
-
-    def test_action(self):
-        with self.assertRaises(NotImplementedError):
-            asyncio.run(self.juju_paas_connector.action("nsr_id", "nslcmop_id"))
diff --git a/osm_lcm/tests/test_paas_service.py b/osm_lcm/tests/test_paas_service.py
deleted file mode 100644 (file)
index 668c364..0000000
+++ /dev/null
@@ -1,648 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
-
-import asynctest
-import asyncio
-from copy import deepcopy
-from unittest import TestCase
-from unittest.mock import Mock, patch, call
-from osm_lcm.lcm_utils import TaskRegistry
-from osm_common import msgbase
-from osm_common.dbbase import DbException
-from osm_lcm.paas_service import (
-    paas_service_factory,
-    JujuPaasService,
-    PaasServiceException,
-)
-from osm_lcm.paas_conn import JujuPaasConnector
-
-
-class CopyingMock(Mock):
-    def __call__(self, *args, **kwargs):
-        args = deepcopy(args)
-        kwargs = deepcopy(kwargs)
-        return super(CopyingMock, self).__call__(*args, **kwargs)
-
-
-@patch("osm_lcm.lcm_utils.Database")
-@patch("osm_lcm.lcm_utils.Filesystem")
-class TestPaasServiceFactory(TestCase):
-    def test_paas_service_factory_default_param_is_juju(self, mock_fs, mock_db):
-        service = paas_service_factory(
-            Mock(), Mock(), Mock(), Mock(), Mock(), Mock(), Mock()
-        )
-        assert isinstance(service, JujuPaasService)
-
-    def test_paas_service_factory(self, mock_fs, mock_db):
-
-        service = paas_service_factory(
-            Mock(), Mock(), Mock(), Mock(), Mock(), Mock(), Mock(), "juju"
-        )
-        assert isinstance(service, JujuPaasService)
-
-    def test_paas_service_factory_not_existing_type(self, mock_fs, mock_db):
-        with self.assertRaises(PaasServiceException):
-            paas_service_factory(
-                Mock(), Mock(), Mock(), Mock(), Mock(), Mock(), Mock(), "other"
-            )
-
-
-class TestJujuPaasService(TestCase):
-    @patch("osm_lcm.lcm_utils.Database")
-    @patch("osm_lcm.lcm_utils.Filesystem")
-    def setUp(self, mock_fs, mock_db):
-        self.msg = Mock(msgbase.MsgBase())
-        self.set_lcm_tasks_mock_behavior()
-        loop = asyncio.new_event_loop()
-        asyncio.set_event_loop(loop)
-        self.loop = asyncio.get_event_loop()
-        self.logger = Mock()
-        self.juju_paas_service = JujuPaasService(
-            self.msg, self.lcm_tasks, Mock(), Mock(), self.loop, self.logger, Mock()
-        )
-        self.juju_paas_service.db = Mock()
-        self.task = Mock()
-        self.task.cancelled.return_value = False
-        self.task.exception.return_value = None
-        self.juju_paas_service.logger = asynctest.Mock(self.juju_paas_service.logger)
-        self.nsr_id = "nsr_id"
-        self.nslcmop_id = "nslcmop_id"
-        self.time_value = 12
-
-        self.before_instantiate_ns_upd = {
-            "detailed-status": "creating",
-            "operational-status": "init",
-            "_admin.nslcmop": self.nslcmop_id,
-            "_admin.current-operation": self.nslcmop_id,
-            "_admin.operation-type": "INSTANTIATING",
-            "currentOperation": "INSTANTIATING",
-            "currentOperationID": self.nslcmop_id,
-            "errorDescription": None,
-            "errorDetail": None,
-            "nsState": "BUILDING",
-            "_admin.modified": self.time_value,
-        }
-
-        self.before_instantiate_op_upd = {
-            "queuePosition": 0,
-            "stage": "Building",
-            "_admin.modified": self.time_value,
-        }
-
-        self.before_terminate_ns_upd = {
-            "detailed-status": "terminating",
-            "operational-status": "terminate",
-            "_admin.nslcmop": self.nslcmop_id,
-            "_admin.current-operation": self.nslcmop_id,
-            "_admin.operation-type": "TERMINATING",
-            "currentOperation": "TERMINATING",
-            "currentOperationID": self.nslcmop_id,
-            "errorDescription": None,
-            "errorDetail": None,
-            "nsState": "TERMINATING",
-            "_admin.modified": self.time_value,
-        }
-
-        self.before_terminate_op_upd = {
-            "queuePosition": 0,
-            "stage": "terminating",
-            "_admin.modified": self.time_value,
-        }
-
-    def set_lcm_tasks_mock_behavior(self):
-        self.lcm_tasks = asynctest.Mock(TaskRegistry())
-        self.lcm_tasks.lock_HA.return_value = True
-        self.lcm_tasks.waitfor_related_HA.return_value = None
-
-    def call_instantiate(self):
-        self.loop.run_until_complete(
-            self.juju_paas_service.instantiate(self.nsr_id, self.nslcmop_id)
-        )
-
-    def call_terminate(self):
-        self.loop.run_until_complete(
-            self.juju_paas_service.terminate(self.nsr_id, self.nslcmop_id)
-        )
-
-    def check_db_update(self, expected_calls):
-        self.juju_paas_service.db.set_one.assert_has_calls(expected_calls)
-
-    def config_instantiate_test(self, paas_mock_time, lcm_mock_time):
-        nsr = {"_id": self.nsr_id}
-        self.juju_paas_service.db.get_one.return_value = nsr
-        self.juju_paas_service.paas_connector = Mock()
-        self.juju_paas_service.db.set_one = CopyingMock()
-        paas_mock_time.return_value = 0
-        lcm_mock_time.return_value = self.time_value
-
-    def config_terminate_test(self, paas_mock_time, lcm_mock_time):
-        nsr = {"_id": self.nsr_id}
-        nslcmop = {"_id": self.nslcmop_id}
-        self.juju_paas_service.paas_connector = Mock()
-        self.juju_paas_service.db.get_one.side_effect = [nsr, nslcmop]
-        self.juju_paas_service.db.set_one = CopyingMock()
-        paas_mock_time.return_value = 0
-        lcm_mock_time.return_value = self.time_value
-
-    def instantiate_expected_calls(self, ensure_future_mock, kafka_msg):
-        self.juju_paas_service.db.get_one.assert_called_with(
-            "nsrs", {"_id": self.nsr_id}
-        )
-        self.lcm_tasks.waitfor_related_HA.assert_called_with(
-            "ns", "nslcmops", self.nslcmop_id
-        )
-        ensure_future_mock.assert_called_once_with(
-            self.juju_paas_service.paas_connector.instantiate()
-        )
-
-        self.juju_paas_service.msg.aiowrite.assert_called_with(
-            "ns", "instantiated", kafka_msg, loop=self.juju_paas_service.loop
-        )
-        self.juju_paas_service.lcm_tasks.remove.assert_called_with(
-            "ns", self.nsr_id, self.nslcmop_id, "ns_instantiate"
-        )
-
-    def terminate_expected_calls(self, ensure_future_mock, kafka_msg):
-        self.juju_paas_service.db.get_one.assert_has_calls(
-            [
-                call("nsrs", {"_id": self.nsr_id}),
-                call("nslcmops", {"_id": self.nslcmop_id}),
-            ]
-        )
-        self.lcm_tasks.waitfor_related_HA.assert_called_with(
-            "ns", "nslcmops", self.nslcmop_id
-        )
-        ensure_future_mock.assert_called_once_with(
-            self.juju_paas_service.paas_connector.terminate()
-        )
-        self.juju_paas_service.msg.aiowrite.assert_called_with(
-            "ns", "terminated", kafka_msg, loop=self.juju_paas_service.loop
-        )
-        self.juju_paas_service.lcm_tasks.remove.assert_called_with(
-            "ns", self.nsr_id, self.nslcmop_id, "ns_terminate"
-        )
-
-    def test_paas_connector_is_juju(self):
-        assert isinstance(self.juju_paas_service.paas_connector, JujuPaasConnector)
-
-    @patch("osm_lcm.paas_service.asyncio.ensure_future")
-    def test_paas_service_instantiate_lock_ha_returns_false(self, ensure_future_mock):
-        self.lcm_tasks.lock_HA.return_value = False
-        self.call_instantiate()
-        self.lcm_tasks.waitfor_related_HA.assert_not_called()
-        self.juju_paas_service.db.set_one.assert_not_called()
-        self.juju_paas_service.db.get_one.assert_not_called()
-        ensure_future_mock.assert_not_called()
-        self.juju_paas_service.lcm_tasks.register.assert_not_called()
-        self.juju_paas_service.msg.aiowrite.assert_not_called()
-        self.juju_paas_service.lcm_tasks.remove.assert_not_called()
-
-    @patch("osm_lcm.paas_service.asyncio.ensure_future")
-    def test_paas_service_terminate_lock_ha_returns_false(self, ensure_future_mock):
-        self.lcm_tasks.lock_HA.return_value = False
-        self.call_terminate()
-        self.lcm_tasks.waitfor_related_HA.assert_not_called()
-        self.juju_paas_service.db.set_one.assert_not_called()
-        self.juju_paas_service.db.get_one.assert_not_called()
-        ensure_future_mock.assert_not_called()
-        self.juju_paas_service.lcm_tasks.register.assert_not_called()
-        self.juju_paas_service.msg.aiowrite.assert_not_called()
-        self.juju_paas_service.lcm_tasks.remove.assert_not_called()
-
-    @patch("osm_lcm.paas_service.asyncio.ensure_future")
-    def test_instantiate_nsrs_does_not_exist_on_db(self, ensure_future_mock):
-
-        self.juju_paas_service.db.get_one.side_effect = DbException("failed")
-        with self.assertRaises(DbException):
-            self.call_instantiate()
-        self.lcm_tasks.waitfor_related_HA.assert_not_called()
-        self.juju_paas_service.db.set_one.assert_not_called()
-        ensure_future_mock.assert_not_called()
-        self.juju_paas_service.lcm_tasks.register.assert_not_called()
-        self.juju_paas_service.msg.aiowrite.assert_not_called()
-        self.juju_paas_service.lcm_tasks.remove.assert_not_called()
-
-    @patch("osm_lcm.lcm_utils.time")
-    @patch("osm_lcm.paas_service.time")
-    @patch("osm_lcm.paas_service.asyncio.ensure_future")
-    @patch("osm_lcm.paas_service.asyncio.wait")
-    def test_instantiate(
-        self, mock_await, ensure_future_mock, paas_mock_time, lcm_mock_time
-    ):
-        self.config_instantiate_test(paas_mock_time, lcm_mock_time)
-        ensure_future_mock.return_value = self.task
-        mock_await.return_value = [self.task], []
-
-        ns_instanciated_upd = {
-            "_admin.nsState": "INSTANTIATED",
-            "_admin.modified": self.time_value,
-        }
-
-        pending_tasks_before_loop_upd = {
-            "queuePosition": 0,
-            "stage": "Waiting for instantiate pending tasks.: 0/1",
-            "_admin.modified": self.time_value,
-        }
-
-        pending_tasks_during_loop_upd = {
-            "queuePosition": 0,
-            "stage": "Waiting for instantiate pending tasks.: 1/1",
-            "_admin.modified": self.time_value,
-        }
-
-        kafka_msg = {
-            "nsr_id": self.nsr_id,
-            "nslcmop_id": self.nslcmop_id,
-            "operationState": "COMPLETED",
-        }
-
-        after_instantiate_ns_upd = {
-            "operational-status": "running",
-            "detailed-status": "Done",
-            "_admin.nslcmop": None,
-            "_admin.current-operation": None,
-            "_admin.operation-type": None,
-            "currentOperation": "IDLE",
-            "currentOperationID": None,
-            "errorDescription": None,
-            "errorDetail": None,
-            "nsState": "READY",
-            "_admin.modified": self.time_value,
-        }
-
-        after_instantiate_op_upd = {
-            "detailed-status": "Done",
-            "queuePosition": 0,
-            "stage": "COMPLETED",
-            "operationState": "COMPLETED",
-            "statusEnteredTime": 0,
-            "_admin.modified": self.time_value,
-        }
-
-        self.call_instantiate()
-        self.juju_paas_service.logger.error.assert_not_called()
-        self.juju_paas_service.logger.exception().assert_not_called()
-
-        db_update_expected_calls = [
-            call("nsrs", {"_id": self.nsr_id}, self.before_instantiate_ns_upd),
-            call("nslcmops", {"_id": self.nslcmop_id}, self.before_instantiate_op_upd),
-            call("nsrs", {"_id": self.nsr_id}, ns_instanciated_upd),
-            call("nslcmops", {"_id": self.nslcmop_id}, pending_tasks_before_loop_upd),
-            call("nslcmops", {"_id": self.nslcmop_id}, pending_tasks_during_loop_upd),
-            call("nsrs", {"_id": self.nsr_id}, after_instantiate_ns_upd),
-            call("nslcmops", {"_id": self.nslcmop_id}, after_instantiate_op_upd),
-        ]
-
-        self.check_db_update(db_update_expected_calls)
-
-        self.juju_paas_service.lcm_tasks.register.assert_called_with(
-            "ns",
-            self.nsr_id,
-            self.nslcmop_id,
-            "instantiate_juju_paas_service",
-            self.task,
-        )
-        self.instantiate_expected_calls(ensure_future_mock, kafka_msg)
-
-    @patch("osm_lcm.lcm_utils.time")
-    @patch("osm_lcm.paas_service.time")
-    @patch("osm_lcm.paas_service.asyncio.ensure_future")
-    @patch("osm_lcm.paas_service.asyncio.wait")
-    def test_instantiate_with_exception(
-        self, mock_await, ensure_future_mock, paas_mock_time, lcm_mock_time
-    ):
-        self.config_instantiate_test(paas_mock_time, lcm_mock_time)
-        ensure_future_mock.side_effect = asyncio.CancelledError
-        mock_await.return_value = [self.task], []
-
-        kafka_msg = {
-            "nsr_id": self.nsr_id,
-            "nslcmop_id": self.nslcmop_id,
-            "operationState": "FAILED",
-        }
-
-        after_instantiate_ns_upd = {
-            "operational-status": "running",
-            "detailed-status": "Operation: instantiate.nslcmop_id Detail: Operation was cancelled",
-            "_admin.nslcmop": None,
-            "_admin.current-operation": None,
-            "_admin.operation-type": None,
-            "currentOperation": "IDLE",
-            "currentOperationID": None,
-            "errorDescription": "Operation: instantiate.nslcmop_id",
-            "errorDetail": "Operation was cancelled",
-            "nsState": "BROKEN",
-            "_admin.modified": self.time_value,
-        }
-
-        after_instantiate_op_upd = {
-            "detailed-status": "Operation was cancelled",
-            "queuePosition": 0,
-            "stage": "FAILED",
-            "errorMessage": "Detail: Operation was cancelled",
-            "operationState": "FAILED",
-            "statusEnteredTime": 0,
-            "_admin.modified": self.time_value,
-        }
-
-        self.call_instantiate()
-        db_update_expected_calls = [
-            call("nsrs", {"_id": "nsr_id"}, self.before_instantiate_ns_upd),
-            call("nslcmops", {"_id": "nslcmop_id"}, self.before_instantiate_op_upd),
-            call("nsrs", {"_id": "nsr_id"}, after_instantiate_ns_upd),
-            call("nslcmops", {"_id": "nslcmop_id"}, after_instantiate_op_upd),
-        ]
-
-        self.check_db_update(db_update_expected_calls)
-        self.juju_paas_service.lcm_tasks.register.assert_not_called()
-        self.instantiate_expected_calls(ensure_future_mock, kafka_msg)
-
-    @patch("osm_lcm.lcm_utils.time")
-    @patch("osm_lcm.paas_service.time")
-    @patch("osm_lcm.paas_service.asyncio.ensure_future")
-    @patch("osm_lcm.paas_service.asyncio.wait")
-    def test_instantiate_timeout(
-        self, mock_await, ensure_future_mock, paas_mock_time, lcm_mock_time
-    ):
-        self.config_instantiate_test(paas_mock_time, lcm_mock_time)
-        ensure_future_mock.return_value = self.task
-        mock_await.return_value = [], [self.task]  # Set timeout
-
-        ns_instanciated_upd = {
-            "_admin.nsState": "INSTANTIATED",
-            "_admin.modified": self.time_value,
-        }
-
-        pending_tasks_before_loop_upd = {
-            "queuePosition": 0,
-            "stage": "Waiting for instantiate pending tasks.: 0/1",
-            "_admin.modified": self.time_value,
-        }
-
-        kafka_msg = {
-            "nsr_id": self.nsr_id,
-            "nslcmop_id": self.nslcmop_id,
-            "operationState": "FAILED",
-        }
-
-        after_instantiate_ns_upd = {
-            "operational-status": "running",
-            "detailed-status": "Operation: instantiate.nslcmop_id Detail: Instantiate juju PaaS Service: Timeout",
-            "_admin.nslcmop": None,
-            "_admin.current-operation": None,
-            "_admin.operation-type": None,
-            "currentOperation": "IDLE",
-            "currentOperationID": None,
-            "errorDescription": "Operation: instantiate.nslcmop_id",
-            "errorDetail": "Instantiate juju PaaS Service: Timeout",
-            "nsState": "BROKEN",
-            "_admin.modified": self.time_value,
-        }
-
-        after_instantiate_op_upd = {
-            "detailed-status": "Instantiate juju PaaS Service: Timeout",
-            "queuePosition": 0,
-            "stage": "FAILED",
-            "errorMessage": "Detail: Instantiate juju PaaS Service: Timeout",
-            "operationState": "FAILED",
-            "statusEnteredTime": 0,
-            "_admin.modified": self.time_value,
-        }
-
-        self.call_instantiate()
-        db_update_expected_calls = [
-            call("nsrs", {"_id": "nsr_id"}, self.before_instantiate_ns_upd),
-            call("nslcmops", {"_id": "nslcmop_id"}, self.before_instantiate_op_upd),
-            call("nsrs", {"_id": "nsr_id"}, ns_instanciated_upd),
-            call("nslcmops", {"_id": "nslcmop_id"}, pending_tasks_before_loop_upd),
-            call("nsrs", {"_id": "nsr_id"}, after_instantiate_ns_upd),
-            call("nslcmops", {"_id": "nslcmop_id"}, after_instantiate_op_upd),
-        ]
-        self.check_db_update(db_update_expected_calls)
-        self.juju_paas_service.lcm_tasks.register.assert_called_with(
-            "ns",
-            self.nsr_id,
-            self.nslcmop_id,
-            "instantiate_juju_paas_service",
-            self.task,
-        )
-        self.instantiate_expected_calls(ensure_future_mock, kafka_msg)
-
-    @patch("osm_lcm.lcm_utils.time")
-    @patch("osm_lcm.paas_service.time")
-    @patch("osm_lcm.paas_service.asyncio.ensure_future")
-    @patch("osm_lcm.paas_service.asyncio.wait")
-    def test_terminate(
-        self, mock_await, ensure_future_mock, paas_mock_time, lcm_mock_time
-    ):
-        self.config_terminate_test(paas_mock_time, lcm_mock_time)
-        ensure_future_mock.return_value = self.task
-        mock_await.return_value = [self.task], []
-
-        ns_terminate_upd = {
-            "_admin.nsState": "TERMINATED",
-            "_admin.modified": self.time_value,
-        }
-
-        pending_tasks_before_loop_upd = {
-            "queuePosition": 0,
-            "stage": "Waiting for pending tasks for termination.: 0/1",
-            "_admin.modified": self.time_value,
-        }
-
-        pending_tasks_during_loop_upd = {
-            "queuePosition": 0,
-            "stage": "Waiting for pending tasks for termination.: 1/1",
-            "_admin.modified": self.time_value,
-        }
-
-        after_terminate_ns_upd = {
-            "detailed-status": "Done",
-            "operational-status": "terminated",
-            "_admin.nsState": "NOT_INSTANTIATED",
-            "_admin.nslcmop": None,
-            "_admin.current-operation": None,
-            "_admin.operation-type": None,
-            "currentOperation": "IDLE",
-            "currentOperationID": None,
-            "errorDescription": None,
-            "errorDetail": None,
-            "nsState": "NOT_INSTANTIATED",
-            "_admin.modified": self.time_value,
-        }
-
-        after_terminate_op_upd = {
-            "detailed-status": "Done",
-            "queuePosition": 0,
-            "stage": "COMPLETED",
-            "operationState": "COMPLETED",
-            "statusEnteredTime": 0,
-            "_admin.modified": self.time_value,
-        }
-
-        kafka_msg = {
-            "nsr_id": self.nsr_id,
-            "nslcmop_id": self.nslcmop_id,
-            "operationState": "COMPLETED",
-            "autoremove": False,
-        }
-
-        self.call_terminate()
-
-        self.juju_paas_service.logger.error.assert_not_called()
-        self.juju_paas_service.logger.exception().assert_not_called()
-
-        db_update_expected_calls = [
-            call("nsrs", {"_id": self.nsr_id}, self.before_terminate_ns_upd),
-            call("nslcmops", {"_id": self.nslcmop_id}, self.before_terminate_op_upd),
-            call("nsrs", {"_id": self.nsr_id}, ns_terminate_upd),
-            call("nslcmops", {"_id": self.nslcmop_id}, pending_tasks_before_loop_upd),
-            call("nslcmops", {"_id": self.nslcmop_id}, pending_tasks_during_loop_upd),
-            call("nsrs", {"_id": self.nsr_id}, after_terminate_ns_upd),
-            call("nslcmops", {"_id": self.nslcmop_id}, after_terminate_op_upd),
-        ]
-        self.check_db_update(db_update_expected_calls)
-
-        self.juju_paas_service.lcm_tasks.register.assert_called_with(
-            "ns", self.nsr_id, self.nslcmop_id, "terminate_juju_paas_service", self.task
-        )
-        self.terminate_expected_calls(ensure_future_mock, kafka_msg)
-
-    @patch("osm_lcm.lcm_utils.time")
-    @patch("osm_lcm.paas_service.time")
-    @patch("osm_lcm.paas_service.asyncio.ensure_future")
-    @patch("osm_lcm.paas_service.asyncio.wait")
-    def test_terminate_exception(
-        self, mock_await, ensure_future_mock, paas_mock_time, lcm_mock_time
-    ):
-        self.config_terminate_test(paas_mock_time, lcm_mock_time)
-        ensure_future_mock.side_effect = asyncio.CancelledError
-        mock_await.return_value = [self.task], []
-
-        after_terminate_ns_upd = {
-            "detailed-status": "Operation: terminate.nslcmop_id Detail: Operation was cancelled",
-            "_admin.nslcmop": None,
-            "_admin.current-operation": None,
-            "_admin.operation-type": None,
-            "currentOperation": "IDLE",
-            "currentOperationID": None,
-            "errorDescription": "Operation: terminate.nslcmop_id",
-            "errorDetail": "Operation was cancelled",
-            "nsState": "BROKEN",
-            "_admin.modified": self.time_value,
-        }
-
-        after_terminate_op_upd = {
-            "detailed-status": "Operation was cancelled",
-            "queuePosition": 0,
-            "stage": "FAILED",
-            "errorMessage": "Detail: Operation was cancelled",
-            "operationState": "FAILED",
-            "statusEnteredTime": 0,
-            "_admin.modified": self.time_value,
-        }
-
-        kafka_msg = {
-            "nsr_id": self.nsr_id,
-            "nslcmop_id": self.nslcmop_id,
-            "operationState": "FAILED",
-            "autoremove": False,
-        }
-
-        self.call_terminate()
-
-        db_update_expected_calls = [
-            call("nsrs", {"_id": "nsr_id"}, self.before_terminate_ns_upd),
-            call("nslcmops", {"_id": "nslcmop_id"}, self.before_terminate_op_upd),
-            call("nsrs", {"_id": "nsr_id"}, after_terminate_ns_upd),
-            call("nslcmops", {"_id": "nslcmop_id"}, after_terminate_op_upd),
-        ]
-
-        self.check_db_update(db_update_expected_calls)
-        self.juju_paas_service.lcm_tasks.register.assert_not_called()
-        self.terminate_expected_calls(ensure_future_mock, kafka_msg)
-
-    @patch("osm_lcm.lcm_utils.time")
-    @patch("osm_lcm.paas_service.time")
-    @patch("osm_lcm.paas_service.asyncio.ensure_future")
-    @patch("osm_lcm.paas_service.asyncio.wait")
-    def test_terminate_timeout(
-        self, mock_await, ensure_future_mock, paas_mock_time, lcm_mock_time
-    ):
-        self.config_terminate_test(paas_mock_time, lcm_mock_time)
-        ensure_future_mock.return_value = self.task
-        mock_await.return_value = [], [self.task]  # set timeout
-
-        ns_terminate_upd = {
-            "_admin.nsState": "TERMINATED",
-            "_admin.modified": self.time_value,
-        }
-
-        pending_tasks_before_loop_upd = {
-            "queuePosition": 0,
-            "stage": "Waiting for pending tasks for termination.: 0/1",
-            "_admin.modified": self.time_value,
-        }
-
-        after_terminate_ns_upd = {
-            "detailed-status": "Operation: terminate.nslcmop_id Detail: Terminate juju PaaS Service: Timeout",
-            "_admin.nslcmop": None,
-            "_admin.current-operation": None,
-            "_admin.operation-type": None,
-            "currentOperation": "IDLE",
-            "currentOperationID": None,
-            "errorDescription": "Operation: terminate.nslcmop_id",
-            "errorDetail": "Terminate juju PaaS Service: Timeout",
-            "nsState": "BROKEN",
-            "_admin.modified": self.time_value,
-        }
-
-        after_terminate_op_upd = {
-            "detailed-status": "Terminate juju PaaS Service: Timeout",
-            "queuePosition": 0,
-            "stage": "FAILED",
-            "errorMessage": "Detail: Terminate juju PaaS Service: Timeout",
-            "operationState": "FAILED",
-            "statusEnteredTime": 0,
-            "_admin.modified": self.time_value,
-        }
-
-        kafka_msg = {
-            "nsr_id": self.nsr_id,
-            "nslcmop_id": self.nslcmop_id,
-            "operationState": "FAILED",
-            "autoremove": False,
-        }
-
-        self.call_terminate()
-        db_update_expected_calls = [
-            call("nsrs", {"_id": "nsr_id"}, self.before_terminate_ns_upd),
-            call("nslcmops", {"_id": "nslcmop_id"}, self.before_terminate_op_upd),
-            call("nsrs", {"_id": "nsr_id"}, ns_terminate_upd),
-            call("nslcmops", {"_id": "nslcmop_id"}, pending_tasks_before_loop_upd),
-            call("nsrs", {"_id": "nsr_id"}, after_terminate_ns_upd),
-            call("nslcmops", {"_id": "nslcmop_id"}, after_terminate_op_upd),
-        ]
-
-        self.check_db_update(db_update_expected_calls)
-        self.juju_paas_service.lcm_tasks.register.assert_called_with(
-            "ns", self.nsr_id, self.nslcmop_id, "terminate_juju_paas_service", self.task
-        )
-        self.terminate_expected_calls(ensure_future_mock, kafka_msg)
-
-    def test_action(self):
-        with self.assertRaises(NotImplementedError):
-            self.loop.run_until_complete(
-                self.juju_paas_service.action(self.nsr_id, self.nslcmop_id)
-            )
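
The suite removed above exercised JujuPaasService.instantiate() and terminate() end to end, recording the nsrs/nslcmops updates through a CopyingMock so that assert_has_calls() compared the update dictionaries by value. CopyingMock itself is not shown in this patch; presumably it follows the standard recipe from the unittest.mock documentation for coping with mutable call arguments, sketched here for reference only:

    # Illustrative sketch (not part of this change): deep-copy call arguments
    # so recorded calls keep the values as they were at call time, even if the
    # caller mutates the dicts afterwards.
    from copy import deepcopy
    from unittest.mock import MagicMock

    class CopyingMock(MagicMock):
        def __call__(self, /, *args, **kwargs):
            args = deepcopy(args)
            kwargs = deepcopy(kwargs)
            return super().__call__(*args, **kwargs)
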
diff --git a/osm_lcm/vim_sdn.py b/osm_lcm/vim_sdn.py
index 1e5458c..0c22305 100644 (file)
--- a/osm_lcm/vim_sdn.py
+++ b/osm_lcm/vim_sdn.py
@@ -56,7 +56,7 @@ class VimLcm(LcmBase):
         self.logger = logging.getLogger("lcm.vim")
         self.loop = loop
         self.lcm_tasks = lcm_tasks
-        self.ro_config = config["ro_config"]
+        self.ro_config = config["RO"]
 
         super().__init__(msg, self.logger)
 
@@ -478,7 +478,7 @@ class WimLcm(LcmBase):
         self.logger = logging.getLogger("lcm.vim")
         self.loop = loop
         self.lcm_tasks = lcm_tasks
-        self.ro_config = config["ro_config"]
+        self.ro_config = config["RO"]
 
         super().__init__(msg, self.logger)
 
@@ -826,7 +826,7 @@ class SdnLcm(LcmBase):
         self.logger = logging.getLogger("lcm.sdn")
         self.loop = loop
         self.lcm_tasks = lcm_tasks
-        self.ro_config = config["ro_config"]
+        self.ro_config = config["RO"]
 
         super().__init__(msg, self.logger)
 
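
The three hunks above read the RO settings from the "RO" section of the LCM configuration instead of the old "ro_config" key. A minimal sketch of the dict-style lookup being assumed (the keys inside the section are illustrative only; the real object is built by the LCM from lcm.cfg):

    # Illustrative sketch: only the section name "RO" is taken from this patch.
    config = {"RO": {"host": "ro", "port": 9090, "tenant": "osm"}}
    ro_config = config["RO"]  # was: config["ro_config"]
    assert ro_config["host"] == "ro"
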
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 9740085..39172fd 100644 (file)
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -35,8 +35,7 @@ certifi==2022.9.24
     #   requests
 cffi==1.15.1
     # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=paas
-    #   bcrypt
+    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
     #   cryptography
     #   pynacl
 charset-normalizer==2.1.1
@@ -149,7 +153,7 @@ pyyaml==5.4.1
     #   juju
     #   jujubundlelib
     #   kubernetes
-requests==2.28.0
+requests==2.28.1
     # via
     #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=paas
     #   kubernetes
@@ -162,7 +166,7 @@ requests-oauthlib==1.3.1
     #   kubernetes
 retrying-async==2.0.0
     # via -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=paas
-rsa==4.8
+rsa==4.9
     # via
     #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=paas
     #   google-auth
diff --git a/requirements.in b/requirements.in
index f2830be..41a01ba 100644 (file)
--- a/requirements.in
+++ b/requirements.in
@@ -22,3 +22,4 @@ jinja2
 pyyaml==5.4.1
 pydantic
 protobuf==3.20.3
+config-man==0.0.4
diff --git a/requirements.txt b/requirements.txt
index bcd2c46..24c09c0 100644 (file)
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,12 +21,24 @@ async-timeout==3.0.1
     #   -r requirements.in
     #   aiohttp
 attrs==22.1.0
-    # via aiohttp
+    # via
+    #   aiohttp
+    #   glom
+boltons==21.0.0
+    # via
+    #   face
+    #   glom
 chardet==4.0.0
     # via aiohttp
 checksumdir==1.2.0
     # via -r requirements.in
-grpcio==1.49.1
+config-man==0.0.4
+    # via -r requirements.in
+face==22.0.0
+    # via glom
+glom==22.1.0
+    # via config-man
+grpcio==1.50.0
     # via grpcio-tools
 grpcio-tools==1.48.1
     # via -r requirements.in
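
The new config-man pin from requirements.in appears here together with its transitive dependencies glom, face and boltons. For orientation, glom's basic nested lookup, which config-man depends on, works roughly as follows (the data is illustrative):

    # Illustrative sketch: glom resolves dotted paths against nested structures.
    from glom import glom

    data = {"database": {"uri": "mongodb://mongo:27017"}}
    assert glom(data, "database.uri") == "mongodb://mongo:27017"
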
diff --git a/tox.ini b/tox.ini
index f1aeb9b..b014de1 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -27,6 +27,7 @@ basepython = python3.8
 setenv = VIRTUAL_ENV={envdir}
          PYTHONDONTWRITEBYTECODE = 1
 deps =  -r{toxinidir}/requirements.txt
+parallel_show_output = true
 
 #######################################################################################
 [testenv:black]