Pin black version in tox.ini to 23.12.1 (78/14178/1, master)
author garciadeblas <gerardo.garciadeblas@telefonica.com>
Mon, 29 Jan 2024 17:26:53 +0000 (18:26 +0100)
committer garciadeblas <gerardo.garciadeblas@telefonica.com>
Mon, 29 Jan 2024 17:26:53 +0000 (18:26 +0100)
Change-Id: Ica4b7a2ec310cffa116f319818f755c5062f0787
Signed-off-by: garciadeblas <gerardo.garciadeblas@telefonica.com>
32 files changed:
Dockerfile
Jenkinsfile
devops-stages/stage-archive.sh
n2vc/k8s_conn.py
n2vc/k8s_helm3_conn.py
n2vc/k8s_helm_base_conn.py
n2vc/k8s_helm_conn.py [deleted file]
n2vc/k8s_juju_conn.py
n2vc/kubectl.py
n2vc/libjuju.py
n2vc/loggable.py
n2vc/n2vc_conn.py
n2vc/n2vc_juju_conn.py
n2vc/store.py
n2vc/tests/unit/test_juju_watcher.py
n2vc/tests/unit/test_k8s_helm3_conn.py
n2vc/tests/unit/test_k8s_helm_conn.py [deleted file]
n2vc/tests/unit/test_k8s_juju_conn.py
n2vc/tests/unit/test_kubectl.py
n2vc/tests/unit/test_libjuju.py
n2vc/tests/unit/test_n2vc_juju_conn.py
n2vc/tests/unit/test_store.py
n2vc/tests/unit/testdata/test_certificate.yaml [new file with mode: 0644]
n2vc/tests/unit/testdata/test_db_descriptors.py [new file with mode: 0644]
n2vc/tests/unit/utils.py
n2vc/utils.py
requirements-dev.txt
requirements-test.in
requirements-test.txt
requirements.in
requirements.txt
tox.ini

diff --git a/Dockerfile b/Dockerfile
index c597522..27ab273 100644
@@ -21,7 +21,7 @@
 #   devops-stages/stage-build.sh
 #
 
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 ARG APT_PROXY
 RUN if [ ! -z $APT_PROXY ] ; then \
@@ -37,7 +37,9 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
         python3 \
         python3-all \
         python3-dev \
-        python3-setuptools
+        python3-setuptools \
+        python3-pip \
+        tox
 
-RUN python3 -m easy_install pip==21.3.1
-RUN pip install tox==3.24.5
+ENV LC_ALL C.UTF-8
+ENV LANG C.UTF-8
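
Note: Ubuntu 22.04 ships Python 3.10, so pip and tox now come from apt instead of the old easy_install bootstrap, and the ENV lines pin the container locale so Python defaults to UTF-8. A minimal sketch of how to check that locale assumption from inside the built image (hypothetical snippet, not part of the change):

    import locale
    import os

    # LC_ALL is set by the ENV lines above and drives Python's preferred encoding.
    assert os.environ.get("LC_ALL") == "C.UTF-8"
    print(locale.getpreferredencoding(False))  # expected: "UTF-8"
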
diff --git a/Jenkinsfile b/Jenkinsfile
index e384cbd..d8e7474 100644
@@ -1,17 +1,19 @@
-/*
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-  implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-*/
+/* Copyright ETSI OSM and others
+ *
+ * All Rights Reserved.
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *   not use this file except in compliance with the License. You may obtain
+ *   a copy of the License at
+ *
+ *        http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *   License for the specific language governing permissions and limitations
+ *   under the License.
+ */
 
 properties([
     parameters([
@@ -31,7 +33,7 @@ def devops_checkout() {
     }
 }
 
-node('docker') {
+node('stage_2') {
     checkout scm
     devops_checkout()
 
diff --git a/devops-stages/stage-archive.sh b/devops-stages/stage-archive.sh
index 662616c..eead613 100755
@@ -18,7 +18,4 @@ rm -rf pool
 rm -rf dists
 mkdir -p pool/$MDG
 mv deb_dist/*.deb pool/$MDG/
-mkdir -p dists/unstable/$MDG/binary-amd64/
-apt-ftparchive packages pool/$MDG > dists/unstable/$MDG/binary-amd64/Packages
-gzip -9fk dists/unstable/$MDG/binary-amd64/Packages
-echo "dists/**,pool/$MDG/*.deb"
+
diff --git a/n2vc/k8s_conn.py b/n2vc/k8s_conn.py
index ef4f5f2..3a1a5ef 100644
@@ -194,6 +194,7 @@ class K8sConnector(abc.ABC, Loggable):
         timeout: float = 300,
         params: dict = None,
         db_dict: dict = None,
+        force: bool = False,
     ):
         """
         Upgrades an existing KDU instance. It would implicitly use the `upgrade` call
@@ -213,6 +214,7 @@ class K8sConnector(abc.ABC, Loggable):
                         path: <str>},
                             e.g. {collection: "nsrs", filter:
                             {_id: <nsd-id>, path: "_admin.deployed.K8S.3"}
+        :param force: force recreation of resources if necessary
         :return: reference to the new revision number of the KDU instance
         """
 
@@ -329,6 +331,28 @@ class K8sConnector(abc.ABC, Loggable):
         :return: Returns the output of the action
         """
 
+    @abc.abstractmethod
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrade charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if status equals "completed"
+        """
+
     @abc.abstractmethod
     async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:
         """
@@ -436,6 +460,18 @@ class K8sConnector(abc.ABC, Loggable):
     async def write_app_status_to_db(
         self, db_dict: dict, status: str, detailed_status: str, operation: str
     ) -> bool:
+        """
+        This method will write the status of the application to the database.
+
+        :param db_dict: A dictionary with the necessary database information. It shall contain values for the keys:
+            - "collection": The Mongo DB collection to write to
+            - "filter": The query filter to use in the update process
+            - "path": The dot-separated path that targets the object to be updated
+        :param status: Status of the application
+        :param detailed_status: Detailed status of the application
+        :param operation: Operation that is being performed on the application
+        :return: True if successful
+        """
 
         if not self.db:
             self.warning("No db => No database write")
@@ -448,7 +484,6 @@ class K8sConnector(abc.ABC, Loggable):
         self.log.debug("status={}".format(status))
 
         try:
-
             the_table = db_dict["collection"]
             the_filter = db_dict["filter"]
             the_path = db_dict["path"]
diff --git a/n2vc/k8s_helm3_conn.py b/n2vc/k8s_helm3_conn.py
index bb08f07..14f7fe0 100644
@@ -20,6 +20,7 @@
 # contact with: nfvlabs@tid.es
 ##
 from typing import Union
+from shlex import quote
 import os
 import yaml
 
@@ -84,7 +85,12 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         """Install a helm chart
 
         :param cluster_uuid str: The UUID of the cluster to install to
-        :param kdu_model str: The name or path of a bundle to install
+        :param kdu_model str: chart/reference (string), which can be either
+            of these options:
+            - a name of chart available via the repos known by OSM
+              (e.g. stable/openldap, stable/openldap:1.2.4)
+            - a path to a packaged chart (e.g. mychart.tgz)
+            - a path to an unpacked chart directory or a URL (e.g. mychart)
         :param kdu_instance: Kdu instance name
         :param atomic bool: If set, waits until the model is active and resets
                             the cluster on failure.
@@ -113,6 +119,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         if namespace and namespace != "kube-system":
             if not await self._namespace_exists(cluster_uuid, namespace):
                 try:
+                    # TODO: refactor to use kubernetes API client
                     await self._create_namespace(cluster_uuid, namespace)
                 except Exception as e:
                     if not await self._namespace_exists(cluster_uuid, namespace):
@@ -144,7 +151,6 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         return True
 
     async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:
-
         self.log.debug(
             "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url)
         )
@@ -245,7 +251,6 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         return namespace in namespaces if namespaces else False
 
     async def _get_namespaces(self, cluster_id: str):
-
         self.log.debug("get namespaces cluster_id {}".format(cluster_id))
 
         # init config, env
@@ -254,7 +259,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         )
 
         command = "{} --kubeconfig={} get namespaces -o=yaml".format(
-            self.kubectl_command, paths["kube_config"]
+            self.kubectl_command, quote(paths["kube_config"])
         )
         output, _rc = await self._local_async_exec(
             command=command, raise_exception_on_error=True, env=env
@@ -267,7 +272,6 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         return namespaces
 
     async def _create_namespace(self, cluster_id: str, namespace: str):
-
         self.log.debug(f"create namespace: {namespace} for cluster_id: {cluster_id}")
 
         # init config, env
@@ -276,7 +280,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         )
 
         command = "{} --kubeconfig={} create namespace {}".format(
-            self.kubectl_command, paths["kube_config"], namespace
+            self.kubectl_command, quote(paths["kube_config"]), quote(namespace)
         )
         _, _rc = await self._local_async_exec(
             command=command, raise_exception_on_error=True, env=env
@@ -288,16 +292,17 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
     async def _get_services(
         self, cluster_id: str, kdu_instance: str, namespace: str, kubeconfig: str
     ):
-
         # init config, env
         paths, env = self._init_paths_env(
             cluster_name=cluster_id, create_if_not_exist=True
         )
 
         command1 = "env KUBECONFIG={} {} get manifest {} --namespace={}".format(
-            kubeconfig, self._helm_command, kdu_instance, namespace
+            kubeconfig, self._helm_command, quote(kdu_instance), quote(namespace)
+        )
+        command2 = "{} get --namespace={} -f -".format(
+            self.kubectl_command, quote(namespace)
         )
-        command2 = "{} get --namespace={} -f -".format(self.kubectl_command, namespace)
         output, _rc = await self._local_async_exec_pipe(
             command1, command2, env=env, raise_exception_on_error=True
         )
@@ -313,6 +318,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         if namespace != "kube-system":
             namespaces = await self._get_namespaces(cluster_id)
             if namespace not in namespaces:
+                # TODO: refactor to use kubernetes API client
                 await self._create_namespace(cluster_id, namespace)
 
         repo_list = await self.repo_list(cluster_id)
@@ -328,7 +334,6 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         pass
 
     async def _instances_list(self, cluster_id: str):
-
         # init paths, env
         paths, env = self._init_paths_env(
             cluster_name=cluster_id, create_if_not_exist=True
@@ -346,10 +351,23 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
             return []
 
     def _get_inspect_command(
-        self, inspect_command: str, kdu_model: str, repo_str: str, version: str
+        self, show_command: str, kdu_model: str, repo_str: str, version: str
     ):
+        """Generates the command to obtain the information about an Helm Chart package
+            (´helm show ...´ command)
+
+        Args:
+            show_command: the second part of the command (`helm show <show_command>`)
+            kdu_model: The name or path of a Helm Chart
+            repo_str: Helm Chart repository URL
+            version: constraint with specific version of the Chart to use
+
+        Returns:
+            str: the generated Helm Chart command
+        """
+
         inspect_command = "{} show {} {}{} {}".format(
-            self._helm_command, inspect_command, kdu_model, repo_str, version
+            self._helm_command, show_command, quote(kdu_model), repo_str, version
         )
         return inspect_command
 
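
Note: after the rename, the parameter name matches the CLI it generates: show_command is the subcommand of `helm show`. For illustration, with assumed values:

    from shlex import quote

    helm = "helm3"
    show_command = "values"
    kdu_model = "stable/openldap"
    repo_str = " --repo https://charts.example.com"  # hypothetical repository
    version = "--version 1.2.4"

    cmd = "{} show {} {}{} {}".format(helm, show_command, quote(kdu_model), repo_str, version)
    # -> helm3 show values stable/openldap --repo https://charts.example.com --version 1.2.4
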
@@ -358,7 +376,11 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
     ):
         get_command = (
             "env KUBECONFIG={} {} get {} {} --namespace={} --output yaml".format(
-                kubeconfig, self._helm_command, get_command, kdu_instance, namespace
+                kubeconfig,
+                self._helm_command,
+                get_command,
+                quote(kdu_instance),
+                quote(namespace),
             )
         )
         return get_command
@@ -371,7 +393,6 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         yaml_format: bool = False,
         show_error_log: bool = False,
     ) -> Union[str, dict]:
-
         self.log.debug(
             "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace)
         )
@@ -384,7 +405,10 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
             cluster_name=cluster_id, create_if_not_exist=True
         )
         command = "env KUBECONFIG={} {} status {} --namespace={} --output yaml".format(
-            paths["kube_config"], self._helm_command, kdu_instance, namespace
+            paths["kube_config"],
+            self._helm_command,
+            quote(kdu_instance),
+            quote(namespace),
         )
 
         output, rc = await self._local_async_exec(
@@ -430,7 +454,6 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         timeout: float,
         kubeconfig: str,
     ) -> str:
-
         timeout_str = ""
         if timeout:
             timeout_str = "--timeout {}s".format(timeout)
@@ -442,7 +465,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         # namespace
         namespace_str = ""
         if namespace:
-            namespace_str = "--namespace {}".format(namespace)
+            namespace_str = "--namespace {}".format(quote(namespace))
 
         # version
         version_str = ""
@@ -454,12 +477,12 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
             "{params} {timeout} {ns} {model} {ver}".format(
                 kubeconfig=kubeconfig,
                 helm=self._helm_command,
             "{params} {timeout} {ns} {model} {ver}".format(
                 kubeconfig=kubeconfig,
                 helm=self._helm_command,
-                name=kdu_instance,
+                name=quote(kdu_instance),
                 atomic=atomic_str,
                 params=params_str,
                 timeout=timeout_str,
                 ns=namespace_str,
-                model=kdu_model,
+                model=quote(kdu_model),
                 ver=version_str,
             )
         )
@@ -478,25 +501,24 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         resource_name: str,
         kubeconfig: str,
     ) -> str:
-
-        timeout_str = ""
-        if timeout:
-            timeout_str = "--timeout {}s".format(timeout)
-
-        # atomic
-        atomic_str = ""
-        if atomic:
-            atomic_str = "--atomic"
-
-        # version
-        version_str = ""
-        if version:
-            version_str = "--version {}".format(version)
-
-        # namespace
-        namespace_str = ""
-        if namespace:
-            namespace_str = "--namespace {}".format(namespace)
+        """Generates the command to scale a Helm Chart release
+
+        Args:
+            kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
+            kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
+            namespace (str): Namespace where this KDU instance is deployed
+            scale (int): Scale count
+            version (str): Constraint with specific version of the Chart to use
+            atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            replica_str (str): The key under resource_name key where the scale count is stored
+            timeout (float): The time, in seconds, to wait
+            resource_name (str): The KDU's resource to scale
+            kubeconfig (str): Kubeconfig file path
+
+        Returns:
+            str: command to scale a Helm Chart release
+        """
 
         # scale
         if resource_name:
@@ -506,21 +528,16 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
 
         scale_str = self._params_to_set_option(scale_dict)
 
-        command = (
-            "env KUBECONFIG={kubeconfig} {helm} upgrade {name} {model} {namespace} {atomic} --output yaml {scale} "
-            "{timeout} {ver}"
-        ).format(
-            helm=self._helm_command,
-            name=kdu_instance,
-            namespace=namespace_str,
-            atomic=atomic_str,
-            scale=scale_str,
-            timeout=timeout_str,
-            model=kdu_model,
-            ver=version_str,
+        return self._get_upgrade_command(
+            kdu_model=kdu_model,
+            kdu_instance=kdu_instance,
+            namespace=namespace,
+            params_str=scale_str,
+            version=version,
+            atomic=atomic,
+            timeout=timeout,
             kubeconfig=kubeconfig,
         )
             kubeconfig=kubeconfig,
         )
-        return command
 
     def _get_upgrade_command(
         self,
 
         atomic: bool,
         timeout: float,
         kubeconfig: str,
         atomic: bool,
         timeout: float,
         kubeconfig: str,
+        force: bool = False,
     ) -> str:
     ) -> str:
+        """Generates the command to upgrade a Helm Chart release
+
+        Args:
+            kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
+            kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
+            namespace (str): Namespace where this KDU instance is deployed
+            params_str (str): Params used to upgrade the Helm Chart release
+            version (str): Constraint with specific version of the Chart to use
+            atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            timeout (float): The time, in seconds, to wait
+            kubeconfig (str): Kubeconfig file path
+            force (bool): If set, helm forces resource updates through a replacement strategy. This may recreate pods.
+        Returns:
+            str: command to upgrade a Helm Chart release
+        """
 
         timeout_str = ""
         if timeout:
@@ -543,28 +577,34 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         if atomic:
             atomic_str = "--atomic"
 
+        # force
+        force_str = ""
+        if force:
+            force_str = "--force "
+
         # version
         version_str = ""
         if version:
-            version_str = "--version {}".format(version)
+            version_str = "--version {}".format(quote(version))
 
         # namespace
         namespace_str = ""
         if namespace:
-            namespace_str = "--namespace {}".format(namespace)
+            namespace_str = "--namespace {}".format(quote(namespace))
 
         command = (
-            "env KUBECONFIG={kubeconfig} {helm} upgrade {name} {model} {namespace} {atomic} "
-            "--output yaml {params} {timeout} {ver}"
+            "env KUBECONFIG={kubeconfig} {helm} upgrade {name} {model} {namespace} {atomic} {force}"
+            "--output yaml {params} {timeout} --reuse-values {ver}"
         ).format(
             kubeconfig=kubeconfig,
             helm=self._helm_command,
-            name=kdu_instance,
+            name=quote(kdu_instance),
             namespace=namespace_str,
             atomic=atomic_str,
+            force=force_str,
             params=params_str,
             timeout=timeout_str,
-            model=kdu_model,
+            model=quote(kdu_model),
             ver=version_str,
         )
         return command
@@ -573,15 +613,18 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         self, kdu_instance: str, namespace: str, revision: float, kubeconfig: str
     ) -> str:
         return "env KUBECONFIG={} {} rollback {} {} --namespace={} --wait".format(
-            kubeconfig, self._helm_command, kdu_instance, revision, namespace
+            kubeconfig,
+            self._helm_command,
+            quote(kdu_instance),
+            revision,
+            quote(namespace),
         )
 
     def _get_uninstall_command(
         self, kdu_instance: str, namespace: str, kubeconfig: str
     ) -> str:
-
         return "env KUBECONFIG={} {} uninstall {} --namespace={}".format(
-            kubeconfig, self._helm_command, kdu_instance, namespace
+            kubeconfig, self._helm_command, quote(kdu_instance), quote(namespace)
         )
 
     def _get_helm_chart_repos_ids(self, cluster_uuid) -> list:
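
Note: the recurring change in this file is wrapping externally influenced values in shlex.quote before splicing them into a shell command line, which turns would-be injection payloads into a single harmless argument. Distilled to a standalone example:

    from shlex import quote

    namespace = "default; rm -rf /tmp/x"  # hostile value, for illustration only
    unsafe = "kubectl create namespace {}".format(namespace)
    safe = "kubectl create namespace {}".format(quote(namespace))
    print(unsafe)  # kubectl create namespace default; rm -rf /tmp/x   <- two commands
    print(safe)    # kubectl create namespace 'default; rm -rf /tmp/x' <- one argument
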
diff --git a/n2vc/k8s_helm_base_conn.py b/n2vc/k8s_helm_base_conn.py
index adbdefc..5f004b3 100644
@@ -22,6 +22,7 @@
 import abc
 import asyncio
 from typing import Union
+from shlex import quote
 import random
 import time
 import shlex
@@ -30,10 +31,12 @@ import stat
 import os
 import yaml
 from uuid import uuid4
+from urllib.parse import urlparse
 
 from n2vc.config import EnvironConfig
 from n2vc.exceptions import K8sException
 from n2vc.k8s_conn import K8sConnector
+from n2vc.kubectl import Kubectl
 
 
 class K8sHelmBaseConnector(K8sConnector):
@@ -90,6 +93,9 @@ class K8sHelmBaseConnector(K8sConnector):
         if self._stable_repo_url == "None":
             self._stable_repo_url = None
 
+        # Lock to avoid concurrent execution of helm commands
+        self.cmd_lock = asyncio.Lock()
+
     def _get_namespace(self, cluster_uuid: str) -> str:
         """
         Obtains the namespace used by the cluster with the uuid passed by argument
@@ -109,7 +115,7 @@ class K8sHelmBaseConnector(K8sConnector):
         namespace: str = "kube-system",
         reuse_cluster_uuid=None,
         **kwargs,
-    ) -> (str, bool):
+    ) -> tuple[str, bool]:
         """
         It prepares a given K8s cluster environment to run Charts
 
@@ -160,6 +166,7 @@ class K8sHelmBaseConnector(K8sConnector):
         cert: str = None,
         user: str = None,
         password: str = None,
+        oci: bool = False,
     ):
         self.log.debug(
             "Cluster {}, adding {} repository {}. URL: {}".format(
@@ -175,10 +182,23 @@ class K8sHelmBaseConnector(K8sConnector):
         # sync local dir
         self.fs.sync(from_path=cluster_uuid)
 
-        # helm repo add name url
-        command = ("env KUBECONFIG={} {} repo add {} {}").format(
-            paths["kube_config"], self._helm_command, name, url
-        )
+        if oci:
+            if user and password:
+                host_port = urlparse(url).netloc if url.startswith("oci://") else url
+                # helm registry login url
+                command = "env KUBECONFIG={} {} registry login {}".format(
+                    paths["kube_config"], self._helm_command, quote(host_port)
+                )
+            else:
+                self.log.debug(
+                    "OCI registry login is not needed for repo: {}".format(name)
+                )
+                return
+        else:
+            # helm repo add name url
+            command = "env KUBECONFIG={} {} repo add {} {}".format(
+                paths["kube_config"], self._helm_command, quote(name), quote(url)
+            )
 
         if cert:
             temp_cert_file = os.path.join(
@@ -187,23 +207,49 @@ class K8sHelmBaseConnector(K8sConnector):
             os.makedirs(os.path.dirname(temp_cert_file), exist_ok=True)
             with open(temp_cert_file, "w") as the_cert:
                 the_cert.write(cert)
-            command += " --ca-file {}".format(temp_cert_file)
+            command += " --ca-file {}".format(quote(temp_cert_file))
 
         if user:
-            command += " --username={}".format(user)
+            command += " --username={}".format(quote(user))
 
         if password:
-            command += " --password={}".format(password)
+            command += " --password={}".format(quote(password))
 
         self.log.debug("adding repo: {}".format(command))
         await self._local_async_exec(
             command=command, raise_exception_on_error=True, env=env
         )
 
-        # helm repo update
-        command = "env KUBECONFIG={} {} repo update {}".format(
-            paths["kube_config"], self._helm_command, name
+        if not oci:
+            # helm repo update
+            command = "env KUBECONFIG={} {} repo update {}".format(
+                paths["kube_config"], self._helm_command, quote(name)
+            )
+            self.log.debug("updating repo: {}".format(command))
+            await self._local_async_exec(
+                command=command, raise_exception_on_error=False, env=env
+            )
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+    async def repo_update(self, cluster_uuid: str, name: str, repo_type: str = "chart"):
+        self.log.debug(
+            "Cluster {}, updating {} repository {}".format(
+                cluster_uuid, repo_type, name
+            )
+        )
+
+        # init_env
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
         )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        # helm repo update
+        command = "{} repo update {}".format(self._helm_command, quote(name))
         self.log.debug("updating repo: {}".format(command))
         await self._local_async_exec(
             command=command, raise_exception_on_error=False, env=env
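
Note: OCI registries are not registered with `helm repo add`; when credentials are supplied the connector logs in to the registry host instead, taking the host:port part from the URL. The extraction can be exercised on its own:

    from urllib.parse import urlparse

    url = "oci://registry.example.com:5000/osm/charts"  # example value
    host_port = urlparse(url).netloc if url.startswith("oci://") else url
    print(host_port)  # registry.example.com:5000, the target of `helm registry login`
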
@@ -265,7 +311,7 @@ class K8sHelmBaseConnector(K8sConnector):
         self.fs.sync(from_path=cluster_uuid)
 
         command = "env KUBECONFIG={} {} repo remove {}".format(
-            paths["kube_config"], self._helm_command, name
+            paths["kube_config"], self._helm_command, quote(name)
         )
         await self._local_async_exec(
             command=command, raise_exception_on_error=True, env=env
@@ -346,6 +392,14 @@ class K8sHelmBaseConnector(K8sConnector):
 
         return True
 
+    def _is_helm_chart_a_file(self, chart_name: str):
+        return chart_name.count("/") > 1
+
+    @staticmethod
+    def _is_helm_chart_a_url(chart_name: str):
+        result = urlparse(chart_name)
+        return all([result.scheme, result.netloc])
+
     async def _install_impl(
         self,
         cluster_id: str,
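
Note: the two new helpers classify a kdu_model string so later code can tell repository references apart from local paths and URLs. The same logic, standalone:

    from urllib.parse import urlparse

    def is_url(chart_name: str) -> bool:
        result = urlparse(chart_name)
        return all([result.scheme, result.netloc])

    def is_file(chart_name: str) -> bool:
        return chart_name.count("/") > 1

    print(is_url("https://example.com/charts/app-1.0.0.tgz"))     # True
    print(is_file("./charts/app/subchart"))                       # True
    print(is_url("stable/openldap"), is_file("stable/openldap"))  # False False, a repo ref
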
@@ -370,8 +424,7 @@ class K8sHelmBaseConnector(K8sConnector):
             cluster_id=cluster_id, params=params
         )
 
-        # version
-        kdu_model, version = self._split_version(kdu_model)
+        kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_id)
 
         command = self._get_install_command(
             kdu_model,
@@ -402,7 +455,6 @@ class K8sHelmBaseConnector(K8sConnector):
                     namespace=namespace,
                     db_dict=db_dict,
                     operation="install",
-                    run_once=False,
                 )
             )
 
@@ -415,7 +467,6 @@ class K8sHelmBaseConnector(K8sConnector):
             output, rc = exec_task.result()
 
         else:
-
             output, rc = await self._local_async_exec(
                 command=command, raise_exception_on_error=False, env=env
             )
@@ -431,8 +482,6 @@ class K8sHelmBaseConnector(K8sConnector):
             namespace=namespace,
             db_dict=db_dict,
             operation="install",
-            run_once=True,
-            check_every=0,
         )
 
         if rc != 0:
@@ -449,6 +498,8 @@ class K8sHelmBaseConnector(K8sConnector):
         timeout: float = 300,
         params: dict = None,
         db_dict: dict = None,
+        namespace: str = None,
+        force: bool = False,
     ):
         self.log.debug("upgrading {} in cluster {}".format(kdu_model, cluster_uuid))
 
@@ -456,9 +507,13 @@ class K8sHelmBaseConnector(K8sConnector):
         self.fs.sync(from_path=cluster_uuid)
 
         # look for instance to obtain namespace
-        instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
-        if not instance_info:
-            raise K8sException("kdu_instance {} not found".format(kdu_instance))
+
+        # set namespace
+        if not namespace:
+            instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
+            if not instance_info:
+                raise K8sException("kdu_instance {} not found".format(kdu_instance))
+            namespace = instance_info["namespace"]
 
         # init env, paths
         paths, env = self._init_paths_env(
@@ -473,24 +528,23 @@ class K8sHelmBaseConnector(K8sConnector):
             cluster_id=cluster_uuid, params=params
         )
 
-        # version
-        kdu_model, version = self._split_version(kdu_model)
+        kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_uuid)
 
         command = self._get_upgrade_command(
             kdu_model,
             kdu_instance,
-            instance_info["namespace"],
+            namespace,
             params_str,
             version,
             atomic,
             timeout,
             paths["kube_config"],
+            force,
         )
 
         self.log.debug("upgrading: {}".format(command))
 
         if atomic:
-
             # exec helm in a task
             exec_task = asyncio.ensure_future(
                 coro_or_future=self._local_async_exec(
@@ -502,10 +556,9 @@ class K8sHelmBaseConnector(K8sConnector):
                 coro_or_future=self._store_status(
                     cluster_id=cluster_uuid,
                     kdu_instance=kdu_instance,
-                    namespace=instance_info["namespace"],
+                    namespace=namespace,
                     db_dict=db_dict,
                     operation="upgrade",
-                    run_once=False,
                 )
             )
 
@@ -517,7 +570,6 @@ class K8sHelmBaseConnector(K8sConnector):
             output, rc = exec_task.result()
 
         else:
-
             output, rc = await self._local_async_exec(
                 command=command, raise_exception_on_error=False, env=env
             )
@@ -530,11 +582,9 @@ class K8sHelmBaseConnector(K8sConnector):
         await self._store_status(
             cluster_id=cluster_uuid,
             kdu_instance=kdu_instance,
-            namespace=instance_info["namespace"],
+            namespace=namespace,
             db_dict=db_dict,
             operation="upgrade",
-            run_once=True,
-            check_every=0,
         )
 
         if rc != 0:
@@ -606,13 +656,9 @@ class K8sHelmBaseConnector(K8sConnector):
         )
 
         # version
-        kdu_model, version = self._split_version(kdu_model)
+        kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_uuid)
 
         repo_url = await self._find_repo(kdu_model, cluster_uuid)
-        if not repo_url:
-            raise K8sException(
-                "Repository not found for kdu_model {}".format(kdu_model)
-            )
 
         _, replica_str = await self._get_replica_count_url(
             kdu_model, repo_url, resource_name
@@ -648,7 +694,6 @@ class K8sHelmBaseConnector(K8sConnector):
                     namespace=instance_info["namespace"],
                     db_dict=db_dict,
                     operation="scale",
-                    run_once=False,
                 )
             )
 
@@ -671,8 +716,6 @@ class K8sHelmBaseConnector(K8sConnector):
             namespace=instance_info["namespace"],
             db_dict=db_dict,
             operation="scale",
-            run_once=True,
-            check_every=0,
         )
 
         if rc != 0:
@@ -699,7 +742,7 @@ class K8sHelmBaseConnector(K8sConnector):
             cluster_uuid: The UUID of the cluster
             resource_name: Resource name
             kdu_instance: KDU instance name
-            kdu_model: The name or path of a bundle
+            kdu_model: The name or path of a Helm Chart
             kwargs: Additional parameters
 
         Returns:
@@ -716,30 +759,42 @@ class K8sHelmBaseConnector(K8sConnector):
             raise K8sException("kdu_instance {} not found".format(kdu_instance))
 
         # init env, paths
-        paths, env = self._init_paths_env(
+        paths, _ = self._init_paths_env(
             cluster_name=cluster_uuid, create_if_not_exist=True
         )
 
         replicas = await self._get_replica_count_instance(
-            kdu_instance, instance_info["namespace"], paths["kube_config"]
+            kdu_instance=kdu_instance,
+            namespace=instance_info["namespace"],
+            kubeconfig=paths["kube_config"],
+            resource_name=resource_name,
         )
 
-        # Get default value if scale count is not found from provided values
-        if not replicas:
-            repo_url = await self._find_repo(kdu_model, cluster_uuid)
-            if not repo_url:
-                raise K8sException(
-                    "Repository not found for kdu_model {}".format(kdu_model)
-                )
+        self.log.debug(
+            f"Number of replicas of the KDU instance {kdu_instance} and resource {resource_name} obtained: {replicas}"
+        )
 
+        # Get default value if scale count is not found from provided values
+        # Important note: this piece of code shall only be executed in the first scaling operation,
+        # since it is expected that the _get_replica_count_instance is able to obtain the number of
+        # replicas when a scale operation was already conducted previously for this KDU/resource!
+        if replicas is None:
+            repo_url = await self._find_repo(
+                kdu_model=kdu_model, cluster_uuid=cluster_uuid
+            )
             replicas, _ = await self._get_replica_count_url(
-                kdu_model, repo_url, resource_name
+                kdu_model=kdu_model, repo_url=repo_url, resource_name=resource_name
             )
 
-        if not replicas:
-            msg = "Replica count not found. Cannot be scaled"
-            self.log.error(msg)
-            raise K8sException(msg)
+            self.log.debug(
+                f"Number of replicas of the Helm Chart package for KDU instance {kdu_instance} and resource "
+                f"{resource_name} obtained: {replicas}"
+            )
+
+            if replicas is None:
+                msg = "Replica count not found. Cannot be scaled"
+                self.log.error(msg)
+                raise K8sException(msg)
 
         return int(replicas)
 
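
Note: the fallback condition changes from `if not replicas:` to `if replicas is None:`. The distinction matters because a deployed instance may legitimately report a replica count of 0, which is falsy; only a truly missing value should trigger the lookup of the chart defaults. Distilled:

    replicas = 0              # valid scale count read from the deployed instance
    if not replicas:          # old test: 0 is falsy, so the fallback fired incorrectly
        print("would re-query the chart defaults")
    if replicas is None:      # new test: fires only when nothing was found
        print("not reached for replicas == 0")
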
@@ -788,7 +843,6 @@ class K8sHelmBaseConnector(K8sConnector):
                 namespace=instance_info["namespace"],
                 db_dict=db_dict,
                 operation="rollback",
-                run_once=False,
             )
         )
 
@@ -807,8 +861,6 @@ class K8sHelmBaseConnector(K8sConnector):
             namespace=instance_info["namespace"],
             db_dict=db_dict,
             operation="rollback",
-            run_once=True,
-            check_every=0,
         )
 
         if rc != 0:
@@ -905,6 +957,28 @@ class K8sHelmBaseConnector(K8sConnector):
         self.log.debug("Instance {} not found".format(kdu_instance))
         return None
 
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrade charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if status equals "completed"
+        """
+        raise K8sException("KDUs deployed with Helm do not support charm upgrade")
+
     async def exec_primitive(
         self,
         cluster_uuid: str = None,
@@ -983,7 +1057,6 @@ class K8sHelmBaseConnector(K8sConnector):
     async def get_service(
         self, cluster_uuid: str, service_name: str, namespace: str
     ) -> object:
-
         self.log.debug(
             "get service, service_name: {}, namespace: {}, cluster_uuid: {}".format(
                 service_name, namespace, cluster_uuid
@@ -1066,7 +1139,6 @@ class K8sHelmBaseConnector(K8sConnector):
     async def get_values_kdu(
         self, kdu_instance: str, namespace: str, kubeconfig: str
     ) -> str:
-
         self.log.debug("get kdu_instance values {}".format(kdu_instance))
 
         return await self._exec_get_command(
@@ -1077,6 +1149,15 @@ class K8sHelmBaseConnector(K8sConnector):
         )
 
     async def values_kdu(self, kdu_model: str, repo_url: str = None) -> str:
+        """Method to obtain the Helm Chart package's values
+
+        Args:
+            kdu_model: The name or path of a Helm Chart
+            repo_url: Helm Chart repository url
+
+        Returns:
+            str: the values of the Helm Chart package
+        """
 
         self.log.debug(
             "inspect kdu_model values {} from (optional) repo: {}".format(
@@ -1089,7 +1170,6 @@ class K8sHelmBaseConnector(K8sConnector):
         )
 
     async def help_kdu(self, kdu_model: str, repo_url: str = None) -> str:
-
         self.log.debug(
             "inspect kdu_model {} readme.md from repo: {}".format(kdu_model, repo_url)
         )
@@ -1099,7 +1179,6 @@ class K8sHelmBaseConnector(K8sConnector):
         )
 
     async def synchronize_repos(self, cluster_uuid: str):
-
         self.log.debug("synchronize repos for cluster helm-id: {}".format(cluster_uuid))
         try:
             db_repo_ids = self._get_helm_chart_repos_ids(cluster_uuid)
@@ -1130,19 +1209,15 @@ class K8sHelmBaseConnector(K8sConnector):
 
                         # add repo
                         self.log.debug("add repo {}".format(db_repo["name"]))
-                        if "ca_cert" in db_repo:
-                            await self.repo_add(
-                                cluster_uuid,
-                                db_repo["name"],
-                                db_repo["url"],
-                                cert=db_repo["ca_cert"],
-                            )
-                        else:
-                            await self.repo_add(
-                                cluster_uuid,
-                                db_repo["name"],
-                                db_repo["url"],
-                            )
+                        await self.repo_add(
+                            cluster_uuid,
+                            db_repo["name"],
+                            db_repo["url"],
+                            cert=db_repo.get("ca_cert"),
+                            user=db_repo.get("user"),
+                            password=db_repo.get("password"),
+                            oci=db_repo.get("oci", False),
+                        )
                         added_repo_dict[repo_id] = db_repo["name"]
                 except Exception as e:
                     raise K8sException(
@@ -1259,7 +1334,24 @@ class K8sHelmBaseConnector(K8sConnector):
         resource_name,
         kubeconfig,
     ) -> str:
-        """Obtain command to be executed to upgrade the indicated instance."""
+        """Generates the command to scale a Helm Chart release
+
+        Args:
+            kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
+            kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
+            namespace (str): Namespace where this KDU instance is deployed
+            scale (int): Scale count
+            version (str): Constraint with specific version of the Chart to use
+            atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            replica_str (str): The key under resource_name key where the scale count is stored
+            timeout (float): The time, in seconds, to wait
+            resource_name (str): The KDU's resource to scale
+            kubeconfig (str): Kubeconfig file path
+
+        Returns:
+            str: command to scale a Helm Chart release
+        """
 
     @abc.abstractmethod
     def _get_upgrade_command(
@@ -1272,9 +1364,23 @@ class K8sHelmBaseConnector(K8sConnector):
         atomic,
         timeout,
         kubeconfig,
+        force,
     ) -> str:
-        """
-        Obtain command to be executed to upgrade the indicated instance
+        """Generates the command to upgrade a Helm Chart release
+
+        Args:
+            kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
+            kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
+            namespace (str): Namespace where this KDU instance is deployed
+            params_str (str): Params used to upgrade the Helm Chart release
+            version (str): Constraint with specific version of the Chart to use
+            atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            timeout (float): The time, in seconds, to wait
+            kubeconfig (str): Kubeconfig file path
+            force (bool): If set, helm forces resource updates through a replacement strategy. This may recreate pods.
+        Returns:
+            str: command to upgrade a Helm Chart release
         """
 
     @abc.abstractmethod
@@ -1297,8 +1403,17 @@ class K8sHelmBaseConnector(K8sConnector):
     def _get_inspect_command(
         self, show_command: str, kdu_model: str, repo_str: str, version: str
     ):
-        """
-        Obtain command to be executed to obtain information about the kdu
+        """Generates the command to obtain the information about an Helm Chart package
+            (´helm show ...´ command)
+
+        Args:
+            show_command: the second part of the command (`helm show <show_command>`)
+            kdu_model: The name or path of a Helm Chart
+            repo_str: Helm Chart repository option string (" --repo <url>"), or empty
+            version: Constraint with specific version of the Chart to use
+
+        Returns:
+            str: the generated Helm Chart command
         """
 
     @abc.abstractmethod
         """
 
     @abc.abstractmethod
@@ -1431,8 +1546,7 @@ class K8sHelmBaseConnector(K8sConnector):
         show_error_log: bool = True,
         encode_utf8: bool = False,
         env: dict = None,
         show_error_log: bool = True,
         encode_utf8: bool = False,
         env: dict = None,
-    ) -> (str, int):
-
+    ) -> tuple[str, int]:
         command = K8sHelmBaseConnector._remove_multiple_spaces(command)
         self.log.debug(
             "Executing async local command: {}, env: {}".format(command, env)
         command = K8sHelmBaseConnector._remove_multiple_spaces(command)
         self.log.debug(
             "Executing async local command: {}, env: {}".format(command, env)
@@ -1446,17 +1560,18 @@ class K8sHelmBaseConnector(K8sConnector):
             environ.update(env)
 
         try:
             environ.update(env)
 
         try:
-            process = await asyncio.create_subprocess_exec(
-                *command,
-                stdout=asyncio.subprocess.PIPE,
-                stderr=asyncio.subprocess.PIPE,
-                env=environ,
-            )
+            async with self.cmd_lock:
+                process = await asyncio.create_subprocess_exec(
+                    *command,
+                    stdout=asyncio.subprocess.PIPE,
+                    stderr=asyncio.subprocess.PIPE,
+                    env=environ,
+                )
 
 
-            # wait for command terminate
-            stdout, stderr = await process.communicate()
+                # wait for command terminate
+                stdout, stderr = await process.communicate()
 
 
-            return_code = process.returncode
+                return_code = process.returncode
 
             output = ""
             if stdout:
 
             output = ""
             if stdout:
@@ -1483,6 +1598,9 @@ class K8sHelmBaseConnector(K8sConnector):
             return output, return_code
 
         except asyncio.CancelledError:
             return output, return_code
 
         except asyncio.CancelledError:
+            # first, kill the process if it is still running
+            if process.returncode is None:
+                process.kill()
             raise
         except K8sException:
             raise
             raise
         except K8sException:
             raise
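Killing the child on `CancelledError` avoids leaving orphan helm/kubectl processes behind when a task is cancelled mid-command. A self-contained sketch of the same pattern (lock plus kill), with names of my own choosing:

    import asyncio

    cmd_lock = asyncio.Lock()

    async def run_serialized(*argv: str) -> tuple[bytes, int]:
        """Run one subprocess at a time; kill it if the task is cancelled."""
        process = None
        try:
            async with cmd_lock:
                process = await asyncio.create_subprocess_exec(
                    *argv,
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.PIPE,
                )
                stdout, _ = await process.communicate()
                return stdout, process.returncode
        except asyncio.CancelledError:
            # avoid leaving an orphan child process behind
            if process is not None and process.returncode is None:
                process.kill()
            raise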
@@ -1503,7 +1621,6 @@ class K8sHelmBaseConnector(K8sConnector):
         encode_utf8: bool = False,
         env: dict = None,
     ):
         encode_utf8: bool = False,
         env: dict = None,
     ):
-
         command1 = K8sHelmBaseConnector._remove_multiple_spaces(command1)
         command2 = K8sHelmBaseConnector._remove_multiple_spaces(command2)
         command = "{} | {}".format(command1, command2)
         command1 = K8sHelmBaseConnector._remove_multiple_spaces(command1)
         command2 = K8sHelmBaseConnector._remove_multiple_spaces(command2)
         command = "{} | {}".format(command1, command2)
@@ -1520,16 +1637,19 @@ class K8sHelmBaseConnector(K8sConnector):
             environ.update(env)
 
         try:
             environ.update(env)
 
         try:
-            read, write = os.pipe()
-            await asyncio.create_subprocess_exec(*command1, stdout=write, env=environ)
-            os.close(write)
-            process_2 = await asyncio.create_subprocess_exec(
-                *command2, stdin=read, stdout=asyncio.subprocess.PIPE, env=environ
-            )
-            os.close(read)
-            stdout, stderr = await process_2.communicate()
+            async with self.cmd_lock:
+                read, write = os.pipe()
+                process_1 = await asyncio.create_subprocess_exec(
+                    *command1, stdout=write, env=environ
+                )
+                os.close(write)
+                process_2 = await asyncio.create_subprocess_exec(
+                    *command2, stdin=read, stdout=asyncio.subprocess.PIPE, env=environ
+                )
+                os.close(read)
+                stdout, stderr = await process_2.communicate()
 
 
-            return_code = process_2.returncode
+                return_code = process_2.returncode
 
             output = ""
             if stdout:
 
             output = ""
             if stdout:
@@ -1555,6 +1675,10 @@ class K8sHelmBaseConnector(K8sConnector):
 
             return output, return_code
         except asyncio.CancelledError:
 
             return output, return_code
         except asyncio.CancelledError:
+            # first, kill the processes if they are still running
+            for process in (process_1, process_2):
+                if process.returncode is None:
+                    process.kill()
             raise
         except K8sException:
             raise
             raise
         except K8sException:
             raise
@@ -1588,7 +1712,10 @@ class K8sHelmBaseConnector(K8sConnector):
         )
 
         command = "{} --kubeconfig={} --namespace={} get service {} -o=yaml".format(
         )
 
         command = "{} --kubeconfig={} --namespace={} get service {} -o=yaml".format(
-            self.kubectl_command, paths["kube_config"], namespace, service_name
+            self.kubectl_command,
+            paths["kube_config"],
+            quote(namespace),
+            quote(service_name),
         )
 
         output, _rc = await self._local_async_exec(
         )
 
         output, _rc = await self._local_async_exec(
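`quote` here is presumably `shlex.quote`, so namespace and service names interpolated into the kubectl command line cannot inject extra shell words; for example:

    from shlex import quote

    service_name = "svc $(reboot)"  # hostile value, for illustration only
    print("kubectl get service {}".format(quote(service_name)))
    # kubectl get service 'svc $(reboot)'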
@@ -1626,52 +1753,67 @@ class K8sHelmBaseConnector(K8sConnector):
     async def _exec_inspect_command(
         self, inspect_command: str, kdu_model: str, repo_url: str = None
     ):
     async def _exec_inspect_command(
         self, inspect_command: str, kdu_model: str, repo_url: str = None
     ):
-        """Obtains information about a kdu, no cluster (no env)."""
+        """Obtains information about an Helm Chart package (´helm show´ command)
+
+        Args:
+            inspect_command: the Helm sub command (`helm show <inspect_command> ...`)
+            kdu_model: The name or path of a Helm Chart
+            repo_url: Helm Chart repository url
+
+        Returns:
+            str: the requested info about the Helm Chart package
+        """
 
         repo_str = ""
         if repo_url:
 
         repo_str = ""
         if repo_url:
-            repo_str = " --repo {}".format(repo_url)
+            repo_str = " --repo {}".format(quote(repo_url))
 
 
-        idx = kdu_model.find("/")
-        if idx >= 0:
-            idx += 1
-            kdu_model = kdu_model[idx:]
+            # Obtain the Chart's name and store it in kdu_model
+            kdu_model, _ = self._split_repo(kdu_model=kdu_model)
 
         kdu_model, version = self._split_version(kdu_model)
         if version:
 
         kdu_model, version = self._split_version(kdu_model)
         if version:
-            version_str = "--version {}".format(version)
+            version_str = "--version {}".format(quote(version))
         else:
             version_str = ""
 
         full_command = self._get_inspect_command(
         else:
             version_str = ""
 
         full_command = self._get_inspect_command(
-            inspect_command, kdu_model, repo_str, version_str
+            show_command=inspect_command,
+            kdu_model=quote(kdu_model),
+            repo_str=repo_str,
+            version=version_str,
         )
 
         )
 
-        output, _rc = await self._local_async_exec(command=full_command)
+        output, _ = await self._local_async_exec(command=full_command)
 
         return output
 
     async def _get_replica_count_url(
         self,
         kdu_model: str,
 
         return output
 
     async def _get_replica_count_url(
         self,
         kdu_model: str,
-        repo_url: str,
+        repo_url: str = None,
         resource_name: str = None,
         resource_name: str = None,
-    ):
+    ) -> tuple[int, str]:
         """Get the replica count value in the Helm Chart Values.
 
         Args:
         """Get the replica count value in the Helm Chart Values.
 
         Args:
-            kdu_model: The name or path of a bundle
+            kdu_model: The name or path of a Helm Chart
             repo_url: Helm Chart repository url
             resource_name: Resource name
 
         Returns:
             repo_url: Helm Chart repository url
             resource_name: Resource name
 
         Returns:
-            True if replicas, False replicaCount
+            A tuple with:
+            - The number of replicas declared in the Helm Chart values; if not found, None; and
+            - The string corresponding to the replica count key in the Helm values
         """
 
         kdu_values = yaml.load(
         """
 
         kdu_values = yaml.load(
-            await self.values_kdu(kdu_model, repo_url), Loader=yaml.SafeLoader
+            await self.values_kdu(kdu_model=kdu_model, repo_url=repo_url),
+            Loader=yaml.SafeLoader,
         )
 
         )
 
+        self.log.debug(f"Obtained the Helm package values for the KDU: {kdu_values}")
+
         if not kdu_values:
             raise K8sException(
                 "kdu_values not found for kdu_model {}".format(kdu_model)
         if not kdu_values:
             raise K8sException(
                 "kdu_values not found for kdu_model {}".format(kdu_model)
@@ -1692,10 +1834,10 @@ class K8sHelmBaseConnector(K8sConnector):
         replica_str = ""
         replicas = None
 
         replica_str = ""
         replicas = None
 
-        if kdu_values.get("replicaCount", None):
+        if kdu_values.get("replicaCount") is not None:
             replicas = kdu_values["replicaCount"]
             replica_str = "replicaCount"
             replicas = kdu_values["replicaCount"]
             replica_str = "replicaCount"
-        elif kdu_values.get("replicas", None):
+        elif kdu_values.get("replicas") is not None:
             duplicate_check = True
             replicas = kdu_values["replicas"]
             replica_str = "replicas"
             duplicate_check = True
             replicas = kdu_values["replicas"]
             replica_str = "replicas"
@@ -1734,7 +1876,7 @@ class K8sHelmBaseConnector(K8sConnector):
         namespace: str,
         kubeconfig: str,
         resource_name: str = None,
         namespace: str,
         kubeconfig: str,
         resource_name: str = None,
-    ):
+    ) -> int:
         """Get the replica count value in the instance.
 
         Args:
         """Get the replica count value in the instance.
 
         Args:
@@ -1744,7 +1886,7 @@ class K8sHelmBaseConnector(K8sConnector):
             resource_name: Resource name
 
         Returns:
             resource_name: Resource name
 
         Returns:
-            True if replicas, False replicaCount
+            The number of replicas of the specific instance; if not found, returns None
         """
 
         kdu_values = yaml.load(
         """
 
         kdu_values = yaml.load(
@@ -1752,23 +1894,23 @@ class K8sHelmBaseConnector(K8sConnector):
             Loader=yaml.SafeLoader,
         )
 
             Loader=yaml.SafeLoader,
         )
 
+        self.log.debug(f"Obtained the Helm values for the KDU instance: {kdu_values}")
+
         replicas = None
 
         if kdu_values:
             resource_values = (
                 kdu_values.get(resource_name, None) if resource_name else None
             )
         replicas = None
 
         if kdu_values:
             resource_values = (
                 kdu_values.get(resource_name, None) if resource_name else None
             )
-            replicas = (
-                (
-                    resource_values.get("replicaCount", None)
-                    or resource_values.get("replicas", None)
-                )
-                if resource_values
-                else (
-                    kdu_values.get("replicaCount", None)
-                    or kdu_values.get("replicas", None)
-                )
-            )
+
+            for replica_str in ("replicaCount", "replicas"):
+                if resource_values:
+                    replicas = resource_values.get(replica_str)
+                else:
+                    replicas = kdu_values.get(replica_str)
+
+                if replicas is not None:
+                    break
 
         return replicas
 
 
         return replicas
 
@@ -1778,52 +1920,60 @@ class K8sHelmBaseConnector(K8sConnector):
         operation: str,
         kdu_instance: str,
         namespace: str = None,
         operation: str,
         kdu_instance: str,
         namespace: str = None,
-        check_every: float = 10,
         db_dict: dict = None,
         db_dict: dict = None,
-        run_once: bool = False,
-    ):
-        while True:
-            try:
-                await asyncio.sleep(check_every)
-                detailed_status = await self._status_kdu(
-                    cluster_id=cluster_id,
-                    kdu_instance=kdu_instance,
-                    yaml_format=False,
-                    namespace=namespace,
-                )
-                status = detailed_status.get("info").get("description")
-                self.log.debug("KDU {} STATUS: {}.".format(kdu_instance, status))
-                # write status to db
-                result = await self.write_app_status_to_db(
-                    db_dict=db_dict,
-                    status=str(status),
-                    detailed_status=str(detailed_status),
-                    operation=operation,
-                )
-                if not result:
-                    self.log.info("Error writing in database. Task exiting...")
-                    return
-            except asyncio.CancelledError:
-                self.log.debug("Task cancelled")
-                return
-            except Exception as e:
-                self.log.debug(
-                    "_store_status exception: {}".format(str(e)), exc_info=True
-                )
-                pass
-            finally:
-                if run_once:
-                    return
+    ) -> None:
+        """
+        Obtains the status of the KDU instance based on Helm Charts, and stores it in the database.
+
+        :param cluster_id (str): the cluster where the KDU instance is deployed
+        :param operation (str): The operation related to the status to be updated (for instance, "install" or "upgrade")
+        :param kdu_instance (str): The KDU instance in relation to which the status is obtained
+        :param namespace (str): The Kubernetes namespace where the KDU instance was deployed. Defaults to None
+        :param db_dict (dict): A dictionary with the necessary database information. It shall contain the
+        values for the keys:
+            - "collection": The Mongo DB collection to write to
+            - "filter": The query filter to use in the update process
+            - "path": The dot separated keys which targets the object to be updated
+        Defaults to None.
+        """
+
+        try:
+            detailed_status = await self._status_kdu(
+                cluster_id=cluster_id,
+                kdu_instance=kdu_instance,
+                yaml_format=False,
+                namespace=namespace,
+            )
+
+            status = detailed_status.get("info").get("description")
+            self.log.debug(f"Status for KDU {kdu_instance} obtained: {status}.")
+
+            # write status to db
+            result = await self.write_app_status_to_db(
+                db_dict=db_dict,
+                status=str(status),
+                detailed_status=str(detailed_status),
+                operation=operation,
+            )
+
+            if not result:
+                self.log.info("Error writing in database. Task exiting...")
+
+        except asyncio.CancelledError as e:
+            self.log.warning(
+                f"Exception in method {self._store_status.__name__} (task cancelled): {e}"
+            )
+        except Exception as e:
+            self.log.warning(f"Exception in method {self._store_status.__name__}: {e}")
 
     # params for use in -f file
     # returns values file option and filename (in order to delete it at the end)
 
     # params for use in -f file
     # returns values file option and filename (in order to delete it at the end)
-    def _params_to_file_option(self, cluster_id: str, params: dict) -> (str, str):
-
+    def _params_to_file_option(self, cluster_id: str, params: dict) -> tuple[str, str]:
         if params and len(params) > 0:
             self._init_paths_env(cluster_name=cluster_id, create_if_not_exist=True)
 
             def get_random_number():
         if params and len(params) > 0:
             self._init_paths_env(cluster_name=cluster_id, create_if_not_exist=True)
 
             def get_random_number():
-                r = random.randrange(start=1, stop=99999999)
+                r = random.SystemRandom().randint(1, 99999999)
                 s = str(r)
                 while len(s) < 10:
                     s = "0" + s
                 s = str(r)
                 while len(s) < 10:
                     s = "0" + s
@@ -1833,7 +1983,7 @@ class K8sHelmBaseConnector(K8sConnector):
             for key in params:
                 value = params.get(key)
                 if "!!yaml" in str(value):
             for key in params:
                 value = params.get(key)
                 if "!!yaml" in str(value):
-                    value = yaml.load(value[7:])
+                    value = yaml.safe_load(value[7:])
                 params2[key] = value
 
             values_file = get_random_number() + ".yaml"
                 params2[key] = value
 
             values_file = get_random_number() + ".yaml"
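`yaml.safe_load` only constructs standard YAML types, whereas plain `yaml.load` with the permissive loaders of older PyYAML could instantiate arbitrary Python objects from tagged input; a quick illustration:

    import yaml

    print(yaml.safe_load("{a: 1, b: [2, 3]}"))  # {'a': 1, 'b': [2, 3]}

    try:
        yaml.safe_load("!!python/object/apply:os.system ['echo pwned']")
    except yaml.YAMLError:
        print("rejected: safe_load refuses python/* tags")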
@@ -1847,19 +1997,14 @@ class K8sHelmBaseConnector(K8sConnector):
     # params for use in --set option
     @staticmethod
     def _params_to_set_option(params: dict) -> str:
     # params for use in --set option
     @staticmethod
     def _params_to_set_option(params: dict) -> str:
-        params_str = ""
-        if params and len(params) > 0:
-            start = True
-            for key in params:
-                value = params.get(key, None)
-                if value is not None:
-                    if start:
-                        params_str += "--set "
-                        start = False
-                    else:
-                        params_str += ","
-                    params_str += "{}={}".format(key, value)
-        return params_str
+        pairs = [
+            f"{quote(str(key))}={quote(str(value))}"
+            for key, value in params.items()
+            if value is not None
+        ]
+        if not pairs:
+            return ""
+        return "--set " + ",".join(pairs)
 
     @staticmethod
     def generate_kdu_instance_name(**kwargs):
 
     @staticmethod
     def generate_kdu_instance_name(**kwargs):
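A sketch of what the rewritten helper produces, including the skipping of `None` values (standalone reimplementation, for illustration only):

    from shlex import quote

    def params_to_set_option(params: dict) -> str:
        pairs = [
            f"{quote(str(k))}={quote(str(v))}"
            for k, v in params.items()
            if v is not None
        ]
        return "--set " + ",".join(pairs) if pairs else ""

    print(params_to_set_option({"replicaCount": 3, "tag": None}))  # --set replicaCount=3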
@@ -1889,7 +2034,7 @@ class K8sHelmBaseConnector(K8sConnector):
         name += "-"
 
         def get_random_number():
         name += "-"
 
         def get_random_number():
-            r = random.randrange(start=1, stop=99999999)
+            r = random.SystemRandom().randint(1, 99999999)
             s = str(r)
             s = s.rjust(10, "0")
             return s
             s = str(r)
             s = s.rjust(10, "0")
             return s
@@ -1897,22 +2042,226 @@ class K8sHelmBaseConnector(K8sConnector):
         name = name + get_random_number()
         return name.lower()
 
         name = name + get_random_number()
         return name.lower()
 
-    def _split_version(self, kdu_model: str) -> (str, str):
+    def _split_version(self, kdu_model: str) -> tuple[str, str]:
         version = None
         version = None
-        if ":" in kdu_model:
+        if (
+            not (
+                self._is_helm_chart_a_file(kdu_model)
+                or self._is_helm_chart_a_url(kdu_model)
+            )
+            and ":" in kdu_model
+        ):
             parts = kdu_model.split(sep=":")
             if len(parts) == 2:
                 version = str(parts[1])
                 kdu_model = parts[0]
         return kdu_model, version
 
             parts = kdu_model.split(sep=":")
             if len(parts) == 2:
                 version = str(parts[1])
                 kdu_model = parts[0]
         return kdu_model, version
 
-    async def _find_repo(self, kdu_model: str, cluster_uuid: str) -> str:
-        repo_url = None
+    def _split_repo(self, kdu_model: str) -> tuple[str, str]:
+        """Obtain the Helm Chart's repository and Chart's names from the KDU model
+
+        Args:
+            kdu_model (str): Associated KDU model
+
+        Returns:
+            (str, str): Tuple with the Chart name at index 0, and the repo name
+                        at index 1; if there was a problem finding them, return None
+                        for both
+        """
+
+        chart_name = None
+        repo_name = None
+
         idx = kdu_model.find("/")
         idx = kdu_model.find("/")
-        if idx >= 0:
+        if not self._is_helm_chart_a_url(kdu_model) and idx >= 0:
+            chart_name = kdu_model[idx + 1 :]
             repo_name = kdu_model[:idx]
             repo_name = kdu_model[:idx]
+
+        return chart_name, repo_name
+
+    async def _find_repo(self, kdu_model: str, cluster_uuid: str) -> str:
+        """Obtain the Helm repository for an Helm Chart
+
+        Args:
+            kdu_model (str): the KDU model associated with the Helm Chart instantiation
+            cluster_uuid (str): The cluster UUID associated with the Helm Chart instantiation
+
+        Returns:
+            str: the repository URL; if the Helm Chart is a local one, the function returns None
+        """
+
+        _, repo_name = self._split_repo(kdu_model=kdu_model)
+
+        repo_url = None
+        if repo_name:
             # Find repository link
             local_repo_list = await self.repo_list(cluster_uuid)
             for repo in local_repo_list:
             # Find repository link
             local_repo_list = await self.repo_list(cluster_uuid)
             for repo in local_repo_list:
-                repo_url = repo["url"] if repo["name"] == repo_name else None
+                if repo["name"] == repo_name:
+                    repo_url = repo["url"]
+                    break  # no need to continue the loop once the repo link is found
+
         return repo_url
         return repo_url
+
+    def _repo_to_oci_url(self, repo):
+        db_repo = self.db.get_one("k8srepos", {"name": repo}, fail_on_empty=False)
+        if db_repo and "oci" in db_repo:
+            return db_repo.get("url")
+
+    async def _prepare_helm_chart(self, kdu_model, cluster_id):
+        # e.g.: "stable/openldap", "1.0"
+        kdu_model, version = self._split_version(kdu_model)
+        # e.g.: "openldap, stable"
+        chart_name, repo = self._split_repo(kdu_model)
+        if repo and chart_name:  # repo/chart case
+            oci_url = self._repo_to_oci_url(repo)
+            if oci_url:  # oci does not require helm repo update
+                kdu_model = f"{oci_url.rstrip('/')}/{chart_name.lstrip('/')}"  # urljoin doesn't work for oci schema
+            else:
+                await self.repo_update(cluster_id, repo)
+        return kdu_model, version
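Worked example of the split helpers on a repo-prefixed model; the real methods additionally skip local file paths and URLs, so this is a simplified reimplementation:

    model = "stable/openldap:1.2.3"

    name_ver, _, version = model.partition(":")  # _split_version -> ("stable/openldap", "1.2.3")
    repo, _, chart = name_ver.partition("/")     # _split_repo    -> ("openldap", "stable")

    assert (version, chart, repo) == ("1.2.3", "openldap", "stable")

`_prepare_helm_chart` then either rewrites the model to `<oci_url>/<chart>` when the repo is OCI-typed in `k8srepos`, or runs a repo update for that repo first.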
+
+    async def create_certificate(
+        self, cluster_uuid, namespace, dns_prefix, name, secret_name, usage
+    ):
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.create_certificate(
+            namespace=namespace,
+            name=name,
+            dns_prefix=dns_prefix,
+            secret_name=secret_name,
+            usages=[usage],
+            issuer_name="ca-issuer",
+        )
+
+    async def delete_certificate(self, cluster_uuid, namespace, certificate_name):
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.delete_certificate(namespace, certificate_name)
+
+    async def create_namespace(
+        self,
+        namespace,
+        cluster_uuid,
+        labels,
+    ):
+        """
+        Create a namespace in a specific cluster
+
+        :param namespace:    Namespace to be created
+        :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig
+        :param labels:       Dictionary with labels for the new namespace
+        :returns: None
+        """
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.create_namespace(
+            name=namespace,
+            labels=labels,
+        )
+
+    async def delete_namespace(
+        self,
+        namespace,
+        cluster_uuid,
+    ):
+        """
+        Delete a namespace in a specific cluster
+
+        :param namespace: namespace to be deleted
+        :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig
+        :returns: None
+        """
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.delete_namespace(
+            name=namespace,
+        )
+
+    async def copy_secret_data(
+        self,
+        src_secret: str,
+        dst_secret: str,
+        cluster_uuid: str,
+        data_key: str,
+        src_namespace: str = "osm",
+        dst_namespace: str = "osm",
+    ):
+        """
+        Copy a single key and value from an existing secret to a new one
+
+        :param src_secret: name of the existing secret
+        :param dst_secret: name of the new secret
+        :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig
+        :param data_key: key of the existing secret to be copied
+        :param src_namespace: Namespace of the existing secret
+        :param dst_namespace: Namespace of the new secret
+        :returns: None
+        """
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        secret_data = await kubectl.get_secret_content(
+            name=src_secret,
+            namespace=src_namespace,
+        )
+        # Only the corresponding data_key value needs to be copied
+        data = {data_key: secret_data.get(data_key)}
+        await kubectl.create_secret(
+            name=dst_secret,
+            data=data,
+            namespace=dst_namespace,
+            secret_type="Opaque",
+        )
+
+    async def setup_default_rbac(
+        self,
+        name,
+        namespace,
+        cluster_uuid,
+        api_groups,
+        resources,
+        verbs,
+        service_account,
+    ):
+        """
+        Create a basic RBAC for a new namespace.
+
+        :param name: name of both Role and Role Binding
+        :param namespace: K8s namespace
+        :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig
+        :param api_groups: Api groups to be allowed in Policy Rule
+        :param resources: Resources to be allowed in Policy Rule
+        :param verbs: Verbs to be allowed in Policy Rule
+        :param service_account: Service Account name used to bind the Role
+        :returns: None
+        """
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.create_role(
+            name=name,
+            labels={},
+            namespace=namespace,
+            api_groups=api_groups,
+            resources=resources,
+            verbs=verbs,
+        )
+        await kubectl.create_role_binding(
+            name=name,
+            labels={},
+            namespace=namespace,
+            role_name=name,
+            sa_name=service_account,
+        )
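A hedged sketch of how a caller might chain the new helpers when provisioning an isolated namespace; all names and values below are illustrative, not part of the API:

    async def provision_namespace(conn, cluster: str) -> None:
        # conn is a K8sHelmBaseConnector subclass instance; values are hypothetical
        await conn.create_namespace(
            namespace="myns", cluster_uuid=cluster, labels={"osm": "true"}
        )
        await conn.setup_default_rbac(
            name="myns-role",
            namespace="myns",
            cluster_uuid=cluster,
            api_groups=[""],
            resources=["secrets"],
            verbs=["get"],
            service_account="default",
        )
        await conn.copy_secret_data(
            src_secret="osm-ca",
            dst_secret="myns-ca",
            cluster_uuid=cluster,
            data_key="ca.crt",
        )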
diff --git a/n2vc/k8s_helm_conn.py b/n2vc/k8s_helm_conn.py
deleted file mode 100644 (file)
index 8c526f5..0000000
+++ /dev/null
@@ -1,745 +0,0 @@
-##
-# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of OSM
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-import asyncio
-from typing import Union
-import os
-import yaml
-
-from n2vc.k8s_helm_base_conn import K8sHelmBaseConnector
-from n2vc.exceptions import K8sException
-
-
-class K8sHelmConnector(K8sHelmBaseConnector):
-
-    """
-    ####################################################################################
-    ################################### P U B L I C ####################################
-    ####################################################################################
-    """
-
-    def __init__(
-        self,
-        fs: object,
-        db: object,
-        kubectl_command: str = "/usr/bin/kubectl",
-        helm_command: str = "/usr/bin/helm",
-        log: object = None,
-        on_update_db=None,
-    ):
-        """
-        Initializes helm connector for helm v2
-
-        :param fs: file system for kubernetes and helm configuration
-        :param db: database object to write current operation status
-        :param kubectl_command: path to kubectl executable
-        :param helm_command: path to helm executable
-        :param log: logger
-        :param on_update_db: callback called when k8s connector updates database
-        """
-
-        # parent class
-        K8sHelmBaseConnector.__init__(
-            self,
-            db=db,
-            log=log,
-            fs=fs,
-            kubectl_command=kubectl_command,
-            helm_command=helm_command,
-            on_update_db=on_update_db,
-        )
-
-        self.log.info("Initializing K8S Helm2 connector")
-
-        # initialize helm client-only
-        self.log.debug("Initializing helm client-only...")
-        command = "{} init --client-only {} ".format(
-            self._helm_command,
-            "--stable-repo-url {}".format(self._stable_repo_url)
-            if self._stable_repo_url
-            else "--skip-repos",
-        )
-        try:
-            asyncio.ensure_future(
-                self._local_async_exec(command=command, raise_exception_on_error=False)
-            )
-            # loop = asyncio.get_event_loop()
-            # loop.run_until_complete(self._local_async_exec(command=command,
-            # raise_exception_on_error=False))
-        except Exception as e:
-            self.warning(
-                msg="helm init failed (it was already initialized): {}".format(e)
-            )
-
-        self.log.info("K8S Helm2 connector initialized")
-
-    async def install(
-        self,
-        cluster_uuid: str,
-        kdu_model: str,
-        kdu_instance: str,
-        atomic: bool = True,
-        timeout: float = 300,
-        params: dict = None,
-        db_dict: dict = None,
-        kdu_name: str = None,
-        namespace: str = None,
-        **kwargs,
-    ):
-        """
-        Deploys a new KDU instance. It would implicitly rely on the `install` call
-        to deploy the Chart/Bundle properly parametrized (in practice, this call would
-        happen before any _initial-config-primitive_ of the VNF is called).
-
-        :param cluster_uuid: UUID of a K8s cluster known by OSM
-        :param kdu_model: chart/bundle reference (string), which can be either
-            of these options:
-            - a name of chart available via the repos known by OSM
-            - a path to a packaged chart
-            - a path to an unpacked chart directory or a URL
-        :param kdu_instance: Kdu instance name
-        :param atomic: If set, installation process purges chart/bundle on fail, also
-            will wait until all the K8s objects are active
-        :param timeout: Time in seconds to wait for the install of the chart/bundle
-            (defaults to Helm default timeout: 300s)
-        :param params: dictionary of key-value pairs for instantiation parameters
-            (overriding default values)
-        :param dict db_dict: where to write into database when the status changes.
-                        It contains a dict with {collection: <str>, filter: {},
-                        path: <str>},
-                            e.g. {collection: "nsrs", filter:
-                            {_id: <nsd-id>, path: "_admin.deployed.K8S.3"}
-        :param kdu_name: Name of the KDU instance to be installed
-        :param namespace: K8s namespace to use for the KDU instance
-        :param kwargs: Additional parameters (None yet)
-        :return: True if successful
-        """
-        self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid))
-
-        # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
-
-        # init env, paths
-        paths, env = self._init_paths_env(
-            cluster_name=cluster_uuid, create_if_not_exist=True
-        )
-
-        await self._install_impl(
-            cluster_uuid,
-            kdu_model,
-            paths,
-            env,
-            kdu_instance,
-            atomic=atomic,
-            timeout=timeout,
-            params=params,
-            db_dict=db_dict,
-            kdu_name=kdu_name,
-            namespace=namespace,
-        )
-
-        # sync fs
-        self.fs.reverse_sync(from_path=cluster_uuid)
-
-        self.log.debug("Returning kdu_instance {}".format(kdu_instance))
-        return True
-
-    async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:
-
-        self.log.debug(
-            "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url)
-        )
-
-        return await self._exec_inspect_command(
-            inspect_command="", kdu_model=kdu_model, repo_url=repo_url
-        )
-
-    """
-    ####################################################################################
-    ################################### P R I V A T E ##################################
-    ####################################################################################
-    """
-
-    def _init_paths_env(self, cluster_name: str, create_if_not_exist: bool = True):
-        """
-        Creates and returns base cluster and kube dirs.
-        Also creates helm dirs according to the directory specification; paths are
-        returned, along with the environment variables that must be provided to execute commands
-
-        Helm 2 directory specification uses the helm_home dir:
-
-        The variables assigned for these paths are:
-        - Helm home: $HELM_HOME
-        - helm kubeconfig: $KUBECONFIG
-
-        :param cluster_name:  cluster_name
-        :return: Dictionary with config_paths and dictionary with helm environment variables
-        """
-        base = self.fs.path
-        if base.endswith("/") or base.endswith("\\"):
-            base = base[:-1]
-
-        # base dir for cluster
-        cluster_dir = base + "/" + cluster_name
-
-        # kube dir
-        kube_dir = cluster_dir + "/" + ".kube"
-        if create_if_not_exist and not os.path.exists(kube_dir):
-            self.log.debug("Creating dir {}".format(kube_dir))
-            os.makedirs(kube_dir)
-
-        # helm home dir
-        helm_dir = cluster_dir + "/" + ".helm"
-        if create_if_not_exist and not os.path.exists(helm_dir):
-            self.log.debug("Creating dir {}".format(helm_dir))
-            os.makedirs(helm_dir)
-
-        config_filename = kube_dir + "/config"
-
-        # 2 - Prepare dictionary with paths
-        paths = {
-            "kube_dir": kube_dir,
-            "kube_config": config_filename,
-            "cluster_dir": cluster_dir,
-            "helm_dir": helm_dir,
-        }
-
-        for file_name, file in paths.items():
-            if "dir" in file_name and not os.path.exists(file):
-                err_msg = "{} dir does not exist".format(file)
-                self.log.error(err_msg)
-                raise K8sException(err_msg)
-
-        # 3 - Prepare environment variables
-        env = {"HELM_HOME": helm_dir, "KUBECONFIG": config_filename}
-
-        return paths, env
-
-    async def _get_services(self, cluster_id, kdu_instance, namespace, kubeconfig):
-
-        # init config, env
-        paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
-        )
-
-        command1 = "env KUBECONFIG={} {} get manifest {} ".format(
-            kubeconfig, self._helm_command, kdu_instance
-        )
-        command2 = "{} get --namespace={} -f -".format(self.kubectl_command, namespace)
-        output, _rc = await self._local_async_exec_pipe(
-            command1, command2, env=env, raise_exception_on_error=True
-        )
-        services = self._parse_services(output)
-
-        return services
-
-    async def _cluster_init(
-        self, cluster_id: str, namespace: str, paths: dict, env: dict
-    ):
-        """
-        Implements the helm version dependent cluster initialization:
-        For helm2 it initialized tiller environment if needed
-        """
-
-        # check if tiller pod is up in cluster
-        command = "{} --kubeconfig={} --namespace={} get deployments".format(
-            self.kubectl_command, paths["kube_config"], namespace
-        )
-        output, _rc = await self._local_async_exec(
-            command=command, raise_exception_on_error=True, env=env
-        )
-
-        output_table = self._output_to_table(output=output)
-
-        # find 'tiller' pod in all pods
-        already_initialized = False
-        try:
-            for row in output_table:
-                if row[0].startswith("tiller-deploy"):
-                    already_initialized = True
-                    break
-        except Exception:
-            pass
-
-        # helm init
-        n2vc_installed_sw = False
-        if not already_initialized:
-            self.log.info(
-                "Initializing helm in client and server: {}".format(cluster_id)
-            )
-            command = "{} --kubeconfig={} --namespace kube-system create serviceaccount {}".format(
-                self.kubectl_command, paths["kube_config"], self.service_account
-            )
-            _, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=False, env=env
-            )
-
-            command = (
-                "{} --kubeconfig={} create clusterrolebinding osm-tiller-cluster-rule "
-                "--clusterrole=cluster-admin --serviceaccount=kube-system:{}"
-            ).format(self.kubectl_command, paths["kube_config"], self.service_account)
-            _, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=False, env=env
-            )
-
-            command = (
-                "{} --kubeconfig={} --tiller-namespace={} --home={} --service-account {} "
-                " {} init"
-            ).format(
-                self._helm_command,
-                paths["kube_config"],
-                namespace,
-                paths["helm_dir"],
-                self.service_account,
-                "--stable-repo-url {}".format(self._stable_repo_url)
-                if self._stable_repo_url
-                else "--skip-repos",
-            )
-            _, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=True, env=env
-            )
-            n2vc_installed_sw = True
-        else:
-            # check client helm installation
-            check_file = paths["helm_dir"] + "/repository/repositories.yaml"
-            if not self._check_file_exists(
-                filename=check_file, exception_if_not_exists=False
-            ):
-                self.log.info("Initializing helm in client: {}".format(cluster_id))
-                command = (
-                    "{} --kubeconfig={} --tiller-namespace={} "
-                    "--home={} init --client-only {} "
-                ).format(
-                    self._helm_command,
-                    paths["kube_config"],
-                    namespace,
-                    paths["helm_dir"],
-                    "--stable-repo-url {}".format(self._stable_repo_url)
-                    if self._stable_repo_url
-                    else "--skip-repos",
-                )
-                output, _rc = await self._local_async_exec(
-                    command=command, raise_exception_on_error=True, env=env
-                )
-            else:
-                self.log.info("Helm client already initialized")
-
-        repo_list = await self.repo_list(cluster_id)
-        for repo in repo_list:
-            if repo["name"] == "stable" and repo["url"] != self._stable_repo_url:
-                self.log.debug("Add new stable repo url: {}")
-                await self.repo_remove(cluster_id, "stable")
-                if self._stable_repo_url:
-                    await self.repo_add(cluster_id, "stable", self._stable_repo_url)
-                break
-
-        return n2vc_installed_sw
-
-    async def _uninstall_sw(self, cluster_id: str, namespace: str):
-        # uninstall Tiller if necessary
-
-        self.log.debug("Uninstalling tiller from cluster {}".format(cluster_id))
-
-        # init paths, env
-        paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
-        )
-
-        if not namespace:
-            # find namespace for tiller pod
-            command = "{} --kubeconfig={} get deployments --all-namespaces".format(
-                self.kubectl_command, paths["kube_config"]
-            )
-            output, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=False, env=env
-            )
-            output_table = self._output_to_table(output=output)
-            namespace = None
-            for r in output_table:
-                try:
-                    if "tiller-deploy" in r[1]:
-                        namespace = r[0]
-                        break
-                except Exception:
-                    pass
-            else:
-                msg = "Tiller deployment not found in cluster {}".format(cluster_id)
-                self.log.error(msg)
-
-            self.log.debug("namespace for tiller: {}".format(namespace))
-
-        if namespace:
-            # uninstall tiller from cluster
-            self.log.debug("Uninstalling tiller from cluster {}".format(cluster_id))
-            command = "{} --kubeconfig={} --home={} reset".format(
-                self._helm_command, paths["kube_config"], paths["helm_dir"]
-            )
-            self.log.debug("resetting: {}".format(command))
-            output, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=True, env=env
-            )
-            # Delete clusterrolebinding and serviceaccount.
-            # Ignore if errors for backward compatibility
-            command = (
-                "{} --kubeconfig={} delete clusterrolebinding.rbac.authorization.k8s."
-                "io/osm-tiller-cluster-rule"
-            ).format(self.kubectl_command, paths["kube_config"])
-            output, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=False, env=env
-            )
-            command = (
-                "{} --kubeconfig={} --namespace {} delete serviceaccount/{}".format(
-                    self.kubectl_command,
-                    paths["kube_config"],
-                    namespace,
-                    self.service_account,
-                )
-            )
-            output, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=False, env=env
-            )
-
-        else:
-            self.log.debug("namespace not found")
-
-    async def _instances_list(self, cluster_id):
-
-        # init paths, env
-        paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
-        )
-
-        command = "{} list --output yaml".format(self._helm_command)
-
-        output, _rc = await self._local_async_exec(
-            command=command, raise_exception_on_error=True, env=env
-        )
-
-        if output and len(output) > 0:
-            # parse yaml and update keys to lower case to unify with helm3
-            instances = yaml.load(output, Loader=yaml.SafeLoader).get("Releases")
-            new_instances = []
-            for instance in instances:
-                new_instance = dict((k.lower(), v) for k, v in instance.items())
-                new_instances.append(new_instance)
-            return new_instances
-        else:
-            return []
-
-    def _get_inspect_command(
-        self, show_command: str, kdu_model: str, repo_str: str, version: str
-    ):
-        inspect_command = "{} inspect {} {}{} {}".format(
-            self._helm_command, show_command, kdu_model, repo_str, version
-        )
-        return inspect_command
-
-    def _get_get_command(
-        self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
-    ):
-        get_command = "env KUBECONFIG={} {} get {} {} --output yaml".format(
-            kubeconfig, self._helm_command, get_command, kdu_instance
-        )
-        return get_command
-
-    async def _status_kdu(
-        self,
-        cluster_id: str,
-        kdu_instance: str,
-        namespace: str = None,
-        yaml_format: bool = False,
-        show_error_log: bool = False,
-    ) -> Union[str, dict]:
-
-        self.log.debug(
-            "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace)
-        )
-
-        # init config, env
-        paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
-        )
-        command = ("env KUBECONFIG={} {} status {} --output yaml").format(
-            paths["kube_config"], self._helm_command, kdu_instance
-        )
-        output, rc = await self._local_async_exec(
-            command=command,
-            raise_exception_on_error=True,
-            show_error_log=show_error_log,
-            env=env,
-        )
-
-        if yaml_format:
-            return str(output)
-
-        if rc != 0:
-            return None
-
-        data = yaml.load(output, Loader=yaml.SafeLoader)
-
-        # remove field 'notes'
-        try:
-            del data.get("info").get("status")["notes"]
-        except KeyError:
-            pass
-
-        # parse the manifest to a list of dictionaries
-        if "manifest" in data:
-            manifest_str = data.get("manifest")
-            manifest_docs = yaml.load_all(manifest_str, Loader=yaml.SafeLoader)
-
-            data["manifest"] = []
-            for doc in manifest_docs:
-                data["manifest"].append(doc)
-
-        # parse field 'resources'
-        try:
-            resources = str(data.get("info").get("status").get("resources"))
-            resource_table = self._output_to_table(resources)
-            data.get("info").get("status")["resources"] = resource_table
-        except Exception:
-            pass
-
-        # set description to lowercase (unify with helm3)
-        try:
-            data.get("info")["description"] = data.get("info").pop("Description")
-        except KeyError:
-            pass
-
-        return data
-
-    def _get_helm_chart_repos_ids(self, cluster_uuid) -> list:
-        repo_ids = []
-        cluster_filter = {"_admin.helm-chart.id": cluster_uuid}
-        cluster = self.db.get_one("k8sclusters", cluster_filter)
-        if cluster:
-            repo_ids = cluster.get("_admin").get("helm_chart_repos") or []
-            return repo_ids
-        else:
-            raise K8sException(
-                "k8cluster with helm-id : {} not found".format(cluster_uuid)
-            )
-
-    async def _is_install_completed(self, cluster_id: str, kdu_instance: str) -> bool:
-        # init config, env
-        paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
-        )
-
-        status = await self._status_kdu(
-            cluster_id=cluster_id, kdu_instance=kdu_instance, yaml_format=False
-        )
-
-        # extract info.status.resources-> str
-        # format:
-        #       ==> v1/Deployment
-        #       NAME                    READY   UP-TO-DATE   AVAILABLE   AGE
-        #       halting-horse-mongodb   0/1     1            0           0s
-        #       halting-petit-mongodb   1/1     1            0           0s
-        # blank line
-        resources = K8sHelmBaseConnector._get_deep(
-            status, ("info", "status", "resources")
-        )
-
-        # convert to table
-        resources = K8sHelmBaseConnector._output_to_table(resources)
-
-        num_lines = len(resources)
-        index = 0
-        ready = True
-        while index < num_lines:
-            try:
-                line1 = resources[index]
-                index += 1
-                # find '==>' in column 0
-                if line1[0] == "==>":
-                    line2 = resources[index]
-                    index += 1
-                    # find READY in column 1
-                    if line2[1] == "READY":
-                        # read next lines
-                        line3 = resources[index]
-                        index += 1
-                        while len(line3) > 1 and index < num_lines:
-                            ready_value = line3[1]
-                            parts = ready_value.split(sep="/")
-                            current = int(parts[0])
-                            total = int(parts[1])
-                            if current < total:
-                                self.log.debug("NOT READY:\n    {}".format(line3))
-                                ready = False
-                            line3 = resources[index]
-                            index += 1
-
-            except Exception:
-                pass
-
-        return ready
-
-    def _get_install_command(
-        self,
-        kdu_model,
-        kdu_instance,
-        namespace,
-        params_str,
-        version,
-        atomic,
-        timeout,
-        kubeconfig,
-    ) -> str:
-
-        timeout_str = ""
-        if timeout:
-            timeout_str = "--timeout {}".format(timeout)
-
-        # atomic
-        atomic_str = ""
-        if atomic:
-            atomic_str = "--atomic"
-        # namespace
-        namespace_str = ""
-        if namespace:
-            namespace_str = "--namespace {}".format(namespace)
-
-        # version
-        version_str = ""
-        if version:
-            version_str = "--version {}".format(version)
-
-        command = (
-            "env KUBECONFIG={kubeconfig} {helm} install {atomic} --output yaml  "
-            "{params} {timeout} --name={name} {ns} {model} {ver}".format(
-                kubeconfig=kubeconfig,
-                helm=self._helm_command,
-                atomic=atomic_str,
-                params=params_str,
-                timeout=timeout_str,
-                name=kdu_instance,
-                ns=namespace_str,
-                model=kdu_model,
-                ver=version_str,
-            )
-        )
-        return command
-
-    def _get_upgrade_scale_command(
-        self,
-        kdu_model: str,
-        kdu_instance: str,
-        namespace: str,
-        scale: int,
-        version: str,
-        atomic: bool,
-        replica_str: str,
-        timeout: float,
-        resource_name: str,
-        kubeconfig: str,
-    ) -> str:
-
-        timeout_str = ""
-        if timeout:
-            timeout_str = "--timeout {}s".format(timeout)
-
-        # atomic
-        atomic_str = ""
-        if atomic:
-            atomic_str = "--atomic"
-
-        # version
-        version_str = ""
-        if version:
-            version_str = "--version {}".format(version)
-
-        # scale
-        if resource_name:
-            scale_dict = {"{}.{}".format(resource_name, replica_str): scale}
-        else:
-            scale_dict = {replica_str: scale}
-
-        scale_str = self._params_to_set_option(scale_dict)
-
-        command = (
-            "env KUBECONFIG={kubeconfig} {helm} upgrade {atomic} --output yaml {scale} {timeout} {name} {model} {ver}"
-        ).format(
-            helm=self._helm_command,
-            name=kdu_instance,
-            atomic=atomic_str,
-            scale=scale_str,
-            timeout=timeout_str,
-            model=kdu_model,
-            ver=version_str,
-            kubeconfig=kubeconfig,
-        )
-        return command
-
-    def _get_upgrade_command(
-        self,
-        kdu_model,
-        kdu_instance,
-        namespace,
-        params_str,
-        version,
-        atomic,
-        timeout,
-        kubeconfig,
-    ) -> str:
-
-        timeout_str = ""
-        if timeout:
-            timeout_str = "--timeout {}".format(timeout)
-
-        # atomic
-        atomic_str = ""
-        if atomic:
-            atomic_str = "--atomic"
-
-        # version
-        version_str = ""
-        if version:
-            version_str = "--version {}".format(version)
-
-        command = (
-            "env KUBECONFIG={kubeconfig} {helm} upgrade {atomic} --output yaml {params} {timeout} {name} {model} {ver}"
-        ).format(
-            kubeconfig=kubeconfig,
-            helm=self._helm_command,
-            atomic=atomic_str,
-            params=params_str,
-            timeout=timeout_str,
-            name=kdu_instance,
-            model=kdu_model,
-            ver=version_str,
-        )
-        return command
-
-    def _get_rollback_command(
-        self, kdu_instance, namespace, revision, kubeconfig
-    ) -> str:
-        return "env KUBECONFIG={} {} rollback {} {} --wait".format(
-            kubeconfig, self._helm_command, kdu_instance, revision
-        )
-
-    def _get_uninstall_command(
-        self, kdu_instance: str, namespace: str, kubeconfig: str
-    ) -> str:
-        return "env KUBECONFIG={} {} delete --purge  {}".format(
-            kubeconfig, self._helm_command, kdu_instance
-        )
diff --git a/n2vc/k8s_juju_conn.py b/n2vc/k8s_juju_conn.py
index 737cac6..c197221 100644 (file)
@@ -51,7 +51,6 @@ class K8sJujuConnector(K8sConnector):
         kubectl_command: str = "/usr/bin/kubectl",
         juju_command: str = "/usr/bin/juju",
         log: object = None,
         kubectl_command: str = "/usr/bin/kubectl",
         juju_command: str = "/usr/bin/juju",
         log: object = None,
-        loop: object = None,
         on_update_db=None,
     ):
         """
         on_update_db=None,
     ):
         """
@@ -60,24 +59,17 @@ class K8sJujuConnector(K8sConnector):
         :param kubectl_command: path to kubectl executable
         :param helm_command: path to helm executable
         :param log: logger
         :param kubectl_command: path to kubectl executable
         :param helm_command: path to helm executable
         :param log: logger
-        :param: loop: Asyncio loop
         """
 
         # parent class
         """
 
         # parent class
-        K8sConnector.__init__(
-            self,
-            db,
-            log=log,
-            on_update_db=on_update_db,
-        )
+        K8sConnector.__init__(self, db, log=log, on_update_db=on_update_db)
 
         self.fs = fs
 
         self.fs = fs
-        self.loop = loop or asyncio.get_event_loop()
         self.log.debug("Initializing K8S Juju connector")
 
         db_uri = EnvironConfig(prefixes=["OSMLCM_", "OSMMON_"]).get("database_uri")
         self._store = MotorStore(db_uri)
-        self.loading_libjuju = asyncio.Lock(loop=self.loop)
+        self.loading_libjuju = asyncio.Lock()
         self.uninstall_locks = {}
 
         self.log.debug("K8S Juju connector initialized")
@@ -125,44 +117,27 @@ class K8sJujuConnector(K8sConnector):
         cleanup_data = []
         try:
             self.log.debug("Initializing K8s cluster for juju")
-            kubectl.create_cluster_role(
-                name=metadata_name,
-                labels=labels,
-            )
+            kubectl.create_cluster_role(name=metadata_name, labels=labels)
             self.log.debug("Cluster role created")
             cleanup_data.append(
-                {
-                    "delete": kubectl.delete_cluster_role,
-                    "args": (metadata_name,),
-                }
+                {"delete": kubectl.delete_cluster_role, "args": (metadata_name,)}
             )
 
-            kubectl.create_service_account(
-                name=metadata_name,
-                labels=labels,
-            )
+            kubectl.create_service_account(name=metadata_name, labels=labels)
             self.log.debug("Service account created")
             cleanup_data.append(
-                {
-                    "delete": kubectl.delete_service_account,
-                    "args": (metadata_name,),
-                }
+                {"delete": kubectl.delete_service_account, "args": (metadata_name,)}
             )
 
-            kubectl.create_cluster_role_binding(
-                name=metadata_name,
-                labels=labels,
-            )
+            kubectl.create_cluster_role_binding(name=metadata_name, labels=labels)
             self.log.debug("Role binding created")
             cleanup_data.append(
                 {
-                    "delete": kubectl.delete_service_account,
+                    "delete": kubectl.delete_cluster_role_binding,
                     "args": (metadata_name,),
                 }
             )
-            token, client_cert_data = await kubectl.get_secret_data(
-                metadata_name,
-            )
+            token, client_cert_data = await kubectl.get_secret_data(metadata_name)
 
             default_storage_class = kubectl.get_default_storage_class()
             self.log.debug("Default storage class: {}".format(default_storage_class))
@@ -204,10 +179,7 @@ class K8sJujuConnector(K8sConnector):
     async def repo_list(self):
         raise MethodNotImplemented()
 
-    async def repo_remove(
-        self,
-        name: str,
-    ):
+    async def repo_remove(self, name: str):
         raise MethodNotImplemented()
 
     async def synchronize_repos(self, cluster_uuid: str, name: str):
@@ -327,12 +299,16 @@ class K8sJujuConnector(K8sConnector):
             os.chdir(new_workdir)
             bundle = "local:{}".format(kdu_model)
 
-        self.log.debug("Checking for model named {}".format(kdu_instance))
+        # default namespace to kdu_instance
+        if not namespace:
+            namespace = kdu_instance
+
+        self.log.debug("Checking for model named {}".format(namespace))
 
         # Create the new model
-        self.log.debug("Adding model: {}".format(kdu_instance))
+        self.log.debug("Adding model: {}".format(namespace))
         cloud = Cloud(cluster_uuid, self._get_credential_name(cluster_uuid))
-        await libjuju.add_model(kdu_instance, cloud)
+        await libjuju.add_model(namespace, cloud)
 
         # if model:
         # TODO: Instantiation parameters
@@ -351,10 +327,17 @@ class K8sJujuConnector(K8sConnector):
             previous_workdir = "/app/storage"
 
         self.log.debug("[install] deploying {}".format(bundle))
+        instantiation_params = params.get("overlay") if params else None
         await libjuju.deploy(
-            bundle, model_name=kdu_instance, wait=atomic, timeout=timeout
+            bundle,
+            model_name=namespace,
+            wait=atomic,
+            timeout=timeout,
+            instantiation_params=instantiation_params,
         )
         os.chdir(previous_workdir)
+
+        # update information in the database (first, the VCA status, and then, the namespace)
         if self.on_update_db:
             await self.on_update_db(
                 cluster_uuid,
@@ -362,6 +345,13 @@ class K8sJujuConnector(K8sConnector):
                 filter=db_dict["filter"],
                 vca_id=kwargs.get("vca_id"),
             )
+
+        self.db.set_one(
+            table="nsrs",
+            q_filter={"_admin.deployed.K8s.kdu-instance": kdu_instance},
+            update_dict={"_admin.deployed.K8s.$.namespace": namespace},
+        )
+
         return True
 
     async def scale(
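The new db.set_one() step above persists the chosen model name back into the NS record. Assuming OSM's common DB layer translates the "$" key segment like MongoDB's positional operator, it is roughly equivalent to this pymongo sketch (hypothetical values):

from pymongo import MongoClient

nsrs = MongoClient("mongodb://localhost:27017")["osm"]["nsrs"]
# updates only the matched element of the _admin.deployed.K8s array
nsrs.update_one(
    {"_admin.deployed.K8s.kdu-instance": "stable-openldap-0001"},
    {"$set": {"_admin.deployed.K8s.$.namespace": "my-namespace"}},
)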
@@ -370,6 +360,7 @@ class K8sJujuConnector(K8sConnector):
         scale: int,
         resource_name: str,
         total_timeout: float = 1800,
+        namespace: str = None,
         **kwargs,
     ) -> bool:
         """Scale an application in a model
@@ -379,50 +370,56 @@ class K8sJujuConnector(K8sConnector):
         :param: resource_name str:       The application name in the Juju Bundle
         :param: timeout float:           The time, in seconds, to wait for the install
                                          to finish
+        :param namespace str: The namespace (model) where the Bundle was deployed
         :param kwargs:                   Additional parameters
                                             vca_id (str): VCA ID
 
         :return: If successful, returns True
         """
 
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
         try:
             libjuju = await self._get_libjuju(kwargs.get("vca_id"))
             await libjuju.scale_application(
-                model_name=kdu_instance,
+                model_name=model_name,
                 application_name=resource_name,
                 scale=scale,
                 total_timeout=total_timeout,
             )
         except Exception as e:
-            error_msg = "Error scaling application {} in kdu instance {}: {}".format(
-                resource_name, kdu_instance, e
+            error_msg = "Error scaling application {} of the model {} of the kdu instance {}: {}".format(
+                resource_name, model_name, kdu_instance, e
             )
             self.log.error(error_msg)
             raise K8sException(message=error_msg)
         return True
 
     async def get_scale_count(
-        self,
-        resource_name: str,
-        kdu_instance: str,
-        **kwargs,
+        self, resource_name: str, kdu_instance: str, namespace: str = None, **kwargs
     ) -> int:
         """Get an application scale count
 
         :param: resource_name str:       The application name in the Juju Bundle
         :param: kdu_instance str:        KDU instance name
+        :param namespace str: The namespace (model) where the Bundle was deployed
         :param kwargs:                   Additional parameters
                                             vca_id (str): VCA ID
         :return: Return application instance count
         """
 
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
         try:
             libjuju = await self._get_libjuju(kwargs.get("vca_id"))
-            status = await libjuju.get_model_status(kdu_instance)
+            status = await libjuju.get_model_status(model_name=model_name)
             return len(status.applications[resource_name].units)
         except Exception as e:
-            error_msg = "Error getting scale count from application {} in kdu instance {}: {}".format(
-                resource_name, kdu_instance, e
+            error_msg = (
+                f"Error getting scale count from application {resource_name} of the model {model_name} of "
+                f"the kdu instance {kdu_instance}: {e}"
             )
             self.log.error(error_msg)
             raise K8sException(message=error_msg)
@@ -472,10 +469,7 @@ class K8sJujuConnector(K8sConnector):
     """Rollback"""
 
     async def rollback(
-        self,
-        cluster_uuid: str,
-        kdu_instance: str,
-        revision: int = 0,
+        self, cluster_uuid: str, kdu_instance: str, revision: int = 0
     ) -> str:
         """Rollback a model
 
@@ -492,47 +486,72 @@ class K8sJujuConnector(K8sConnector):
     """Deletion"""
 
     async def uninstall(
-        self,
-        cluster_uuid: str,
-        kdu_instance: str,
-        **kwargs,
+        self, cluster_uuid: str, kdu_instance: str, namespace: str = None, **kwargs
     ) -> bool:
         """Uninstall a KDU instance
 
         :param cluster_uuid str: The UUID of the cluster
         :param kdu_instance str: The unique name of the KDU instance
+        :param namespace str: The namespace (model) where the Bundle was deployed
         :param kwargs: Additional parameters
             vca_id (str): VCA ID
 
         :return: Returns True if successful, or raises an exception
         """
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
 
 
-        self.log.debug("[uninstall] Destroying model")
+        self.log.debug(f"[uninstall] Destroying model: {model_name}")
 
         will_not_delete = False
-        if kdu_instance not in self.uninstall_locks:
-            self.uninstall_locks[kdu_instance] = asyncio.Lock(loop=self.loop)
-        delete_lock = self.uninstall_locks[kdu_instance]
+        if model_name not in self.uninstall_locks:
+            self.uninstall_locks[model_name] = asyncio.Lock()
+        delete_lock = self.uninstall_locks[model_name]
 
         while delete_lock.locked():
             will_not_delete = True
             await asyncio.sleep(0.1)
 
         if will_not_delete:
-            self.log.info("Model {} deleted by another worker.".format(kdu_instance))
+            self.log.info("Model {} deleted by another worker.".format(model_name))
             return True
 
         try:
             async with delete_lock:
                 libjuju = await self._get_libjuju(kwargs.get("vca_id"))
 
-                await libjuju.destroy_model(kdu_instance, total_timeout=3600)
+                await libjuju.destroy_model(model_name, total_timeout=3600)
         finally:
-            self.uninstall_locks.pop(kdu_instance)
+            self.uninstall_locks.pop(model_name)
 
 
-        self.log.debug(f"[uninstall] Model {kdu_instance} destroyed")
+        self.log.debug(f"[uninstall] Model {model_name} destroyed")
         return True
 
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrades charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if status equals "completed"
+        """
+        raise K8sException(
+            "KDUs deployed with Juju Bundle do not support charm upgrade"
+        )
+
     async def exec_primitive(
         self,
         cluster_uuid: str = None,
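The uninstall() rewrite above also changes the locking key from kdu_instance to model_name. A stripped-down, self-contained sketch of the per-model delete-lock pattern it implements:

# Sketch of the per-model delete-lock pattern (asyncio.sleep stands in for
# libjuju.destroy_model); run with asyncio.run(uninstall("my-model")).
import asyncio

uninstall_locks = {}

async def uninstall(model_name: str) -> bool:
    if model_name not in uninstall_locks:
        uninstall_locks[model_name] = asyncio.Lock()
    delete_lock = uninstall_locks[model_name]

    waited = False
    while delete_lock.locked():      # another worker is already deleting this model
        waited = True
        await asyncio.sleep(0.1)
    if waited:                       # the other worker finished the job for us
        return True

    try:
        async with delete_lock:
            await asyncio.sleep(1)   # placeholder for the real model destruction
    finally:
        uninstall_locks.pop(model_name)
    return True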
@@ -541,6 +560,7 @@ class K8sJujuConnector(K8sConnector):
         timeout: float = 300,
         params: dict = None,
         db_dict: dict = None,
+        namespace: str = None,
         **kwargs,
     ) -> str:
         """Exec primitive (Juju action)
@@ -551,6 +571,7 @@ class K8sJujuConnector(K8sConnector):
         :param timeout: Timeout for action execution
         :param params: Dictionary of all the parameters needed for the action
         :param db_dict: Dictionary for any additional data
+        :param namespace str: The namespace (model) where the Bundle was deployed
         :param kwargs: Additional parameters
             vca_id (str): VCA ID
 
@@ -558,6 +579,10 @@ class K8sJujuConnector(K8sConnector):
         """
         libjuju = await self._get_libjuju(kwargs.get("vca_id"))
 
+        namespace = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+
         if not params or "application-name" not in params:
             raise K8sException(
                 "Missing application-name argument, \
@@ -566,14 +591,19 @@ class K8sJujuConnector(K8sConnector):
         try:
             self.log.debug(
                 "[exec_primitive] Getting model "
-                "kdu_instance: {}".format(kdu_instance)
+                "{} for the kdu_instance: {}".format(namespace, kdu_instance)
             )
             application_name = params["application-name"]
-            actions = await libjuju.get_actions(application_name, kdu_instance)
+            actions = await libjuju.get_actions(
+                application_name=application_name, model_name=namespace
+            )
             if primitive_name not in actions:
                 raise K8sException("Primitive {} not found".format(primitive_name))
             output, status = await libjuju.execute_action(
-                application_name, kdu_instance, primitive_name, **params
+                application_name=application_name,
+                model_name=namespace,
+                action_name=primitive_name,
+                **params,
             )
 
             if status != "completed":
@@ -582,7 +612,9 @@ class K8sJujuConnector(K8sConnector):
                 )
             if self.on_update_db:
                 await self.on_update_db(
-                    cluster_uuid, kdu_instance, filter=db_dict["filter"]
+                    cluster_uuid=cluster_uuid,
+                    kdu_instance=kdu_instance,
+                    filter=db_dict["filter"],
                 )
 
             return output
@@ -594,10 +626,7 @@ class K8sJujuConnector(K8sConnector):
 
     """Introspection"""
 
-    async def inspect_kdu(
-        self,
-        kdu_model: str,
-    ) -> dict:
+    async def inspect_kdu(self, kdu_model: str) -> dict:
         """Inspect a KDU
 
         Inspects a bundle and returns a dictionary of config parameters and
@@ -639,17 +668,14 @@ class K8sJujuConnector(K8sConnector):
 
         return kdu
 
-    async def help_kdu(
-        self,
-        kdu_model: str,
-    ) -> str:
+    async def help_kdu(self, kdu_model: str) -> str:
         """View the README
 
         If available, returns the README of the bundle.
 
         :param kdu_model str: The name or path of a bundle
 
         :return: If found, returns the contents of the README.
         """
         readme = None
 
@@ -669,6 +695,7 @@ class K8sJujuConnector(K8sConnector):
         kdu_instance: str,
         complete_status: bool = False,
         yaml_format: bool = False,
+        namespace: str = None,
         **kwargs,
     ) -> Union[str, dict]:
         """Get the status of the KDU
@@ -679,6 +706,7 @@ class K8sJujuConnector(K8sConnector):
         :param kdu_instance str: The unique id of the KDU instance
         :param complete_status: To get the complete_status of the KDU
         :param yaml_format: To get the status in proper format for NSR record
+        :param namespace str: The namespace (model) where the Bundle was deployed
         :param: kwargs: Additional parameters
             vca_id (str): VCA ID
 
@@ -688,7 +716,10 @@ class K8sJujuConnector(K8sConnector):
         libjuju = await self._get_libjuju(kwargs.get("vca_id"))
         status = {}
 
-        model_status = await libjuju.get_model_status(kdu_instance)
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+        model_status = await libjuju.get_model_status(model_name=model_name)
 
         if not complete_status:
             for name in model_status.applications:
@@ -703,9 +734,7 @@ class K8sJujuConnector(K8sConnector):
         return status
 
     async def add_relation(
-        self,
-        provider: RelationEndpoint,
-        requirer: RelationEndpoint,
+        self, provider: RelationEndpoint, requirer: RelationEndpoint
     ):
         """
         Add relation between two charmed endpoints
@@ -716,7 +745,7 @@ class K8sJujuConnector(K8sConnector):
         self.log.debug(f"adding new relation between {provider} and {requirer}")
         cross_model_relation = (
             provider.model_name != requirer.model_name
-            or requirer.vca_id != requirer.vca_id
+            or provider.vca_id != requirer.vca_id
         )
         try:
             if cross_model_relation:
@@ -729,9 +758,7 @@ class K8sJujuConnector(K8sConnector):
                         requirer.model_name, offer, provider_libjuju
                     )
                     await requirer_libjuju.add_relation(
-                        requirer.model_name,
-                        requirer.endpoint,
-                        saas_name,
+                        requirer.model_name, requirer.endpoint, saas_name
                     )
             else:
                 # Standard relation
@@ -749,34 +776,44 @@ class K8sJujuConnector(K8sConnector):
             self.log.error(message)
             raise Exception(message=message)
 
-    async def update_vca_status(self, vcastatus: dict, kdu_instance: str, **kwargs):
+    async def update_vca_status(
+        self, vcastatus: dict, kdu_instance: str, namespace: str = None, **kwargs
+    ):
         """
         Add all configs, actions, executed actions of all applications in a model to vcastatus dict
 
         :param vcastatus dict: dict containing vcastatus
         :param kdu_instance str: The unique id of the KDU instance
+        :param namespace str: The namespace (model) where the Bundle was deployed
         :param: kwargs: Additional parameters
             vca_id (str): VCA ID
 
         :return: None
         """
+
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+
         libjuju = await self._get_libjuju(kwargs.get("vca_id"))
         try:
-            for model_name in vcastatus:
+            for vca_model_name in vcastatus:
                 # Adding executed actions
-                vcastatus[model_name][
+                vcastatus[vca_model_name][
                     "executedActions"
-                ] = await libjuju.get_executed_actions(kdu_instance)
+                ] = await libjuju.get_executed_actions(model_name=model_name)
 
 
-                for application in vcastatus[model_name]["applications"]:
+                for application in vcastatus[vca_model_name]["applications"]:
                     # Adding application actions
-                    vcastatus[model_name]["applications"][application][
+                    vcastatus[vca_model_name]["applications"][application][
                         "actions"
-                    ] = await libjuju.get_actions(application, kdu_instance)
+                    ] = {}
                     # Adding application configs
-                    vcastatus[model_name]["applications"][application][
+                    vcastatus[vca_model_name]["applications"][application][
                         "configs"
-                    ] = await libjuju.get_application_configs(kdu_instance, application)
+                    ] = await libjuju.get_application_configs(
+                        model_name=model_name, application_name=application
+                    )
 
         except Exception as e:
             self.log.debug("Error in updating vca status: {}".format(str(e)))
@@ -786,10 +823,14 @@ class K8sJujuConnector(K8sConnector):
     ) -> list:
         """Return a list of services of a kdu_instance"""
 
+        namespace = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+
         credentials = self.get_credentials(cluster_uuid=cluster_uuid)
         kubectl = self._get_kubectl(credentials)
         return kubectl.get_services(
-            field_selector="metadata.namespace={}".format(kdu_instance)
+            field_selector="metadata.namespace={}".format(namespace)
         )
 
     async def get_service(
@@ -838,10 +879,7 @@ class K8sJujuConnector(K8sConnector):
         """
         return "cred-{}".format(cluster_uuid)
 
-    def get_namespace(
-        self,
-        cluster_uuid: str,
-    ) -> str:
+    def get_namespace(self, cluster_uuid: str) -> str:
         """Get the namespace UUID
         Gets the namespace's unique name
 
@@ -874,16 +912,11 @@ class K8sJujuConnector(K8sConnector):
             if not self.libjuju:
                 async with self.loading_libjuju:
                     vca_connection = await get_connection(self._store)
-                    self.libjuju = Libjuju(vca_connection, loop=self.loop, log=self.log)
+                    self.libjuju = Libjuju(vca_connection, log=self.log)
             return self.libjuju
         else:
             vca_connection = await get_connection(self._store, vca_id)
-            return Libjuju(
-                vca_connection,
-                loop=self.loop,
-                log=self.log,
-                n2vc=self,
-            )
+            return Libjuju(vca_connection, log=self.log, n2vc=self)
 
     def _get_kubectl(self, credentials: str) -> Kubectl:
         """
@@ -895,3 +928,34 @@ class K8sJujuConnector(K8sConnector):
         with open(kubecfg.name, "w") as kubecfg_file:
             kubecfg_file.write(credentials)
         return Kubectl(config_file=kubecfg.name)
+
+    def _obtain_namespace(self, kdu_instance: str, namespace: str = None) -> str:
+        """
+        Obtain the namespace/model name to use in the instantiation of a Juju Bundle in K8s. The default namespace is
+        the kdu_instance name. However, if the user passes the namespace where they want to deploy the bundle,
+        that namespace will be used.
+
+        :param kdu_instance: the default KDU instance name
+        :param namespace: the namespace passed by the User
+        """
+
+        # default the namespace/model name to the kdu_instance name. TODO -> this should simply be
+        #  "return namespace if namespace else kdu_instance", but since the namespace is not passed in most
+        #  methods yet, it has to be resolved in another way for now.
+
+        # TODO -> as referred above, this fallback should be avoided in the future; it is temporary, in order
+        #  to avoid compatibility issues
+        return (
+            namespace
+            if namespace
+            else self._obtain_namespace_from_db(kdu_instance=kdu_instance)
+        )
+
+    def _obtain_namespace_from_db(self, kdu_instance: str) -> str:
+        db_nsrs = self.db.get_one(
+            table="nsrs", q_filter={"_admin.deployed.K8s.kdu-instance": kdu_instance}
+        )
+        for k8s in db_nsrs["_admin"]["deployed"]["K8s"]:
+            if k8s.get("kdu-instance") == kdu_instance:
+                return k8s.get("namespace")
+        return ""
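A small self-contained sketch (hypothetical data) of the resolution order these two helpers implement: an explicitly passed namespace wins, otherwise the namespace is looked up in the nsrs record:

# Sketch of _obtain_namespace()'s fallback order, with an in-memory stand-in
# for the nsrs document.
db_nsrs = {
    "_admin": {
        "deployed": {
            "K8s": [{"kdu-instance": "stable-openldap-0001", "namespace": "ns-a"}]
        }
    }
}

def obtain_namespace(kdu_instance, namespace=None):
    if namespace:                                        # explicit namespace always wins
        return namespace
    for k8s in db_nsrs["_admin"]["deployed"]["K8s"]:     # fallback: look it up in nsrs
        if k8s.get("kdu-instance") == kdu_instance:
            return k8s.get("namespace")
    return ""

assert obtain_namespace("stable-openldap-0001") == "ns-a"
assert obtain_namespace("stable-openldap-0001", "ns-b") == "ns-b"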
index a56b6cd..c16c95a 100644 (file)
@@ -16,19 +16,29 @@ import base64
 import logging
 from typing import Dict
 import typing
+import uuid
+import json
 
 
+from distutils.version import LooseVersion
 
 from kubernetes import client, config
+from kubernetes.client.api import VersionApi
 from kubernetes.client.models import (
     V1ClusterRole,
+    V1Role,
     V1ObjectMeta,
     V1PolicyRule,
     V1ServiceAccount,
     V1ClusterRoleBinding,
+    V1RoleBinding,
     V1RoleRef,
     V1Subject,
+    V1Secret,
+    V1SecretReference,
+    V1Namespace,
 )
 from kubernetes.client.rest import ApiException
+from n2vc.libjuju import retry_callback
 from retrying_async import retry
 
 
@@ -38,6 +48,7 @@ SERVICE_ACCOUNT_ROOT_CA_KEY = "ca.crt"
 CORE_CLIENT = "core_v1"
 RBAC_CLIENT = "rbac_v1"
 STORAGE_CLIENT = "storage_v1"
+CUSTOM_OBJECT_CLIENT = "custom_object"
 
 
 class Kubectl:
@@ -47,6 +58,7 @@ class Kubectl:
             CORE_CLIENT: client.CoreV1Api(),
             RBAC_CLIENT: client.RbacAuthorizationV1Api(),
             STORAGE_CLIENT: client.StorageV1Api(),
+            CUSTOM_OBJECT_CLIENT: client.CustomObjectsApi(),
         }
         self._configuration = config.kube_config.Configuration.get_default_copy()
         self.logger = logging.getLogger("Kubectl")
@@ -154,9 +166,7 @@ class Kubectl:
         )
 
         if len(cluster_roles.items) > 0:
-            raise Exception(
-                "Cluster role with metadata.name={} already exists".format(name)
-            )
+            raise Exception("Role with metadata.name={} already exists".format(name))
 
         metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace)
         # Cluster role
@@ -170,6 +180,46 @@ class Kubectl:
 
         self.clients[RBAC_CLIENT].create_cluster_role(cluster_role)
 
+    async def create_role(
+        self,
+        name: str,
+        labels: Dict[str, str],
+        api_groups: list,
+        resources: list,
+        verbs: list,
+        namespace: str,
+    ):
+        """
+        Create a role with one PolicyRule
+
+        :param: name:       Name of the namespaced Role
+        :param: labels:     Labels for namespaced Role metadata
+        :param: api_groups: List with api-groups allowed in the policy rule
+        :param: resources:  List with resources allowed in the policy rule
+        :param: verbs:      List with verbs allowed in the policy rule
+        :param: namespace:  Kubernetes namespace for Role metadata
+
+        :return: None
+        """
+
+        roles = self.clients[RBAC_CLIENT].list_namespaced_role(
+            namespace, field_selector="metadata.name={}".format(name)
+        )
+
+        if len(roles.items) > 0:
+            raise Exception("Role with metadata.name={} already exists".format(name))
+
+        metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace)
+
+        role = V1Role(
+            metadata=metadata,
+            rules=[
+                V1PolicyRule(api_groups=api_groups, resources=resources, verbs=verbs),
+            ],
+        )
+
+        self.clients[RBAC_CLIENT].create_namespaced_role(namespace, role)
+
     def delete_cluster_role(self, name: str):
         """
         Delete a cluster role
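A hypothetical usage sketch for the new namespaced-Role helper added above (names and values are illustrative only; a reachable cluster and kubeconfig are assumed):

import asyncio
from n2vc.kubectl import Kubectl

async def main():
    kubectl = Kubectl(config_file="/root/.kube/config")  # hypothetical path
    await kubectl.create_role(
        name="pod-reader",
        labels={"rbac-id": "1234"},
        api_groups=[""],
        resources=["pods"],
        verbs=["get", "list", "watch"],
        namespace="osm",
    )

asyncio.run(main())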
@@ -178,6 +228,58 @@ class Kubectl:
         """
         self.clients[RBAC_CLIENT].delete_cluster_role(name)
 
+    def _get_kubectl_version(self):
+        version = VersionApi().get_code()
+        return "{}.{}".format(version.major, version.minor)
+
+    def _need_to_create_new_secret(self):
+        min_k8s_version = "1.24"
+        current_k8s_version = self._get_kubectl_version()
+        return LooseVersion(min_k8s_version) <= LooseVersion(current_k8s_version)
+
+    def _get_secret_name(self, service_account_name: str):
+        random_alphanum = str(uuid.uuid4())[:5]
+        return "{}-token-{}".format(service_account_name, random_alphanum)
+
+    def _create_service_account_secret(
+        self, service_account_name: str, namespace: str, secret_name: str
+    ):
+        """
+        Create a secret for the service account. K8s version >= 1.24
+
+        :param: service_account_name: Name of the service account
+        :param: namespace:  Kubernetes namespace for service account metadata
+        :param: secret_name: Name of the secret
+        """
+        v1_core = self.clients[CORE_CLIENT]
+        secrets = v1_core.list_namespaced_secret(
+            namespace, field_selector="metadata.name={}".format(secret_name)
+        ).items
+
+        if len(secrets) > 0:
+            raise Exception(
+                "Secret with metadata.name={} already exists".format(secret_name)
+            )
+
+        annotations = {"kubernetes.io/service-account.name": service_account_name}
+        metadata = V1ObjectMeta(
+            name=secret_name, namespace=namespace, annotations=annotations
+        )
+        type = "kubernetes.io/service-account-token"
+        secret = V1Secret(metadata=metadata, type=type)
+        v1_core.create_namespaced_secret(namespace, secret)
+
+    def _get_secret_reference_list(self, namespace: str, secret_name: str):
+        """
+        Return a secret reference list with one secret.
+        K8s version >= 1.24
+
+        :param: namespace:  Kubernetes namespace for service account metadata
+        :param: secret_name: Name of the secret
+        :rtype: list[V1SecretReference]
+        """
+        return [V1SecretReference(name=secret_name, namespace=namespace)]
+
     def create_service_account(
         self,
         name: str,
@@ -192,7 +294,8 @@ class Kubectl:
         :param: namespace:  Kubernetes namespace for service account metadata
                             Default: kube-system
         """
-        service_accounts = self.clients[CORE_CLIENT].list_namespaced_service_account(
+        v1_core = self.clients[CORE_CLIENT]
+        service_accounts = v1_core.list_namespaced_service_account(
             namespace, field_selector="metadata.name={}".format(name)
         )
         if len(service_accounts.items) > 0:
@@ -201,11 +304,16 @@ class Kubectl:
             )
 
         metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace)
-        service_account = V1ServiceAccount(metadata=metadata)
 
 
-        self.clients[CORE_CLIENT].create_namespaced_service_account(
-            namespace, service_account
-        )
+        if self._need_to_create_new_secret():
+            secret_name = self._get_secret_name(name)
+            secrets = self._get_secret_reference_list(namespace, secret_name)
+            service_account = V1ServiceAccount(metadata=metadata, secrets=secrets)
+            v1_core.create_namespaced_service_account(namespace, service_account)
+            self._create_service_account_secret(name, namespace, secret_name)
+        else:
+            service_account = V1ServiceAccount(metadata=metadata)
+            v1_core.create_namespaced_service_account(namespace, service_account)
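The version gate above exists because, from Kubernetes 1.24 on, ServiceAccounts no longer get a token Secret created automatically, so the connector provisions one itself. A sketch of the equivalent manual Secret using the plain kubernetes client (hypothetical names):

# Sketch: the manual token Secret the >=1.24 branch creates; the annotation ties
# the Secret to the ServiceAccount so the control plane fills in the token.
from kubernetes import client, config

config.load_kube_config()
v1 = client.CoreV1Api()
secret = client.V1Secret(
    metadata=client.V1ObjectMeta(
        name="osm-sa-token-ab12c",          # hypothetical name
        namespace="kube-system",
        annotations={"kubernetes.io/service-account.name": "osm-sa"},
    ),
    type="kubernetes.io/service-account-token",
)
v1.create_namespaced_secret("kube-system", secret)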
 
     def delete_service_account(self, name: str, namespace: str = "kube-system"):
         """
@@ -241,6 +349,44 @@ class Kubectl:
         )
         self.clients[RBAC_CLIENT].create_cluster_role_binding(role_binding)
 
+    async def create_role_binding(
+        self,
+        name: str,
+        role_name: str,
+        sa_name: str,
+        labels: Dict[str, str],
+        namespace: str,
+    ):
+        """
+        Create a namespaced Role Binding
+
+        :param: name:       Name of the namespaced Role Binding
+        :param: role_name:  Name of the namespaced Role to be bound
+        :param: sa_name:    Name of the Service Account to be bound
+        :param: labels:     Labels for Role Binding metadata
+        :param: namespace:  Kubernetes namespace for Role Binding metadata
+
+        :return: None
+        """
+        role_bindings = self.clients[RBAC_CLIENT].list_namespaced_role_binding(
+            namespace, field_selector="metadata.name={}".format(name)
+        )
+        if len(role_bindings.items) > 0:
+            raise Exception(
+                "Role Binding with metadata.name={} already exists".format(name)
+            )
+
+        role_binding = V1RoleBinding(
+            metadata=V1ObjectMeta(name=name, labels=labels),
+            role_ref=V1RoleRef(kind="Role", name=role_name, api_group=""),
+            subjects=[
+                V1Subject(kind="ServiceAccount", name=sa_name, namespace=namespace)
+            ],
+        )
+        self.clients[RBAC_CLIENT].create_namespaced_role_binding(
+            namespace, role_binding
+        )
+
     def delete_cluster_role_binding(self, name: str):
         """
         Delete a cluster role binding
@@ -253,6 +399,7 @@ class Kubectl:
         attempts=10,
         delay=1,
         fallback=Exception("Failed getting the secret from service account"),
+        callback=retry_callback,
     )
     async def get_secret_data(
         self, name: str, namespace: str = "kube-system"
@@ -283,6 +430,7 @@ class Kubectl:
             raise Exception(
                 "Failed getting the secret from service account {}".format(name)
             )
+        # TODO: refactor to use get_secret_content
         secret = v1_core.list_namespaced_secret(
             namespace, field_selector="metadata.name={}".format(secret_name)
         ).items[0]
@@ -294,3 +442,176 @@ class Kubectl:
             base64.b64decode(token).decode("utf-8"),
             base64.b64decode(client_certificate_data).decode("utf-8"),
         )
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed getting data from the secret"),
+    )
+    async def get_secret_content(
+        self,
+        name: str,
+        namespace: str,
+    ) -> dict:
+        """
+        Get secret data
+
+        :param: name:       Name of the secret
+        :param: namespace:  Name of the namespace where the secret is stored
+
+        :return: Dictionary with secret's data
+        """
+        v1_core = self.clients[CORE_CLIENT]
+
+        secret = v1_core.read_namespaced_secret(name, namespace)
+
+        return secret.data
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed creating the secret"),
+    )
+    async def create_secret(
+        self, name: str, data: dict, namespace: str, secret_type: str
+    ):
+        """
+        Create a secret
+
+        :param: name:        Name of the secret
+        :param: data:        Dict with data content. Values must be already base64 encoded
+        :param: namespace:   Name of the namespace where the secret will be stored
+        :param: secret_type: Type of the secret, e.g., Opaque, kubernetes.io/service-account-token, kubernetes.io/tls
+
+        :return: None
+        """
+        v1_core = self.clients[CORE_CLIENT]
+        metadata = V1ObjectMeta(name=name, namespace=namespace)
+        secret = V1Secret(metadata=metadata, data=data, type=secret_type)
+        v1_core.create_namespaced_secret(namespace, secret)
+
+    async def create_certificate(
+        self,
+        namespace: str,
+        name: str,
+        dns_prefix: str,
+        secret_name: str,
+        usages: list,
+        issuer_name: str,
+    ):
+        """
+        Creates cert-manager certificate object
+
+        :param: namespace:       Name of the namespace where the certificate and secret is stored
+        :param: name:            Name of the certificate object
+        :param: dns_prefix:      Prefix for the dnsNames. They will be prefixed to the common k8s svc suffixes
+        :param: secret_name:     Name of the secret created by cert-manager
+        :param: usages:          List of X.509 key usages
+        :param: issuer_name:     Name of the cert-manager's Issuer or ClusterIssuer object
+
+        """
+        certificate_body = {
+            "apiVersion": "cert-manager.io/v1",
+            "kind": "Certificate",
+            "metadata": {"name": name, "namespace": namespace},
+            "spec": {
+                "secretName": secret_name,
+                "privateKey": {
+                    "rotationPolicy": "Always",
+                    "algorithm": "ECDSA",
+                    "size": 256,
+                },
+                "duration": "8760h",  # 1 Year
+                "renewBefore": "2208h",  # 3 months before expiry (i.e. after ~9 months)
+                "subject": {"organizations": ["osm"]},
+                "commonName": "osm",
+                "isCA": False,
+                "usages": usages,
+                "dnsNames": [
+                    "{}.{}".format(dns_prefix, namespace),
+                    "{}.{}.svc".format(dns_prefix, namespace),
+                    "{}.{}.svc.cluster".format(dns_prefix, namespace),
+                    "{}.{}.svc.cluster.local".format(dns_prefix, namespace),
+                ],
+                "issuerRef": {"name": issuer_name, "kind": "ClusterIssuer"},
+            },
+        }
+        client = self.clients[CUSTOM_OBJECT_CLIENT]
+        try:
+            client.create_namespaced_custom_object(
+                group="cert-manager.io",
+                plural="certificates",
+                version="v1",
+                body=certificate_body,
+                namespace=namespace,
+            )
+        except ApiException as e:
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "alreadyexists":
+                self.logger.warning("Certificate already exists: {}".format(e))
+            else:
+                raise e
+
+    async def delete_certificate(self, namespace, object_name):
+        client = self.clients[CUSTOM_OBJECT_CLIENT]
+        try:
+            client.delete_namespaced_custom_object(
+                group="cert-manager.io",
+                plural="certificates",
+                version="v1",
+                name=object_name,
+                namespace=namespace,
+            )
+        except ApiException as e:
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "notfound":
+                self.logger.warning("Certificate already deleted: {}".format(e))
+            else:
+                raise e
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed creating the namespace"),
+    )
+    async def create_namespace(self, name: str, labels: dict = None):
+        """
+        Create a namespace
+
+        :param: name:       Name of the namespace to be created
+        :param: labels:     Dictionary with labels for the new namespace
+
+        """
+        v1_core = self.clients[CORE_CLIENT]
+        metadata = V1ObjectMeta(name=name, labels=labels)
+        namespace = V1Namespace(
+            metadata=metadata,
+        )
+
+        try:
+            v1_core.create_namespace(namespace)
+            self.logger.debug("Namespace created: {}".format(name))
+        except ApiException as e:
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "alreadyexists":
+                self.logger.warning("Namespace already exists: {}".format(e))
+            else:
+                raise e
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed deleting the namespace"),
+    )
+    async def delete_namespace(self, name: str):
+        """
+        Delete a namespace
+
+        :param: name:       Name of the namespace to be deleted
+
+        """
+        try:
+            self.clients[CORE_CLIENT].delete_namespace(name)
+        except ApiException as e:
+            if e.reason == "Not Found":
+                self.logger.warning("Namespace already deleted: {}".format(e))
+            else:
+                raise e
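A hypothetical end-to-end sketch of the new namespace/certificate/secret helpers added in this file (assumes cert-manager is installed and a ClusterIssuer named "osm-ca-clusterissuer" exists; all names are illustrative):

import asyncio
from n2vc.kubectl import Kubectl

async def main():
    kubectl = Kubectl(config_file="/root/.kube/config")
    await kubectl.create_namespace(name="osm-vca", labels={"managed-by": "osm"})
    await kubectl.create_certificate(
        namespace="osm-vca",
        name="osm-vca-cert",
        dns_prefix="vca-svc",
        secret_name="osm-vca-cert-secret",
        usages=["server auth"],
        issuer_name="osm-ca-clusterissuer",
    )
    # cert-manager then writes tls.crt/tls.key/ca.crt into the named secret:
    data = await kubectl.get_secret_content("osm-vca-cert-secret", "osm-vca")
    print(sorted(data.keys()))  # e.g. ['ca.crt', 'tls.crt', 'tls.key']

asyncio.run(main())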
index bca3665..f36ff39 100644 (file)
 
 import asyncio
 import logging
+import os
 import typing
+import yaml
 
 import time
 
 import juju.errors
+from juju.bundle import BundleHandler
 from juju.model import Model
 from juju.machine import Machine
 from juju.application import Application
 from juju.unit import Unit
+from juju.url import URL
+from juju.version import DEFAULT_ARCHITECTURE
 from juju.client._definitions import (
     FullStatus,
     QueryApplicationOffersResults,
@@ -56,11 +61,18 @@ from retrying_async import retry
 RBAC_LABEL_KEY_NAME = "rbac-id"
 
 
+@asyncio.coroutine
+def retry_callback(attempt, exc, args, kwargs, delay=0.5, *, loop):
+    # Specifically overridden from upstream implementation so it can
+    # continue to work with Python 3.10
+    yield from asyncio.sleep(attempt * delay)
+    return retry
+
+
 class Libjuju:
     def __init__(
         self,
         vca_connection: Connection,
-        loop: asyncio.AbstractEventLoop = None,
         log: logging.Logger = None,
         n2vc: N2VCConnector = None,
     ):
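The retry_callback override above is wired into every @retry decorator in this file and in kubectl.py; presumably the upstream default callback still passed the loop= argument that asyncio.sleep dropped in Python 3.10. A usage sketch (hypothetical coroutine name):

from retrying_async import retry
from n2vc.libjuju import retry_callback

@retry(attempts=3, delay=5, callback=retry_callback)
async def flaky_connect():
    ...  # any coroutine that may need retries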
@@ -68,7 +80,6 @@ class Libjuju:
         Constructor
 
         :param: vca_connection:         n2vc.vca.connection object
-        :param: loop:                   Asyncio loop
         :param: log:                    Logger
         :param: n2vc:                   N2VC object
         """
@@ -77,15 +88,13 @@ class Libjuju:
         self.n2vc = n2vc
         self.vca_connection = vca_connection
 
-        self.loop = loop or asyncio.get_event_loop()
-        self.loop.set_exception_handler(self.handle_exception)
-        self.creating_model = asyncio.Lock(loop=self.loop)
+        self.creating_model = asyncio.Lock()
 
         if self.vca_connection.is_default:
             self.health_check_task = self._create_health_check_task()
 
     def _create_health_check_task(self):
-        return self.loop.create_task(self.health_check())
+        return asyncio.get_event_loop().create_task(self.health_check())
 
     async def get_controller(self, timeout: float = 60.0) -> Controller:
         """
@@ -122,7 +131,10 @@ class Libjuju:
             )
             if controller:
                 await self.disconnect_controller(controller)
-            raise JujuControllerFailedConnecting(e)
+
+            raise JujuControllerFailedConnecting(
+                f"Error connecting to Juju controller: {e}"
+            )
 
     async def disconnect(self):
         """Disconnect"""
@@ -147,7 +159,7 @@ class Libjuju:
         if controller:
             await controller.disconnect()
 
-    @retry(attempts=3, delay=5, timeout=None)
+    @retry(attempts=3, delay=5, timeout=None, callback=retry_callback)
     async def add_model(self, model_name: str, cloud: VcaCloud):
         """
         Create model
@@ -262,7 +274,7 @@ class Libjuju:
             await self.disconnect_controller(controller)
         return application_configs
 
-    @retry(attempts=3, delay=5)
+    @retry(attempts=3, delay=5, callback=retry_callback)
     async def get_model(self, controller: Controller, model_name: str) -> Model:
         """
         Get model from controller
@@ -546,27 +558,122 @@ class Libjuju:
         return machine_id
 
     async def deploy(
-        self, uri: str, model_name: str, wait: bool = True, timeout: float = 3600
+        self,
+        uri: str,
+        model_name: str,
+        wait: bool = True,
+        timeout: float = 3600,
+        instantiation_params: dict = None,
     ):
         """
         Deploy bundle or charm: Similar to the juju CLI command `juju deploy`
 
-        :param: uri:            Path or Charm Store uri in which the charm or bundle can be found
-        :param: model_name:     Model name
-        :param: wait:           Indicates whether to wait or not until all applications are active
-        :param: timeout:        Time in seconds to wait until all applications are active
+        :param uri:            Path or Charm Store uri in which the charm or bundle can be found
+        :param model_name:     Model name
+        :param wait:           Indicates whether to wait or not until all applications are active
+        :param timeout:        Time in seconds to wait until all applications are active
+        :param instantiation_params: To be applied as overlay bundle over primary bundle.
         """
         controller = await self.get_controller()
         model = await self.get_model(controller, model_name)
         """
         controller = await self.get_controller()
         model = await self.get_model(controller, model_name)
+        overlays = []
         try:
         try:
-            await model.deploy(uri, trust=True)
+            await self._validate_instantiation_params(uri, model, instantiation_params)
+            overlays = self._get_overlays(model_name, instantiation_params)
+            await model.deploy(uri, trust=True, overlays=overlays)
             if wait:
                 await JujuModelWatcher.wait_for_model(model, timeout=timeout)
                 self.log.debug("All units active in model {}".format(model_name))
         finally:
+            self._remove_overlay_file(overlays)
             await self.disconnect_model(model)
             await self.disconnect_controller(controller)
 
+    async def _validate_instantiation_params(
+        self, uri: str, model, instantiation_params: dict
+    ) -> None:
+        """Checks if all the applications in instantiation_params
+        exist in the original bundle.
+
+        Raises:
+            JujuApplicationNotFound if there is an invalid app in
+            the instantiation params.
+        """
+        overlay_apps = self._get_apps_in_instantiation_params(instantiation_params)
+        if not overlay_apps:
+            return
+        original_apps = await self._get_apps_in_original_bundle(uri, model)
+        if not all(app in original_apps for app in overlay_apps):
+            raise JujuApplicationNotFound(
+                "Cannot find application {} in original bundle {}".format(
+                    overlay_apps, original_apps
+                )
+            )
+
+    async def _get_apps_in_original_bundle(self, uri: str, model) -> set:
+        """Bundle is downloaded in BundleHandler.fetch_plan.
+        That method takes care of opening and exception handling.
+
+        Resolve method gets all the information regarding the channel,
+        track, revision, type, source.
+
+        Returns:
+            Set with the names of the applications in original bundle.
+        """
+        url = URL.parse(uri)
+        architecture = DEFAULT_ARCHITECTURE  # only AMD64 is allowed
+        res = await model.deploy_types[str(url.schema)].resolve(
+            url, architecture, entity_url=uri
+        )
+        handler = BundleHandler(model, trusted=True, forced=False)
+        await handler.fetch_plan(url, res.origin)
+        return handler.applications
+
+    def _get_apps_in_instantiation_params(self, instantiation_params: dict) -> list:
+        """Extract applications key in instantiation params.
+
+        Returns:
+            List with the names of the applications in instantiation params.
+
+        Raises:
+            JujuError if applications key is not found.
+        """
+        if not instantiation_params:
+            return []
+        try:
+            return [key for key in instantiation_params.get("applications")]
+        except Exception as e:
+            raise JujuError("Invalid overlay format. {}".format(str(e)))
+
+    def _get_overlays(self, model_name: str, instantiation_params: dict) -> list:
+        """Creates a temporary overlay file which includes the instantiation params.
+        Only one overlay file is created.
+
+        Returns:
+            List with one overlay filename. Empty list if there are no instantiation params.
+        """
+        if not instantiation_params:
+            return []
+        file_name = model_name + "-overlay.yaml"
+        self._write_overlay_file(file_name, instantiation_params)
+        return [file_name]
+
+    def _write_overlay_file(self, file_name: str, instantiation_params: dict) -> None:
+        with open(file_name, "w") as file:
+            yaml.dump(instantiation_params, file)
+
+    def _remove_overlay_file(self, overlay: list) -> None:
+        """Overlay contains either one or zero file names."""
+        if not overlay:
+            return
+        try:
+            filename = overlay[0]
+            os.remove(filename)
+        except OSError as e:
+            self.log.warning(
+                "Overlay file {} could not be removed: {}".format(filename, e)
+            )
+
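Note: a minimal sketch of driving the new instantiation_params flow end to end; the bundle URI, model name and option values below are illustrative, not taken from this patch:

    # "applications" must name apps present in the primary bundle, otherwise
    # _validate_instantiation_params raises JujuApplicationNotFound.
    instantiation_params = {
        "applications": {
            "my-app": {"options": {"port": 8080}},  # hypothetical app and option
        }
    }
    await libjuju.deploy(
        "ch:my-bundle",  # hypothetical Charm Store URI
        model_name="ns-1234",
        instantiation_params=instantiation_params,
    )
    # deploy() writes "ns-1234-overlay.yaml", passes it via overlays=[...],
    # and removes the file again in the finally block.
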
     async def add_unit(
         self,
         application_name: str,
@@ -595,7 +702,6 @@ class Libjuju:
             application = self._get_application(model, application_name)
 
             if application is not None:
-
                 # Checks if the given machine id in the model,
                 # otherwise function raises an error
                 _machine, _series = self._get_machine_info(model, machine_id)
@@ -750,7 +856,6 @@ class Libjuju:
 
         try:
             if application_name not in model.applications:
-
                 if machine_id is not None:
                     machine, series = self._get_machine_info(model, machine_id)
 
@@ -890,7 +995,6 @@ class Libjuju:
         return application
 
     async def resolve_application(self, model_name: str, application_name: str):
-
         controller = await self.get_controller()
         model = await self.get_model(controller, model_name)
 
@@ -923,6 +1027,34 @@ class Libjuju:
             await self.disconnect_model(model)
             await self.disconnect_controller(controller)
 
+    async def resolve(self, model_name: str):
+        controller = await self.get_controller()
+        model = await self.get_model(controller, model_name)
+        all_units_active = False
+        try:
+            while not all_units_active:
+                all_units_active = True
+                for application_name, application in model.applications.items():
+                    if application.status == "error":
+                        for unit in application.units:
+                            if unit.workload_status == "error":
+                                self.log.debug(
+                                    "Model {}, Application {}, Unit {} in error state, resolving".format(
+                                        model_name, application_name, unit.entity_id
+                                    )
+                                )
+                                try:
+                                    await unit.resolved(retry=False)
+                                    all_units_active = False
+                                except Exception:
+                                    pass
+
+                if not all_units_active:
+                    await asyncio.sleep(5)
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
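Note: usage is a single call; resolve() keeps issuing unit.resolved() every 5 seconds until no unit in the model reports the "error" workload status (model name illustrative):

    await libjuju.resolve("ns-1234")
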
     async def scale_application(
         self,
         model_name: str,
@@ -1235,10 +1367,10 @@ class Libjuju:
         try:
             await model.add_relation(endpoint_1, endpoint_2)
         except juju.errors.JujuAPIError as e:
-            if "not found" in e.message:
+            if self._relation_is_not_found(e):
                 self.log.warning("Relation not found: {}".format(e.message))
                 return
                 self.log.warning("Relation not found: {}".format(e.message))
                 return
-            if "already exists" in e.message:
+            if self._relation_already_exist(e):
                 self.log.warning("Relation already exists: {}".format(e.message))
                 return
             # another exception, raise it
                 self.log.warning("Relation already exists: {}".format(e.message))
                 return
             # another exception, raise it
@@ -1247,6 +1379,18 @@ class Libjuju:
             await self.disconnect_model(model)
             await self.disconnect_controller(controller)
 
+    def _relation_is_not_found(self, juju_error):
+        text = "not found"
+        return (text in juju_error.message) or (
+            juju_error.error_code and text in juju_error.error_code
+        )
+
+    def _relation_already_exist(self, juju_error):
+        text = "already exists"
+        return (text in juju_error.message) or (
+            juju_error.error_code and text in juju_error.error_code
+        )
+
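Note: these helpers now match on the structured error_code as well as the free-text message. A self-contained illustration of the predicate, using a stand-in class instead of the real juju.errors.JujuAPIError:

    class FakeJujuAPIError:
        # Stand-in exposing the two attributes the helpers inspect.
        def __init__(self, message, error_code=None):
            self.message = message
            self.error_code = error_code

    err = FakeJujuAPIError(message="cannot add relation", error_code="not found")
    text = "not found"
    assert (text in err.message) or (err.error_code and text in err.error_code)
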
     async def offer(self, endpoint: RelationEndpoint) -> Offer:
         """
         Create an offer from a RelationEndpoint
@@ -1326,24 +1470,28 @@ class Libjuju:
         model = None
         try:
             if not await self.model_exists(model_name, controller=controller):
+                self.log.warn(f"Model {model_name} doesn't exist")
                 return
 
-            self.log.debug("Destroying model {}".format(model_name))
-
+            self.log.debug(f"Getting model {model_name} to be destroyed")
             model = await self.get_model(controller, model_name)
+            self.log.debug(f"Destroying manual machines in model {model_name}")
             # Destroy machines that are manually provisioned
             # and still are in pending state
             await self._destroy_pending_machines(model, only_manual=True)
             await self.disconnect_model(model)
 
-            await self._destroy_model(
-                model_name,
-                controller,
+            await asyncio.wait_for(
+                self._destroy_model(model_name, controller),
                 timeout=total_timeout,
             )
         except Exception as e:
             if not await self.model_exists(model_name, controller=controller):
+                self.log.warn(
+                    f"Failed deleting model {model_name}: model doesn't exist"
+                )
                 return
+            self.log.warn(f"Failed deleting model {model_name}: {e}")
             raise e
         finally:
             if model:
@@ -1351,7 +1499,9 @@ class Libjuju:
             await self.disconnect_controller(controller)
 
     async def _destroy_model(
-        self, model_name: str, controller: Controller, timeout: float = 1800
+        self,
+        model_name: str,
+        controller: Controller,
     ):
         """
         Destroy model from controller
@@ -1360,25 +1510,41 @@ class Libjuju:
         :param: controller: Controller object
         :param: timeout: Timeout in seconds
         """
+        self.log.debug(f"Destroying model {model_name}")
 
-        async def _destroy_model_loop(model_name: str, controller: Controller):
-            while await self.model_exists(model_name, controller=controller):
+        async def _destroy_model_gracefully(model_name: str, controller: Controller):
+            self.log.info(f"Gracefully deleting model {model_name}")
+            resolved = False
+            while model_name in await controller.list_models():
+                if not resolved:
+                    await self.resolve(model_name)
+                    resolved = True
+                await controller.destroy_model(model_name, destroy_storage=True)
+
+                await asyncio.sleep(5)
+            self.log.info(f"Model {model_name} deleted gracefully")
+
+        async def _destroy_model_forcefully(model_name: str, controller: Controller):
+            self.log.info(f"Forcefully deleting model {model_name}")
+            while model_name in await controller.list_models():
                 await controller.destroy_model(
-                    model_name, destroy_storage=True, force=True, max_wait=0
+                    model_name, destroy_storage=True, force=True, max_wait=60
                 )
                 await asyncio.sleep(5)
+            self.log.info(f"Model {model_name} deleted forcefully")
 
         try:
-            await asyncio.wait_for(
-                _destroy_model_loop(model_name, controller), timeout=timeout
-            )
-        except asyncio.TimeoutError:
-            raise Exception(
-                "Timeout waiting for model {} to be destroyed".format(model_name)
-            )
+            try:
+                await asyncio.wait_for(
+                    _destroy_model_gracefully(model_name, controller), timeout=120
+                )
+            except asyncio.TimeoutError:
+                await _destroy_model_forcefully(model_name, controller)
         except juju.errors.JujuError as e:
             if any("has been removed" in error for error in e.errors):
                 return
+            if any("model not found" in error for error in e.errors):
+                return
             raise e
 
     async def destroy_application(
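Note: the net effect is a two-phase teardown: up to 120 seconds of graceful deletion, then a forced pass. The same pattern in isolation (timeout value illustrative):

    import asyncio

    async def destroy_with_fallback(graceful, forceful, grace_period=120):
        # Try the graceful coroutine first; fall back to force on timeout.
        try:
            await asyncio.wait_for(graceful(), timeout=grace_period)
        except asyncio.TimeoutError:
            await forceful()
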
@@ -1478,10 +1644,6 @@ class Libjuju:
                     await self.disconnect_model(model)
                 await self.disconnect_controller(controller)
 
-    def handle_exception(self, loop, context):
-        # All unhandled exceptions by libjuju are handled here.
-        pass
-
     async def health_check(self, interval: float = 300.0):
         """
         Health check to make sure controller and controller_model connections are OK
@@ -1686,7 +1848,9 @@ class Libjuju:
         finally:
             await self.disconnect_controller(controller)
 
-    @retry(attempts=20, delay=5, fallback=JujuLeaderUnitNotFound())
+    @retry(
+        attempts=20, delay=5, fallback=JujuLeaderUnitNotFound(), callback=retry_callback
+    )
     async def _get_leader_unit(self, application: Application) -> Unit:
         unit = None
         for u in application.units:
index d588a1d..d129b4b 100644 (file)
@@ -31,7 +31,6 @@ import time
 
 class Loggable:
     def __init__(self, log, log_to_console: bool = False, prefix: str = ""):
-
         self._last_log_time = None  # used for time increment in logging
         self._log_to_console = log_to_console
         self._prefix = prefix
@@ -93,7 +92,6 @@ class Loggable:
         include_thread: bool = False,
         include_coroutine: bool = True,
     ) -> str:
-
         # time increment from last log
         now = time.perf_counter()
         if self._last_log_time is None:
@@ -133,7 +131,7 @@ class Loggable:
         coroutine_id = ""
         if include_coroutine:
             try:
         coroutine_id = ""
         if include_coroutine:
             try:
-                if asyncio.Task.current_task() is not None:
+                if asyncio.current_task() is not None:
 
                     def print_cor_name(c):
                         import inspect
@@ -145,7 +143,7 @@ class Loggable:
                         except Exception:
                             pass
 
                         except Exception:
                             pass
 
-                    coro = asyncio.Task.current_task()._coro
+                    coro = asyncio.current_task()._coro
                     coroutine_id = "coro-{} {}()".format(
                         hex(id(coro))[2:], print_cor_name(coro)
                     )
                     coroutine_id = "coro-{} {}()".format(
                         hex(id(coro))[2:], print_cor_name(coro)
                     )
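Note: asyncio.Task.current_task() was removed in Python 3.9, so this change is required on the Python 3.10 base image. The modern equivalent in isolation:

    import asyncio

    async def current_coro_name():
        task = asyncio.current_task()  # replaces asyncio.Task.current_task()
        return task.get_coro().__qualname__ if task else None
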
index 6b0df89..01d7df8 100644 (file)
@@ -24,6 +24,7 @@
 import abc
 import asyncio
 from http import HTTPStatus
+from shlex import quote
 import os
 import shlex
 import subprocess
@@ -54,7 +55,6 @@ class N2VCConnector(abc.ABC, Loggable):
         db: object,
         fs: object,
         log: object,
-        loop: object,
         on_update_db=None,
         **kwargs,
     ):
@@ -64,7 +64,6 @@ class N2VCConnector(abc.ABC, Loggable):
         :param object fs: FileSystem object managing the package artifacts (repo common
             FsBase)
         :param object log: the logging object to log to
-        :param object loop: the loop to use for asyncio (default current thread loop)
         :param on_update_db: callback called when n2vc connector updates database.
             Received arguments:
             table: e.g. "nsrs"
@@ -85,7 +84,6 @@ class N2VCConnector(abc.ABC, Loggable):
         # store arguments into self
         self.db = db
         self.fs = fs
-        self.loop = loop or asyncio.get_event_loop()
         self.on_update_db = on_update_db
 
         # generate private/public key-pair
@@ -118,19 +116,27 @@ class N2VCConnector(abc.ABC, Loggable):
             self.log.warning("No HOME environment variable, using /tmp")
             homedir = "/tmp"
         sshdir = "{}/.ssh".format(homedir)
             self.log.warning("No HOME environment variable, using /tmp")
             homedir = "/tmp"
         sshdir = "{}/.ssh".format(homedir)
+        sshdir = os.path.realpath(os.path.normpath(os.path.abspath(sshdir)))
         if not os.path.exists(sshdir):
             os.mkdir(sshdir)
 
         self.private_key_path = "{}/id_n2vc_rsa".format(sshdir)
+        self.private_key_path = os.path.realpath(
+            os.path.normpath(os.path.abspath(self.private_key_path))
+        )
         self.public_key_path = "{}.pub".format(self.private_key_path)
+        self.public_key_path = os.path.realpath(
+            os.path.normpath(os.path.abspath(self.public_key_path))
+        )
 
         # If we don't have a key generated, then we have to generate it using ssh-keygen
         if not os.path.exists(self.private_key_path):
-            cmd = "ssh-keygen -t {} -b {} -N '' -f {}".format(
-                "rsa", "4096", self.private_key_path
+            command = "ssh-keygen -t {} -b {} -N '' -f {}".format(
+                "rsa", "4096", quote(self.private_key_path)
             )
             # run command with arguments
-            subprocess.check_output(shlex.split(cmd))
+            args = shlex.split(command)
+            subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
 
         # Read the public key. Only one public key (one line) in the file
         with open(self.public_key_path, "r") as file:
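Note: quoting the path before shlex.split() hardens key generation against spaces and shell metacharacters in user-controlled paths. The same pattern in isolation (path illustrative):

    import shlex
    import subprocess
    from shlex import quote

    def generate_keypair(private_key_path: str) -> None:
        # Quote the path before splitting so it cannot inject extra arguments.
        command = "ssh-keygen -t rsa -b 4096 -N '' -f {}".format(quote(private_key_path))
        subprocess.run(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
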
@@ -146,7 +152,7 @@ class N2VCConnector(abc.ABC, Loggable):
         reuse_ee_id: str = None,
         progress_timeout: float = None,
         total_timeout: float = None,
-    ) -> (str, dict):
+    ) -> tuple[str, dict]:
         """Create an Execution Environment. Returns when it is created or raises an
         exception on failing
 
         """Create an Execution Environment. Returns when it is created or raises an
         exception on failing
 
@@ -331,6 +337,28 @@ class N2VCConnector(abc.ABC, Loggable):
         :param float total_timeout:
         """
 
         :param float total_timeout:
         """
 
+    @abc.abstractmethod
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrade charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if status equals to "completed"
+        """
+
     @abc.abstractmethod
     async def exec_primitive(
         self,
@@ -371,7 +399,9 @@ class N2VCConnector(abc.ABC, Loggable):
     ####################################################################################
     """
 
-    def _get_namespace_components(self, namespace: str) -> (str, str, str, str, str):
+    def _get_namespace_components(
+        self, namespace: str
+    ) -> tuple[str, str, str, str, str]:
         """
         Split namespace components
 
         """
         Split namespace components
 
@@ -434,7 +464,6 @@ class N2VCConnector(abc.ABC, Loggable):
         #          .format(str(status.value), detailed_status, vca_status, entity_type))
 
         try:
-
             the_table = db_dict["collection"]
             the_filter = db_dict["filter"]
             the_path = db_dict["path"]
@@ -502,4 +531,4 @@ def obj_to_dict(obj: object) -> dict:
     # convert obj to yaml
     yaml_text = obj_to_yaml(obj)
     # parse to dict
-    return yaml.load(yaml_text, Loader=yaml.Loader)
+    return yaml.load(yaml_text, Loader=yaml.SafeLoader)
index fb36809..f28a9bd 100644 (file)
@@ -37,11 +37,12 @@ from n2vc.exceptions import (
 )
 from n2vc.n2vc_conn import N2VCConnector
 from n2vc.n2vc_conn import obj_to_dict, obj_to_yaml
-from n2vc.libjuju import Libjuju
+from n2vc.libjuju import Libjuju, retry_callback
 from n2vc.store import MotorStore
 from n2vc.utils import get_ee_id_components, generate_random_alfanum_string
 from n2vc.vca.connection import get_connection
 from retrying_async import retry
+from typing import Tuple
 
 
 class N2VCJujuConnector(N2VCConnector):
 
 
 class N2VCJujuConnector(N2VCConnector):
@@ -60,7 +61,6 @@ class N2VCJujuConnector(N2VCConnector):
         db: object,
         fs: object,
         log: object = None,
         db: object,
         fs: object,
         log: object = None,
-        loop: object = None,
         on_update_db=None,
     ):
         """
         on_update_db=None,
     ):
         """
@@ -69,19 +69,11 @@ class N2VCJujuConnector(N2VCConnector):
         :param: db: Database object from osm_common
         :param: fs: Filesystem object from osm_common
         :param: log: Logger
         :param: db: Database object from osm_common
         :param: fs: Filesystem object from osm_common
         :param: log: Logger
-        :param: loop: Asyncio loop
         :param: on_update_db: Callback function to be called for updating the database.
         """
 
         # parent class constructor
         :param: on_update_db: Callback function to be called for updating the database.
         """
 
         # parent class constructor
-        N2VCConnector.__init__(
-            self,
-            db=db,
-            fs=fs,
-            log=log,
-            loop=loop,
-            on_update_db=on_update_db,
-        )
+        N2VCConnector.__init__(self, db=db, fs=fs, log=log, on_update_db=on_update_db)
 
         # silence websocket traffic log
         logging.getLogger("websockets.protocol").setLevel(logging.INFO)
 
         # silence websocket traffic log
         logging.getLogger("websockets.protocol").setLevel(logging.INFO)
@@ -92,7 +84,7 @@ class N2VCJujuConnector(N2VCConnector):
 
         db_uri = EnvironConfig(prefixes=["OSMLCM_", "OSMMON_"]).get("database_uri")
         self._store = MotorStore(db_uri)
-        self.loading_libjuju = asyncio.Lock(loop=self.loop)
+        self.loading_libjuju = asyncio.Lock()
         self.delete_namespace_locks = {}
         self.log.info("N2VC juju connector initialized")
 
@@ -226,10 +218,7 @@ class N2VCJujuConnector(N2VCConnector):
         # create or reuse a new juju machine
         try:
             if not await libjuju.model_exists(model_name):
-                await libjuju.add_model(
-                    model_name,
-                    libjuju.vca_connection.lxd_cloud,
-                )
+                await libjuju.add_model(model_name, libjuju.vca_connection.lxd_cloud)
             machine, new = await libjuju.create_machine(
                 model_name=model_name,
                 machine_id=machine_id,
@@ -255,9 +244,7 @@ class N2VCJujuConnector(N2VCConnector):
             raise N2VCException(message=message)
 
         # new machine credentials
-        credentials = {
-            "hostname": machine.dns_name,
-        }
+        credentials = {"hostname": machine.dns_name}
 
         self.log.info(
             "Execution environment created. ee_id: {}, credentials: {}".format(
 
         self.log.info(
             "Execution environment created. ee_id: {}, credentials: {}".format(
@@ -337,10 +324,7 @@ class N2VCJujuConnector(N2VCConnector):
         # register machine on juju
         try:
             if not await libjuju.model_exists(model_name):
-                await libjuju.add_model(
-                    model_name,
-                    libjuju.vca_connection.lxd_cloud,
-                )
+                await libjuju.add_model(model_name, libjuju.vca_connection.lxd_cloud)
             machine_id = await libjuju.provision_machine(
                 model_name=model_name,
                 hostname=hostname,
@@ -371,7 +355,13 @@ class N2VCJujuConnector(N2VCConnector):
 
     # In case of native_charm is being deployed, if JujuApplicationExists error happens
     # it will try to add_unit
-    @retry(attempts=3, delay=5, retry_exceptions=(N2VCApplicationExists,), timeout=None)
+    @retry(
+        attempts=3,
+        delay=5,
+        retry_exceptions=(N2VCApplicationExists,),
+        timeout=None,
+        callback=retry_callback,
+    )
     async def install_configuration_sw(
         self,
         ee_id: str,
     async def install_configuration_sw(
         self,
         ee_id: str,
@@ -564,10 +554,7 @@ class N2VCJujuConnector(N2VCConnector):
         _, ns_id, _, _, _ = self._get_namespace_components(namespace=namespace)
         model_name = "{}-k8s".format(ns_id)
         if not await libjuju.model_exists(model_name):
         _, ns_id, _, _, _ = self._get_namespace_components(namespace=namespace)
         model_name = "{}-k8s".format(ns_id)
         if not await libjuju.model_exists(model_name):
-            await libjuju.add_model(
-                model_name,
-                libjuju.vca_connection.k8s_cloud,
-            )
+            await libjuju.add_model(model_name, libjuju.vca_connection.k8s_cloud)
         application_name = self._get_application_name(namespace)
 
         try:
@@ -586,9 +573,7 @@ class N2VCJujuConnector(N2VCConnector):
 
         self.log.info("K8s proxy charm installed")
         ee_id = N2VCJujuConnector._build_ee_id(
-            model_name=model_name,
-            application_name=application_name,
-            machine_id="k8s",
+            model_name=model_name, application_name=application_name, machine_id="k8s"
         )
 
         self._write_ee_id_db(db_dict=db_dict, ee_id=ee_id)
@@ -717,9 +702,7 @@ class N2VCJujuConnector(N2VCConnector):
         return await libjuju.get_metrics(model_name, application_name)
 
     async def add_relation(
-        self,
-        provider: RelationEndpoint,
-        requirer: RelationEndpoint,
+        self, provider: RelationEndpoint, requirer: RelationEndpoint
     ):
         """
         Add relation between two charmed endpoints
@@ -730,7 +713,7 @@ class N2VCJujuConnector(N2VCConnector):
         self.log.debug(f"adding new relation between {provider} and {requirer}")
         cross_model_relation = (
             provider.model_name != requirer.model_name
         self.log.debug(f"adding new relation between {provider} and {requirer}")
         cross_model_relation = (
             provider.model_name != requirer.model_name
-            or requirer.vca_id != requirer.vca_id
+            or provider.vca_id != requirer.vca_id
         )
         try:
             if cross_model_relation:
@@ -743,9 +726,7 @@ class N2VCJujuConnector(N2VCConnector):
                         requirer.model_name, offer, provider_libjuju
                     )
                     await requirer_libjuju.add_relation(
-                        requirer.model_name,
-                        requirer.endpoint,
-                        saas_name,
+                        requirer.model_name, requirer.endpoint, saas_name
                     )
             else:
                 # Standard relation
@@ -793,7 +774,7 @@ class N2VCJujuConnector(N2VCConnector):
         self.log.info("Deleting namespace={}".format(namespace))
         will_not_delete = False
         if namespace not in self.delete_namespace_locks:
         self.log.info("Deleting namespace={}".format(namespace))
         will_not_delete = False
         if namespace not in self.delete_namespace_locks:
-            self.delete_namespace_locks[namespace] = asyncio.Lock(loop=self.loop)
+            self.delete_namespace_locks[namespace] = asyncio.Lock()
         delete_lock = self.delete_namespace_locks[namespace]
 
         while delete_lock.locked():
@@ -829,6 +810,7 @@ class N2VCJujuConnector(N2VCConnector):
                                 model_name=model, total_timeout=total_timeout
                             )
                     except Exception as e:
+                        self.log.error(f"Error deleting namespace {namespace} : {e}")
                         raise N2VCException(
                             message="Error deleting namespace {} : {}".format(
                                 namespace, e
@@ -839,6 +821,9 @@ class N2VCJujuConnector(N2VCConnector):
                         message="only ns_id is permitted to delete yet",
                         bad_args=["namespace"],
                     )
                         message="only ns_id is permitted to delete yet",
                         bad_args=["namespace"],
                     )
+        except Exception as e:
+            self.log.error(f"Error deleting namespace {namespace} : {e}")
+            raise e
         finally:
             self.delete_namespace_locks.pop(namespace)
         self.log.info("Namespace {} deleted".format(namespace))
@@ -851,6 +836,7 @@ class N2VCJujuConnector(N2VCConnector):
         scaling_in: bool = False,
         vca_type: str = None,
         vca_id: str = None,
+        application_to_delete: str = None,
     ):
         """
         Delete an execution environment
@@ -860,10 +846,11 @@ class N2VCJujuConnector(N2VCConnector):
                             {collection: <str>, filter: {},  path: <str>},
                             e.g. {collection: "nsrs", filter:
                                 {_id: <nsd-id>, path: "_admin.deployed.VCA.3"}
-        :param: total_timeout: Total timeout
-        :param: scaling_in: Boolean to indicate if it is a scaling in operation
-        :param: vca_type: VCA type
-        :param: vca_id: VCA ID
+        :param total_timeout: Total timeout
+        :param scaling_in: Boolean to indicate if it is a scaling in operation
+        :param vca_type: VCA type
+        :param vca_id: VCA ID
+        :param application_to_delete: name of the single application to be deleted
         """
         self.log.info("Deleting execution environment ee_id={}".format(ee_id))
         libjuju = await self._get_libjuju(vca_id)
         """
         self.log.info("Deleting execution environment ee_id={}".format(ee_id))
         libjuju = await self._get_libjuju(vca_id)
@@ -878,12 +865,30 @@ class N2VCJujuConnector(N2VCConnector):
             ee_id=ee_id
         )
         try:
             ee_id=ee_id
         )
         try:
-            if not scaling_in:
-                # destroy the model
-                await libjuju.destroy_model(
+            if application_to_delete == application_name:
+                # destroy the application
+                await libjuju.destroy_application(
                     model_name=model_name,
                     model_name=model_name,
+                    application_name=application_name,
                     total_timeout=total_timeout,
                 )
                     total_timeout=total_timeout,
                 )
+                # if model is empty delete it
+                controller = await libjuju.get_controller()
+                model = await libjuju.get_model(
+                    controller=controller,
+                    model_name=model_name,
+                )
+                if not model.applications:
+                    self.log.info("Model {} is empty, deleting it".format(model_name))
+                    await libjuju.destroy_model(
+                        model_name=model_name,
+                        total_timeout=total_timeout,
+                    )
+            elif not scaling_in:
+                # destroy the model
+                await libjuju.destroy_model(
+                    model_name=model_name, total_timeout=total_timeout
+                )
             elif vca_type == "native_charm" and scaling_in:
                 # destroy the unit in the application
                 await libjuju.destroy_unit(
@@ -986,8 +991,7 @@ class N2VCJujuConnector(N2VCConnector):
                     config=params_dict,
                 )
                 actions = await libjuju.get_actions(
-                    application_name=application_name,
-                    model_name=model_name,
+                    application_name=application_name, model_name=model_name
                 )
                 self.log.debug(
                     "Application {} has these actions: {}".format(
@@ -1056,18 +1060,81 @@ class N2VCJujuConnector(N2VCConnector):
                 if status == "completed":
                     return output
                 else:
-                    raise Exception("status is not completed: {}".format(status))
+                    if "output" in output:
+                        raise Exception(f'{status}: {output["output"]}')
+                    else:
+                        raise Exception(
+                            f"{status}: No further information received from action"
+                        )
+
             except Exception as e:
-                self.log.error(
-                    "Error executing primitive {}: {}".format(primitive_name, e)
-                )
+                self.log.error(f"Error executing primitive {primitive_name}: {e}")
                 raise N2VCExecutionException(
                 raise N2VCExecutionException(
-                    message="Error executing primitive {} into ee={} : {}".format(
-                        primitive_name, ee_id, e
-                    ),
+                    message=f"Error executing primitive {primitive_name} in ee={ee_id}: {e}",
                     primitive_name=primitive_name,
                 )
 
                     primitive_name=primitive_name,
                 )
 
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrade charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if status equals to "completed"
+
+        """
+        self.log.info("Upgrading charm: {} on ee: {}".format(path, ee_id))
+        libjuju = await self._get_libjuju(charm_id)
+
+        # check arguments
+        if ee_id is None or len(ee_id) == 0:
+            raise N2VCBadArgumentsException(
+                message="ee_id is mandatory", bad_args=["ee_id"]
+            )
+        try:
+            (
+                model_name,
+                application_name,
+                machine_id,
+            ) = N2VCJujuConnector._get_ee_id_components(ee_id=ee_id)
+
+        except Exception:
+            raise N2VCBadArgumentsException(
+                message="ee_id={} is not a valid execution environment id".format(
+                    ee_id
+                ),
+                bad_args=["ee_id"],
+            )
+
+        try:
+            await libjuju.upgrade_charm(
+                application_name=application_name,
+                path=path,
+                model_name=model_name,
+                total_timeout=timeout,
+            )
+
+            return f"Charm upgraded with application name {application_name}"
+
+        except Exception as e:
+            self.log.error("Error upgrading charm {}: {}".format(path, e))
+
+            raise N2VCException(
+                message="Error upgrading charm {} in ee={} : {}".format(path, ee_id, e)
+            )
+
     async def disconnect(self, vca_id: str = None):
         """
         Disconnect from VCA
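Note: a sketch of calling the new upgrade_charm entry point; every argument value below is illustrative, and the ee_id follows the <model>.<application>.<machine> layout produced by _build_ee_id:

    output = await n2vc.upgrade_charm(
        ee_id="ns-1234.app-vnf-a-0-ab12.3",  # hypothetical execution environment id
        path="/app/storage/my-vnf-charm",    # hypothetical local charm path
        charm_type="native-charm",
        timeout=300.0,
    )
    # -> "Charm upgraded with application name app-vnf-a-0-ab12"
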
@@ -1104,19 +1171,13 @@ class N2VCJujuConnector(N2VCConnector):
             if not self.libjuju:
                 async with self.loading_libjuju:
                     vca_connection = await get_connection(self._store)
-                    self.libjuju = Libjuju(vca_connection, loop=self.loop, log=self.log)
+                    self.libjuju = Libjuju(vca_connection, log=self.log)
             return self.libjuju
         else:
             vca_connection = await get_connection(self._store, vca_id)
-            return Libjuju(
-                vca_connection,
-                loop=self.loop,
-                log=self.log,
-                n2vc=self,
-            )
+            return Libjuju(vca_connection, log=self.log, n2vc=self)
 
     def _write_ee_id_db(self, db_dict: dict, ee_id: str):
-
         # write ee_id to database: _admin.deployed.VCA.x
         try:
             the_table = db_dict["collection"]
@@ -1159,20 +1220,41 @@ class N2VCJujuConnector(N2VCConnector):
 
         return get_ee_id_components(ee_id)
 
-    def _get_application_name(self, namespace: str) -> str:
-        """
-        Build application name from namespace
-        :param namespace:
-        :return: app-vnf-<vnf id>-vdu-<vdu-id>-cnt-<vdu-count>-<random_value>
+    @staticmethod
+    def _find_charm_level(vnf_id: str, vdu_id: str) -> str:
+        """Decides the charm level.
+        Args:
+            vnf_id  (str):  VNF id
+            vdu_id  (str):  VDU id
+
+        Returns:
+            charm_level (str):  ns-level or vnf-level or vdu-level
         """
         """
+        if vdu_id and not vnf_id:
+            raise N2VCException(message="If vdu-id exists, vnf-id should be provided.")
+        if vnf_id and vdu_id:
+            return "vdu-level"
+        if vnf_id and not vdu_id:
+            return "vnf-level"
+        if not vnf_id and not vdu_id:
+            return "ns-level"
 
 
-        # TODO: Enforce the Juju 50-character application limit
+    @staticmethod
+    def _generate_backward_compatible_application_name(
+        vnf_id: str, vdu_id: str, vdu_count: str
+    ) -> str:
+        """Generate backward compatible application name
+         by limiting the app name to 50 characters.
 
 
-        # split namespace components
-        _, _, vnf_id, vdu_id, vdu_count = self._get_namespace_components(
-            namespace=namespace
-        )
+        Args:
+            vnf_id  (str):  VNF ID
+            vdu_id  (str):  VDU ID
+            vdu_count   (str):  vdu-count-index
+
+        Returns:
+            application_name (str): generated application name
 
 
+        """
         if vnf_id is None or len(vnf_id) == 0:
             vnf_id = ""
         else:
@@ -1196,6 +1278,227 @@ class N2VCJujuConnector(N2VCConnector):
         application_name = "app-{}{}{}-{}".format(
             vnf_id, vdu_id, vdu_count, random_suffix
         )
         application_name = "app-{}{}{}-{}".format(
             vnf_id, vdu_id, vdu_count, random_suffix
         )
+        return application_name
+
+    @staticmethod
+    def _get_vca_record(search_key: str, vca_records: list, vdu_id: str) -> dict:
+        """Get the correct VCA record dict depending on the search key
+
+        Args:
+            search_key  (str):      keyword to find the correct VCA record
+            vca_records (list):     All VCA records as list
+            vdu_id  (str):          VDU ID
+
+        Returns:
+            vca_record  (dict):     Dictionary which includes the correct VCA record
+
+        """
+        return next(
+            filter(lambda record: record[search_key] == vdu_id, vca_records), {}
+        )
+
+    @staticmethod
+    def _generate_application_name(
+        charm_level: str,
+        vnfrs: dict,
+        vca_records: list,
+        vnf_count: str = None,
+        vdu_id: str = None,
+        vdu_count: str = None,
+    ) -> str:
+        """Generate application name to make the relevant charm of VDU/KDU
+        in the VNFD descriptor become clearly visible.
+        Limiting the app name to 50 characters.
+
+        Args:
+            charm_level  (str):  level of charm
+            vnfrs  (dict):  vnf record dict
+            vca_records   (list):   db_nsr["_admin"]["deployed"]["VCA"] as list
+            vnf_count   (str): vnf count index
+            vdu_id   (str):  VDU ID
+            vdu_count   (str):  vdu count index
+
+        Returns:
+            application_name (str): generated application name
+
+        """
+        application_name = ""
+        if charm_level == "ns-level":
+            if len(vca_records) != 1:
+                raise N2VCException(message="One VCA record is expected.")
+            # Only one VCA record is expected if it's an ns-level charm.
+            # Shorten the charm name to its first 40 characters.
+            charm_name = vca_records[0]["charm_name"][:40]
+            if not charm_name:
+                raise N2VCException(message="Charm name should be provided.")
+            application_name = charm_name + "-ns"
+
+        elif charm_level == "vnf-level":
+            if len(vca_records) < 1:
+                raise N2VCException(message="One or more VCA record is expected.")
+            # If VNF is scaled, more than one VCA record may be included in vca_records
+            # but ee_descriptor_id is same.
+            # Shorten the ee_descriptor_id and member-vnf-index-ref
+            # to first 12 characters.
+            application_name = (
+                vca_records[0]["ee_descriptor_id"][:12]
+                + "-"
+                + vnf_count
+                + "-"
+                + vnfrs["member-vnf-index-ref"][:12]
+                + "-vnf"
+            )
+        elif charm_level == "vdu-level":
+            if len(vca_records) < 1:
+                raise N2VCException(message="One or more VCA record is expected.")
+
+            # Charms are also used for deployments with Helm charts.
+            # If deployment unit is a Helm chart/KDU,
+            # vdu_profile_id and vdu_count will be empty string.
+            if vdu_count is None:
+                vdu_count = ""
+
+            # If vnf/vdu is scaled, more than one VCA record may be included in vca_records
+            # but ee_descriptor_id is same.
+            # Shorten the ee_descriptor_id, member-vnf-index-ref and vdu_profile_id
+            # to first 12 characters.
+            if not vdu_id:
+                raise N2VCException(message="vdu-id should be provided.")
+
+            vca_record = N2VCJujuConnector._get_vca_record(
+                "vdu_id", vca_records, vdu_id
+            )
+
+            if not vca_record:
+                vca_record = N2VCJujuConnector._get_vca_record(
+                    "kdu_name", vca_records, vdu_id
+                )
+
+            application_name = (
+                vca_record["ee_descriptor_id"][:12]
+                + "-"
+                + vnf_count
+                + "-"
+                + vnfrs["member-vnf-index-ref"][:12]
+                + "-"
+                + vdu_id[:12]
+                + "-"
+                + vdu_count
+                + "-vdu"
+            )
+
+        return application_name
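Note: the resulting names, schematically (each component truncated as noted in the code; placeholders, not literal output):

    # ns-level:  <charm-name[:40]>-ns
    # vnf-level: <ee-descriptor-id[:12]>-<vnf-count>-<member-vnf-index-ref[:12]>-vnf
    # vdu-level: <ee-descriptor-id[:12]>-<vnf-count>-<member-vnf-index-ref[:12]>-<vdu-id[:12]>-<vdu-count>-vdu
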
+
+    def _get_vnf_count_and_record(
+        self, charm_level: str, vnf_id_and_count: str
+    ) -> Tuple[str, dict]:
+        """Get the vnf count and VNF record depend on charm level
+
+        Args:
+            charm_level  (str)
+            vnf_id_and_count (str)
+
+        Returns:
+            (vnf_count  (str), db_vnfr(dict)) as Tuple
+
+        """
+        vnf_count = ""
+        db_vnfr = {}
+
+        if charm_level in ("vnf-level", "vdu-level"):
+            vnf_id = "-".join(vnf_id_and_count.split("-")[:-1])
+            vnf_count = vnf_id_and_count.split("-")[-1]
+            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
+
+        # If the charm is ns level, it returns empty vnf_count and db_vnfr
+        return vnf_count, db_vnfr
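Note: the "<vnf-id>-<count>" convention this helper decodes, in isolation (identifier illustrative):

    vnf_id_and_count = "0958f4e8-vnf-2"                  # hypothetical input
    vnf_id = "-".join(vnf_id_and_count.split("-")[:-1])  # "0958f4e8-vnf"
    vnf_count = vnf_id_and_count.split("-")[-1]          # "2"
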
+
+    @staticmethod
+    def _get_vca_records(charm_level: str, db_nsr: dict, db_vnfr: dict) -> list:
+        """Get the VCA records from db_nsr dict
+
+        Args:
+            charm_level (str):  level of charm
+            db_nsr  (dict):     NS record from database
+            db_vnfr (dict):     VNF record from database
+
+        Returns:
+            vca_records (list):  List of VCA record dictionaries
+
+        """
+        vca_records = {}
+        if charm_level == "ns-level":
+            vca_records = list(
+                filter(
+                    lambda vca_record: vca_record["target_element"] == "ns",
+                    db_nsr["_admin"]["deployed"]["VCA"],
+                )
+            )
+        elif charm_level in ["vnf-level", "vdu-level"]:
+            vca_records = list(
+                filter(
+                    lambda vca_record: vca_record["member-vnf-index"]
+                    == db_vnfr["member-vnf-index-ref"],
+                    db_nsr["_admin"]["deployed"]["VCA"],
+                )
+            )
+
+        return vca_records
+
+    def _get_application_name(self, namespace: str) -> str:
+        """Build application name from namespace
+
+        Application name structure:
+            NS level: <charm-name>-ns
+            VNF level: <ee-name>-z<vnf-ordinal-scale-number>-<vnf-profile-id>-vnf
+            VDU level: <ee-name>-z<vnf-ordinal-scale-number>-<vnf-profile-id>-
+            <vdu-profile-id>-z<vdu-ordinal-scale-number>-vdu
+
+        Application naming for backward compatibility (old structure):
+            NS level: app-<random_value>
+            VNF level: app-vnf-<vnf-id>-z<ordinal-scale-number>-<random_value>
+            VDU level: app-vnf-<vnf-id>-z<vnf-ordinal-scale-number>-vdu-
+            <vdu-id>-cnt-<vdu-count>-z<vdu-ordinal-scale-number>-<random_value>
+
+        Args:
+            namespace   (str)
+
+        Returns:
+            application_name    (str)
+
+        """
+        # split namespace components
+        (
+            nsi_id,
+            ns_id,
+            vnf_id_and_count,
+            vdu_id,
+            vdu_count,
+        ) = self._get_namespace_components(namespace=namespace)
+
+        if not ns_id:
+            raise N2VCException(message="ns-id should be provided.")
+
+        charm_level = self._find_charm_level(vnf_id_and_count, vdu_id)
+        db_nsr = self.db.get_one("nsrs", {"_id": ns_id})
+        vnf_count, db_vnfr = self._get_vnf_count_and_record(
+            charm_level, vnf_id_and_count
+        )
+        vca_records = self._get_vca_records(charm_level, db_nsr, db_vnfr)
+
+        if all("charm_name" in vca_record.keys() for vca_record in vca_records):
+            application_name = self._generate_application_name(
+                charm_level,
+                db_vnfr,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+        else:
+            application_name = self._generate_backward_compatible_application_name(
+                vnf_id_and_count, vdu_id, vdu_count
+            )
 
         return N2VCJujuConnector._format_app_name(application_name)

@@ -1256,6 +1559,6 @@ class N2VCJujuConnector(N2VCConnector):
         :param: vca_id: VCA ID
         """
         vca_connection = await get_connection(self._store, vca_id=vca_id)
-        libjuju = Libjuju(vca_connection, loop=self.loop, log=self.log, n2vc=self)
+        libjuju = Libjuju(vca_connection, log=self.log, n2vc=self)
         controller = await libjuju.get_controller()
         await libjuju.disconnect_controller(controller)
index cd6c6fb..c8e5910 100644 (file)
 #     limitations under the License.
 
 import abc
-import asyncio
-from base64 import b64decode
-import re
 import typing
 
-from Crypto.Cipher import AES
 from motor.motor_asyncio import AsyncIOMotorClient
 from n2vc.config import EnvironConfig
 from n2vc.vca.connection_data import ConnectionData
 from osm_common.dbmongo import DbMongo, DbException
+from osm_common.dbbase import Encryption
+
 
 DB_NAME = "osm"

@@ -184,17 +182,21 @@ class DbMongoStore(Store):
 
 
 class MotorStore(Store):
-    def __init__(self, uri: str, loop=None):
+    def __init__(self, uri: str):
         """
         Constructor
 
         :param: uri: Connection string to connect to the database.
-        :param: loop: Asyncio Loop
         """
         self._client = AsyncIOMotorClient(uri)
-        self.loop = loop or asyncio.get_event_loop()
         self._secret_key = None
         self._config = EnvironConfig(prefixes=["OSMLCM_", "OSMMON_"])
+        self.encryption = Encryption(
+            uri=uri,
+            config=self._config,
+            encoding_type="utf-8",
+            logger_name="db",
+        )
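+        # Encryption/decryption is now delegated to osm_common's Encryption
+        # helper (see the decrypt_fields call below) instead of the local
+        # AES-based methods removed by this change.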
 
     @property
     def _database(self):
@@ -223,7 +225,7 @@ class MotorStore(Store):
         data = await self._vca_collection.find_one({"_id": vca_id})
         if not data:
             raise Exception("vca with id {} not found".format(vca_id))
-        await self.decrypt_fields(
+        await self.encryption.decrypt_fields(
             data,
             ["secret", "cacert"],
             schema_version=data["schema_version"],
@@ -294,114 +296,3 @@ class MotorStore(Store):
     async def _get_juju_info(self):
         """Get Juju information (the default VCA) from the admin collection"""
         return await self._admin_collection.find_one({"_id": "juju"})
-
-    # DECRYPT METHODS
-    async def decrypt_fields(
-        self,
-        item: dict,
-        fields: typing.List[str],
-        schema_version: str = None,
-        salt: str = None,
-    ):
-        """
-        Decrypt fields
-
-        Decrypt fields from a dictionary. Follows the same logic as in osm_common.
-
-        :param: item: Dictionary with the keys to be decrypted
-        :param: fields: List of keys to decrypt
-        :param: schema version: Schema version. (i.e. 1.11)
-        :param: salt: Salt for the decryption
-        """
-        flags = re.I
-
-        async def process(_item):
-            if isinstance(_item, list):
-                for elem in _item:
-                    await process(elem)
-            elif isinstance(_item, dict):
-                for key, val in _item.items():
-                    if isinstance(val, str):
-                        if any(re.search(f, key, flags) for f in fields):
-                            _item[key] = await self.decrypt(val, schema_version, salt)
-                    else:
-                        await process(val)
-
-        await process(item)
-
-    async def decrypt(self, value, schema_version=None, salt=None):
-        """
-        Decrypt an encrypted value
-        :param value: value to be decrypted. It is a base64 string
-        :param schema_version: used for known encryption method used. If None or '1.0' no encryption has been done.
-               If '1.1' symmetric AES encryption has been done
-        :param salt: optional salt to be used
-        :return: Plain content of value
-        """
-        await self.get_secret_key()
-        if not self.secret_key or not schema_version or schema_version == "1.0":
-            return value
-        else:
-            secret_key = self._join_secret_key(salt)
-            encrypted_msg = b64decode(value)
-            cipher = AES.new(secret_key)
-            decrypted_msg = cipher.decrypt(encrypted_msg)
-            try:
-                unpadded_private_msg = decrypted_msg.decode().rstrip("\0")
-            except UnicodeDecodeError:
-                raise DbException(
-                    "Cannot decrypt information. Are you using same COMMONKEY in all OSM components?",
-                    http_code=500,
-                )
-            return unpadded_private_msg
-
-    def _join_secret_key(self, update_key: typing.Any) -> bytes:
-        """
-        Join key with secret key
-
-        :param: update_key: str or bytes with the key to update
-
-        :return: Joined key
-        """
-        return self._join_keys(update_key, self.secret_key)
-
-    def _join_keys(self, key: typing.Any, secret_key: bytes) -> bytes:
-        """
-        Join key with secret_key
-
-        :param: key: str or bytes of the key to update
-        :param: secret_key: bytes of the secret key
-
-        :return: Joined key
-        """
-        if isinstance(key, str):
-            update_key_bytes = key.encode()
-        else:
-            update_key_bytes = key
-        new_secret_key = bytearray(secret_key) if secret_key else bytearray(32)
-        for i, b in enumerate(update_key_bytes):
-            new_secret_key[i % 32] ^= b
-        return bytes(new_secret_key)
-
-    @property
-    def secret_key(self):
-        return self._secret_key
-
-    async def get_secret_key(self):
-        """
-        Get secret key using the database key and the serial key in the DB
-        The key is populated in the property self.secret_key
-        """
-        if self.secret_key:
-            return
-        secret_key = None
-        if self.database_key:
-            secret_key = self._join_keys(self.database_key, None)
-        version_data = await self._admin_collection.find_one({"_id": "version"})
-        if version_data and version_data.get("serial"):
-            secret_key = self._join_keys(b64decode(version_data["serial"]), secret_key)
-        self._secret_key = secret_key
-
-    @property
-    def database_key(self):
-        return self._config["database_commonkey"]
index 07b2127..b9e9e36 100644 (file)
@@ -181,7 +181,6 @@ class EntityStateTest(TestCase):
             os.path.join(os.path.dirname(__file__), "testdata", filename),
             "r",
         ) as self.upgrade_file:
-
             all_changes = AsyncMock()
             all_changes.Next.side_effect = self._fetch_next_delta
             mock_all_watcher.return_value = all_changes
index d1b7903..bddfddd 100644 (file)
@@ -134,7 +134,6 @@ class TestK8sHelm3Conn(asynctest.TestCase):
 
     @asynctest.fail_on(active_handles=True)
     async def test_repo_list(self):
-
         self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
 
         await self.helm_conn.repo_list(self.cluster_uuid)
@@ -150,7 +149,6 @@ class TestK8sHelm3Conn(asynctest.TestCase):
 
     @asynctest.fail_on(active_handles=True)
     async def test_repo_remove(self):
-
         self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
         repo_name = "bitnami"
         await self.helm_conn.repo_remove(self.cluster_uuid, repo_name)
@@ -174,6 +172,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
         self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=None)
         self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn._repo_to_oci_url = Mock(return_value=None)
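+        # The install path now consults _repo_to_oci_url; mocking it to
+        # return None keeps the expected helm command free of OCI parts.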
         self.kdu_instance = "stable-openldap-0005399828"
         self.helm_conn.generate_kdu_instance_name = Mock(return_value=self.kdu_instance)
         self.helm_conn._get_namespaces = asynctest.CoroutineMock(return_value=[])
@@ -196,9 +195,17 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         self.helm_conn._create_namespace.assert_called_once_with(
             self.cluster_id, self.namespace
         )
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
+        self.helm_conn.fs.sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
+        )
+        self.helm_conn.fs.reverse_sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
         )
         self.helm_conn._store_status.assert_called_with(
             cluster_id=self.cluster_id,
@@ -206,15 +213,13 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             namespace=self.namespace,
             db_dict=db_dict,
             operation="install",
-            run_once=True,
-            check_every=0,
         )
         command = (
             "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 "
             "install stable-openldap-0005399828 --atomic --output yaml   "
             "--timeout 300s --namespace testk8s stable/openldap --version 1.2.2"
         )
-        self.helm_conn._local_async_exec.assert_called_once_with(
+        self.helm_conn._local_async_exec.assert_called_with(
             command=command, env=self.env, raise_exception_on_error=False
         )
 
@@ -262,16 +267,57 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         }
         self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
         self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn._repo_to_oci_url = Mock(return_value=None)
         self.helm_conn.get_instance_info = asynctest.CoroutineMock(
             return_value=instance_info
         )
+        # TEST-1 (--force true)
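+        # Upgrade commands are now expected to carry --reuse-values (plus
+        # --force when requested), as asserted on the commands below.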
+        await self.helm_conn.upgrade(
+            self.cluster_uuid,
+            kdu_instance,
+            kdu_model,
+            atomic=True,
+            db_dict=db_dict,
+            force=True,
+        )
+        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
+        )
+        self.helm_conn._store_status.assert_called_with(
+            cluster_id=self.cluster_id,
+            kdu_instance=kdu_instance,
+            namespace=self.namespace,
+            db_dict=db_dict,
+            operation="upgrade",
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
+            "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
+            "--namespace testk8s --atomic --force --output yaml  --timeout 300s "
+            "--reuse-values --version 1.2.3"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
 
+        # TEST-2 (--force false)
         await self.helm_conn.upgrade(
-            self.cluster_uuid, kdu_instance, kdu_model, atomic=True, db_dict=db_dict
+            self.cluster_uuid,
+            kdu_instance,
+            kdu_model,
+            atomic=True,
+            db_dict=db_dict,
         )
         self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
+        self.helm_conn.fs.reverse_sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
         )
         self.helm_conn._store_status.assert_called_with(
             cluster_id=self.cluster_id,
@@ -279,16 +325,65 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             namespace=self.namespace,
             db_dict=db_dict,
             operation="upgrade",
-            run_once=True,
-            check_every=0,
         )
         command = (
             "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
             "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
             "--namespace testk8s --atomic --output yaml  --timeout 300s "
-            "--version 1.2.3"
+            "--reuse-values --version 1.2.3"
         )
-        self.helm_conn._local_async_exec.assert_called_once_with(
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_upgrade_namespace(self):
+        kdu_model = "stable/openldap:1.2.3"
+        kdu_instance = "stable-openldap-0005399828"
+        db_dict = {}
+        instance_info = {
+            "chart": "openldap-1.2.2",
+            "name": kdu_instance,
+            "namespace": self.namespace,
+            "revision": 1,
+            "status": "DEPLOYED",
+        }
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+        self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn._repo_to_oci_url = Mock(return_value=None)
+        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
+            return_value=instance_info
+        )
+
+        await self.helm_conn.upgrade(
+            self.cluster_uuid,
+            kdu_instance,
+            kdu_model,
+            atomic=True,
+            db_dict=db_dict,
+            namespace="default",
+        )
+        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
+        )
+        self.helm_conn._store_status.assert_called_with(
+            cluster_id=self.cluster_id,
+            kdu_instance=kdu_instance,
+            namespace="default",
+            db_dict=db_dict,
+            operation="upgrade",
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
+            "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
+            "--namespace default --atomic --output yaml  --timeout 300s "
+            "--reuse-values --version 1.2.3"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(
             command=command, env=self.env, raise_exception_on_error=False
         )
 
@@ -324,6 +419,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         self.helm_conn.values_kdu = asynctest.CoroutineMock(return_value=kdu_values)
         self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
         self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn._repo_to_oci_url = Mock(return_value=None)
         self.helm_conn.get_instance_info = asynctest.CoroutineMock(
             return_value=instance_info
         )
@@ -342,9 +438,9 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
             "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
             "--namespace testk8s --atomic --output yaml --set replicaCount=2 --timeout 1800s "
-            "--version 1.2.3"
+            "--reuse-values --version 1.2.3"
         )
-        self.helm_conn._local_async_exec.assert_called_once_with(
+        self.helm_conn._local_async_exec.assert_called_with(
             command=command, env=self.env, raise_exception_on_error=False
         )
         # TEST-2
@@ -361,7 +457,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
             "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
             "--namespace testk8s --atomic --output yaml --set dummy-app.replicas=3 --timeout 1800s "
-            "--version 1.2.3"
+            "--reuse-values --version 1.2.3"
         )
         self.helm_conn._local_async_exec.assert_called_with(
             command=command, env=self.env, raise_exception_on_error=False
@@ -373,8 +469,6 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             namespace=self.namespace,
             db_dict=db_dict,
             operation="scale",
-            run_once=True,
-            check_every=0,
         )
 
     @asynctest.fail_on(active_handles=True)
@@ -407,8 +501,6 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             namespace=self.namespace,
             db_dict=db_dict,
             operation="rollback",
-            run_once=True,
-            check_every=0,
         )
         command = (
             "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 "
@@ -612,8 +704,6 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             namespace=self.namespace,
             db_dict=db_dict,
             operation="install",
-            run_once=True,
-            check_every=0,
         )
         self.helm_conn._status_kdu.assert_called_once_with(
             cluster_id=self.cluster_id,
@@ -712,7 +802,13 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         )
         self.helm_conn.repo_remove.assert_not_called()
         self.helm_conn.repo_add.assert_called_once_with(
-            self.cluster_uuid, "bitnami", "https://charts.bitnami.com/bitnami"
+            self.cluster_uuid,
+            "bitnami",
+            "https://charts.bitnami.com/bitnami",
+            cert=None,
+            user=None,
+            password=None,
+            oci=False,
         )
         self.assertEqual(deleted_repo_list, [], "Deleted repo list should be empty")
         self.assertEqual(
diff --git a/n2vc/tests/unit/test_k8s_helm_conn.py b/n2vc/tests/unit/test_k8s_helm_conn.py
deleted file mode 100644 (file)
index fb586a3..0000000
+++ /dev/null
@@ -1,654 +0,0 @@
-##
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: alfonso.tiernosepulveda@telefonica.com
-##
-
-import asynctest
-import logging
-
-from asynctest.mock import Mock
-from osm_common.dbmemory import DbMemory
-from osm_common.fslocal import FsLocal
-from n2vc.k8s_helm_conn import K8sHelmConnector
-
-__author__ = "Isabel Lloret <illoret@indra.es>"
-
-
-class TestK8sHelmConn(asynctest.TestCase):
-    logging.basicConfig(level=logging.DEBUG)
-    logger = logging.getLogger(__name__)
-    logger.setLevel(logging.DEBUG)
-
-    async def setUp(self):
-        self.db = Mock(DbMemory())
-        self.fs = asynctest.Mock(FsLocal())
-        self.fs.path = "./tmp/"
-        self.namespace = "testk8s"
-        self.service_account = "osm"
-        self.cluster_id = "helm_cluster_id"
-        self.cluster_uuid = self.cluster_id
-        # pass fake kubectl and helm commands to make sure it does not call actual commands
-        K8sHelmConnector._check_file_exists = asynctest.Mock(return_value=True)
-        K8sHelmConnector._local_async_exec = asynctest.CoroutineMock(
-            return_value=(0, "")
-        )
-        cluster_dir = self.fs.path + self.cluster_id
-        self.kube_config = self.fs.path + self.cluster_id + "/.kube/config"
-        self.helm_home = self.fs.path + self.cluster_id + "/.helm"
-        self.env = {
-            "HELM_HOME": "{}/.helm".format(cluster_dir),
-            "KUBECONFIG": "{}/.kube/config".format(cluster_dir),
-        }
-        self.helm_conn = K8sHelmConnector(self.fs, self.db, log=self.logger)
-        self.logger.debug("Set up executed")
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_init_env(self):
-        # TODO
-        pass
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_repo_add(self):
-        repo_name = "bitnami"
-        repo_url = "https://charts.bitnami.com/bitnami"
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        await self.helm_conn.repo_add(self.cluster_uuid, repo_name, repo_url)
-
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        self.assertEqual(
-            self.helm_conn._local_async_exec.call_count,
-            2,
-            "local_async_exec expected 2 calls, called {}".format(
-                self.helm_conn._local_async_exec.call_count
-            ),
-        )
-
-        repo_update_command = (
-            "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo update {}"
-        ).format(repo_name)
-        repo_add_command = (
-            "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo add {} {}"
-        ).format(repo_name, repo_url)
-        calls = self.helm_conn._local_async_exec.call_args_list
-        call0_kargs = calls[0][1]
-        self.assertEqual(
-            call0_kargs.get("command"),
-            repo_add_command,
-            "Invalid repo add command: {}".format(call0_kargs.get("command")),
-        )
-        self.assertEqual(
-            call0_kargs.get("env"),
-            self.env,
-            "Invalid env for add command: {}".format(call0_kargs.get("env")),
-        )
-        call1_kargs = calls[1][1]
-        self.assertEqual(
-            call1_kargs.get("command"),
-            repo_update_command,
-            "Invalid repo update command: {}".format(call1_kargs.get("command")),
-        )
-        self.assertEqual(
-            call1_kargs.get("env"),
-            self.env,
-            "Invalid env for update command: {}".format(call1_kargs.get("env")),
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_repo_list(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        await self.helm_conn.repo_list(self.cluster_uuid)
-
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        command = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo list --output yaml"
-        self.helm_conn._local_async_exec.assert_called_with(
-            command=command, env=self.env, raise_exception_on_error=False
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_repo_remove(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-        repo_name = "bitnami"
-        await self.helm_conn.repo_remove(self.cluster_uuid, repo_name)
-
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        command = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo remove {}".format(
-            repo_name
-        )
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=True
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_install(self):
-        kdu_model = "stable/openldap:1.2.2"
-        kdu_instance = "stable-openldap-0005399828"
-        db_dict = {}
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-        self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=None)
-        self.helm_conn._store_status = asynctest.CoroutineMock()
-        self.helm_conn.generate_kdu_instance_name = Mock(return_value=kdu_instance)
-
-        await self.helm_conn.install(
-            self.cluster_uuid,
-            kdu_model,
-            kdu_instance,
-            atomic=True,
-            namespace=self.namespace,
-            db_dict=db_dict,
-        )
-
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        self.helm_conn._store_status.assert_called_with(
-            cluster_id=self.cluster_id,
-            kdu_instance=kdu_instance,
-            namespace=self.namespace,
-            db_dict=db_dict,
-            operation="install",
-            run_once=True,
-            check_every=0,
-        )
-        command = (
-            "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm install "
-            "--atomic --output yaml   --timeout 300 "
-            "--name=stable-openldap-0005399828 --namespace testk8s stable/openldap "
-            "--version 1.2.2"
-        )
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=False
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_upgrade(self):
-        kdu_model = "stable/openldap:1.2.3"
-        kdu_instance = "stable-openldap-0005399828"
-        db_dict = {}
-        instance_info = {
-            "chart": "openldap-1.2.2",
-            "name": kdu_instance,
-            "namespace": self.namespace,
-            "revision": 1,
-            "status": "DEPLOYED",
-        }
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-        self.helm_conn._store_status = asynctest.CoroutineMock()
-        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
-            return_value=instance_info
-        )
-
-        await self.helm_conn.upgrade(
-            self.cluster_uuid, kdu_instance, kdu_model, atomic=True, db_dict=db_dict
-        )
-        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        self.helm_conn._store_status.assert_called_with(
-            cluster_id=self.cluster_id,
-            kdu_instance=kdu_instance,
-            namespace=self.namespace,
-            db_dict=db_dict,
-            operation="upgrade",
-            run_once=True,
-            check_every=0,
-        )
-        command = (
-            "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm upgrade "
-            "--atomic --output yaml  --timeout 300 stable-openldap-0005399828 stable/openldap --version 1.2.3"
-        )
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=False
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_scale(self):
-        kdu_model = "stable/openldap:1.2.3"
-        kdu_instance = "stable-openldap-0005399828"
-        db_dict = {}
-        instance_info = {
-            "chart": "openldap-1.2.3",
-            "name": kdu_instance,
-            "namespace": self.namespace,
-            "revision": 1,
-            "status": "DEPLOYED",
-        }
-        repo_list = [
-            {
-                "name": "stable",
-                "url": "https://kubernetes-charts.storage.googleapis.com/",
-            }
-        ]
-        kdu_values = """
-            # Default values for openldap.
-            # This is a YAML-formatted file.
-            # Declare variables to be passed into your templates.
-
-            replicaCount: 1
-            dummy-app:
-              replicas: 2
-        """
-
-        self.helm_conn.repo_list = asynctest.CoroutineMock(return_value=repo_list)
-        self.helm_conn.values_kdu = asynctest.CoroutineMock(return_value=kdu_values)
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-        self.helm_conn._store_status = asynctest.CoroutineMock()
-        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
-            return_value=instance_info
-        )
-
-        # TEST-1
-        await self.helm_conn.scale(
-            kdu_instance,
-            2,
-            "",
-            kdu_model=kdu_model,
-            cluster_uuid=self.cluster_uuid,
-            atomic=True,
-            db_dict=db_dict,
-        )
-        command = (
-            "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config "
-            "/usr/bin/helm upgrade --atomic --output yaml --set replicaCount=2 "
-            "--timeout 1800s stable-openldap-0005399828 stable/openldap "
-            "--version 1.2.3"
-        )
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=False
-        )
-
-        # TEST-2
-        await self.helm_conn.scale(
-            kdu_instance,
-            3,
-            "dummy-app",
-            kdu_model=kdu_model,
-            cluster_uuid=self.cluster_uuid,
-            atomic=True,
-            db_dict=db_dict,
-        )
-        command = (
-            "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config "
-            "/usr/bin/helm upgrade --atomic --output yaml --set dummy-app.replicas=3 "
-            "--timeout 1800s stable-openldap-0005399828 stable/openldap "
-            "--version 1.2.3"
-        )
-        self.helm_conn._local_async_exec.assert_called_with(
-            command=command, env=self.env, raise_exception_on_error=False
-        )
-        self.helm_conn.fs.reverse_sync.assert_called_with(from_path=self.cluster_id)
-        self.helm_conn._store_status.assert_called_with(
-            cluster_id=self.cluster_id,
-            kdu_instance=kdu_instance,
-            namespace=self.namespace,
-            db_dict=db_dict,
-            operation="scale",
-            run_once=True,
-            check_every=0,
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_rollback(self):
-        kdu_instance = "stable-openldap-0005399828"
-        db_dict = {}
-        instance_info = {
-            "chart": "openldap-1.2.3",
-            "name": kdu_instance,
-            "namespace": self.namespace,
-            "revision": 2,
-            "status": "DEPLOYED",
-        }
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-        self.helm_conn._store_status = asynctest.CoroutineMock()
-        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
-            return_value=instance_info
-        )
-
-        await self.helm_conn.rollback(
-            self.cluster_uuid, kdu_instance=kdu_instance, revision=1, db_dict=db_dict
-        )
-        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        self.helm_conn._store_status.assert_called_with(
-            cluster_id=self.cluster_id,
-            kdu_instance=kdu_instance,
-            namespace=self.namespace,
-            db_dict=db_dict,
-            operation="rollback",
-            run_once=True,
-            check_every=0,
-        )
-        command = (
-            "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config "
-            "/usr/bin/helm rollback stable-openldap-0005399828 1 --wait"
-        )
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=False
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_uninstall(self):
-        kdu_instance = "stable-openldap-0005399828"
-        instance_info = {
-            "chart": "openldap-1.2.2",
-            "name": kdu_instance,
-            "namespace": self.namespace,
-            "revision": 3,
-            "status": "DEPLOYED",
-        }
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-        self.helm_conn._store_status = asynctest.CoroutineMock()
-        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
-            return_value=instance_info
-        )
-
-        await self.helm_conn.uninstall(self.cluster_uuid, kdu_instance)
-        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        command = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm delete --purge  {}".format(
-            kdu_instance
-        )
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=True
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_get_services(self):
-        kdu_instance = "test_services_1"
-        service = {"name": "testservice", "type": "LoadBalancer"}
-        self.helm_conn._local_async_exec_pipe = asynctest.CoroutineMock(
-            return_value=("", 0)
-        )
-        self.helm_conn._parse_services = Mock(return_value=["testservice"])
-        self.helm_conn._get_service = asynctest.CoroutineMock(return_value=service)
-
-        services = await self.helm_conn.get_services(
-            self.cluster_uuid, kdu_instance, self.namespace
-        )
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        self.helm_conn._parse_services.assert_called_once()
-        command1 = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm get manifest {} ".format(
-            kdu_instance
-        )
-        command2 = "/usr/bin/kubectl get --namespace={} -f -".format(self.namespace)
-        self.helm_conn._local_async_exec_pipe.assert_called_once_with(
-            command1, command2, env=self.env, raise_exception_on_error=True
-        )
-        self.assertEqual(
-            services, [service], "Invalid service returned from get_service"
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_get_service(self):
-        service_name = "service1"
-
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-        await self.helm_conn.get_service(
-            self.cluster_uuid, service_name, self.namespace
-        )
-
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        command = (
-            "/usr/bin/kubectl --kubeconfig=./tmp/helm_cluster_id/.kube/config "
-            "--namespace=testk8s get service service1 -o=yaml"
-        )
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=True
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_inspect_kdu(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        kdu_model = "stable/openldap:1.2.4"
-        repo_url = "https://kubernetes-charts.storage.googleapis.com/"
-        await self.helm_conn.inspect_kdu(kdu_model, repo_url)
-
-        command = (
-            "/usr/bin/helm inspect  openldap --repo "
-            "https://kubernetes-charts.storage.googleapis.com/ "
-            "--version 1.2.4"
-        )
-        self.helm_conn._local_async_exec.assert_called_with(command=command)
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_help_kdu(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        kdu_model = "stable/openldap:1.2.4"
-        repo_url = "https://kubernetes-charts.storage.googleapis.com/"
-        await self.helm_conn.help_kdu(kdu_model, repo_url)
-
-        command = (
-            "/usr/bin/helm inspect readme openldap --repo "
-            "https://kubernetes-charts.storage.googleapis.com/ "
-            "--version 1.2.4"
-        )
-        self.helm_conn._local_async_exec.assert_called_with(command=command)
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_values_kdu(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        kdu_model = "stable/openldap:1.2.4"
-        repo_url = "https://kubernetes-charts.storage.googleapis.com/"
-        await self.helm_conn.values_kdu(kdu_model, repo_url)
-
-        command = (
-            "/usr/bin/helm inspect values openldap --repo "
-            "https://kubernetes-charts.storage.googleapis.com/ "
-            "--version 1.2.4"
-        )
-        self.helm_conn._local_async_exec.assert_called_with(command=command)
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_get_values_kdu(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        kdu_instance = "stable-openldap-0005399828"
-        await self.helm_conn.get_values_kdu(
-            kdu_instance, self.namespace, self.env["KUBECONFIG"]
-        )
-
-        command = (
-            "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm get values "
-            "stable-openldap-0005399828 --output yaml"
-        )
-        self.helm_conn._local_async_exec.assert_called_with(command=command)
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_instances_list(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        await self.helm_conn.instances_list(self.cluster_uuid)
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        command = "/usr/bin/helm list --output yaml"
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=True
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_status_kdu(self):
-        kdu_instance = "stable-openldap-0005399828"
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        await self.helm_conn._status_kdu(
-            self.cluster_id, kdu_instance, self.namespace, yaml_format=True
-        )
-        command = (
-            "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm status {} --output yaml"
-        ).format(kdu_instance)
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command,
-            env=self.env,
-            raise_exception_on_error=True,
-            show_error_log=False,
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_store_status(self):
-        kdu_instance = "stable-openldap-0005399828"
-        db_dict = {}
-        status = {
-            "info": {
-                "description": "Install complete",
-                "status": {
-                    "code": "1",
-                    "notes": "The openldap helm chart has been installed",
-                },
-            }
-        }
-        self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=status)
-        self.helm_conn.write_app_status_to_db = asynctest.CoroutineMock(
-            return_value=status
-        )
-
-        await self.helm_conn._store_status(
-            cluster_id=self.cluster_id,
-            kdu_instance=kdu_instance,
-            namespace=self.namespace,
-            db_dict=db_dict,
-            operation="install",
-            run_once=True,
-            check_every=0,
-        )
-        self.helm_conn._status_kdu.assert_called_once_with(
-            cluster_id=self.cluster_id,
-            kdu_instance=kdu_instance,
-            namespace=self.namespace,
-            yaml_format=False,
-        )
-        self.helm_conn.write_app_status_to_db.assert_called_once_with(
-            db_dict=db_dict,
-            status="Install complete",
-            detailed_status=str(status),
-            operation="install",
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_reset_uninstall_false(self):
-        self.helm_conn._uninstall_sw = asynctest.CoroutineMock()
-
-        await self.helm_conn.reset(self.cluster_uuid, force=False, uninstall_sw=False)
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.file_delete.assert_called_once_with(
-            self.cluster_id, ignore_non_exist=True
-        )
-        self.helm_conn._uninstall_sw.assert_not_called()
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_reset_uninstall(self):
-        kdu_instance = "stable-openldap-0021099429"
-        instances = [
-            {
-                "app_version": "2.4.48",
-                "chart": "openldap-1.2.3",
-                "name": kdu_instance,
-                "namespace": self.namespace,
-                "revision": "1",
-                "status": "deployed",
-                "updated": "2020-10-30 11:11:20.376744191 +0000 UTC",
-            }
-        ]
-        self.helm_conn._get_namespace = Mock(return_value=self.namespace)
-        self.helm_conn._uninstall_sw = asynctest.CoroutineMock()
-        self.helm_conn.instances_list = asynctest.CoroutineMock(return_value=instances)
-        self.helm_conn.uninstall = asynctest.CoroutineMock()
-
-        await self.helm_conn.reset(self.cluster_uuid, force=True, uninstall_sw=True)
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.file_delete.assert_called_once_with(
-            self.cluster_id, ignore_non_exist=True
-        )
-        self.helm_conn._get_namespace.assert_called_once_with(
-            cluster_uuid=self.cluster_uuid
-        )
-        self.helm_conn.instances_list.assert_called_once_with(
-            cluster_uuid=self.cluster_uuid
-        )
-        self.helm_conn.uninstall.assert_called_once_with(
-            cluster_uuid=self.cluster_uuid, kdu_instance=kdu_instance
-        )
-        self.helm_conn._uninstall_sw.assert_called_once_with(
-            cluster_id=self.cluster_id, namespace=self.namespace
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_uninstall_sw_namespace(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        await self.helm_conn._uninstall_sw(self.cluster_id, self.namespace)
-        calls = self.helm_conn._local_async_exec.call_args_list
-        self.assertEqual(
-            len(calls), 3, "To uninstall should have executed three commands"
-        )
-        call0_kargs = calls[0][1]
-        command_0 = "/usr/bin/helm --kubeconfig={} --home={} reset".format(
-            self.kube_config, self.helm_home
-        )
-        self.assertEqual(
-            call0_kargs,
-            {"command": command_0, "raise_exception_on_error": True, "env": self.env},
-            "Invalid args for first call to local_exec",
-        )
-        call1_kargs = calls[1][1]
-        command_1 = (
-            "/usr/bin/kubectl --kubeconfig={} delete "
-            "clusterrolebinding.rbac.authorization.k8s.io/osm-tiller-cluster-rule".format(
-                self.kube_config
-            )
-        )
-        self.assertEqual(
-            call1_kargs,
-            {"command": command_1, "raise_exception_on_error": False, "env": self.env},
-            "Invalid args for second call to local_exec",
-        )
-        call2_kargs = calls[2][1]
-        command_2 = (
-            "/usr/bin/kubectl --kubeconfig={} --namespace {} delete "
-            "serviceaccount/{}".format(
-                self.kube_config, self.namespace, self.service_account
-            )
-        )
-        self.assertEqual(
-            call2_kargs,
-            {"command": command_2, "raise_exception_on_error": False, "env": self.env},
-            "Invalid args for third call to local_exec",
-        )
index e0faaf0..1de1288 100644 (file)
@@ -21,10 +21,7 @@ from n2vc.definitions import Offer, RelationEndpoint
 from n2vc.k8s_juju_conn import K8sJujuConnector, RBAC_LABEL_KEY_NAME
 from osm_common import fslocal
 from .utils import kubeconfig, FakeModel, FakeFileWrapper, AsyncMock, FakeApplication
-from n2vc.exceptions import (
-    MethodNotImplemented,
-    K8sException,
-)
+from n2vc.exceptions import MethodNotImplemented, K8sException
 from n2vc.vca.connection_data import ConnectionData
 
 
@@ -67,11 +64,14 @@ class K8sJujuConnTestCase(asynctest.TestCase):
         )
         logging.disable(logging.CRITICAL)
 
+        self.kdu_name = "kdu_name"
+        self.kdu_instance = "{}-{}".format(self.kdu_name, "id")
+        self.default_namespace = self.kdu_instance
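+        # The Juju model name is now resolved from the namespace stored in
+        # the DB; _obtain_namespace_from_db is mocked below to return it.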
+
         self.k8s_juju_conn = K8sJujuConnector(
             fs=fslocal.FsLocal(),
             db=self.db,
             log=None,
-            loop=self.loop,
             on_update_db=None,
         )
         self.k8s_juju_conn._store.get_vca_id.return_value = None
@@ -83,6 +83,9 @@ class K8sJujuConnTestCase(asynctest.TestCase):
         self.kubectl.get_services.return_value = [{}]
         self.k8s_juju_conn._get_kubectl = Mock()
         self.k8s_juju_conn._get_kubectl.return_value = self.kubectl
+        self.k8s_juju_conn._obtain_namespace_from_db = Mock(
+            return_value=self.default_namespace
+        )
 
 
 class InitEnvTest(K8sJujuConnTestCase):
@@ -127,9 +130,15 @@ class InitEnvTest(K8sJujuConnTestCase):
             uuid, created = self.loop.run_until_complete(
                 self.k8s_juju_conn.init_env(k8s_creds=kubeconfig)
             )
-
         self.assertIsNone(created)
         self.assertIsNone(uuid)
+        self.kubectl.create_cluster_role.assert_called_once()
+        self.kubectl.create_service_account.assert_called_once()
+        self.kubectl.create_cluster_role_binding.assert_called_once()
+        self.kubectl.get_default_storage_class.assert_called_once()
+        self.kubectl.delete_cluster_role.assert_called_once()
+        self.kubectl.delete_service_account.assert_called_once()
+        self.kubectl.delete_cluster_role_binding.assert_called_once()
         self.k8s_juju_conn.libjuju.add_k8s.assert_called_once()
 
 
@@ -203,9 +212,7 @@ class InstallTest(K8sJujuConnTestCase):
         self.local_bundle = "bundle"
         self.cs_bundle = "cs:bundle"
         self.http_bundle = "https://example.com/bundle.yaml"
-        self.kdu_name = "kdu_name"
         self.cluster_uuid = "cluster"
-        self.kdu_instance = "{}-{}".format(self.kdu_name, "id")
         self.k8s_juju_conn.libjuju.add_model = AsyncMock()
         self.k8s_juju_conn.libjuju.deploy = AsyncMock()
 
@@ -219,15 +226,17 @@ class InstallTest(K8sJujuConnTestCase):
                 kdu_name=self.kdu_name,
                 db_dict=self.db_dict,
                 timeout=1800,
+                params=None,
             )
         )
         self.assertEqual(mock_chdir.call_count, 2)
         self.k8s_juju_conn.libjuju.add_model.assert_called_once()
         self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
             "local:{}".format(self.local_bundle),
-            model_name=self.kdu_instance,
+            model_name=self.default_namespace,
             wait=True,
             timeout=1800,
+            instantiation_params=None,
         )
 
     def test_success_cs(self, mock_chdir):
@@ -240,17 +249,20 @@ class InstallTest(K8sJujuConnTestCase):
                 kdu_name=self.kdu_name,
                 db_dict=self.db_dict,
                 timeout=1800,
+                params={},
             )
         )
         self.k8s_juju_conn.libjuju.add_model.assert_called_once()
         self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
             self.cs_bundle,
-            model_name=self.kdu_instance,
+            model_name=self.default_namespace,
             wait=True,
             timeout=1800,
+            instantiation_params=None,
         )
 
     def test_success_http(self, mock_chdir):
+        params = {"overlay": {"applications": {"squid": {"scale": 2}}}}
         self.loop.run_until_complete(
             self.k8s_juju_conn.install(
                 self.cluster_uuid,
@@ -260,17 +272,20 @@ class InstallTest(K8sJujuConnTestCase):
                 kdu_name=self.kdu_name,
                 db_dict=self.db_dict,
                 timeout=1800,
+                params=params,
             )
         )
         self.k8s_juju_conn.libjuju.add_model.assert_called_once()
         self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
             self.http_bundle,
-            model_name=self.kdu_instance,
+            model_name=self.default_namespace,
             wait=True,
             timeout=1800,
+            instantiation_params=params.get("overlay"),
         )
 
     def test_success_not_kdu_name(self, mock_chdir):
+        params = {"some_key": {"applications": {"squid": {"scale": 2}}}}
         self.loop.run_until_complete(
             self.k8s_juju_conn.install(
                 self.cluster_uuid,
@@ -279,14 +294,16 @@ class InstallTest(K8sJujuConnTestCase):
                 atomic=True,
                 db_dict=self.db_dict,
                 timeout=1800,
+                params=params,
             )
         )
         self.k8s_juju_conn.libjuju.add_model.assert_called_once()
         self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
             self.cs_bundle,
-            model_name=self.kdu_instance,
+            model_name=self.default_namespace,
             wait=True,
             timeout=1800,
+            instantiation_params=None,
         )
 
     def test_missing_db_dict(self, mock_chdir):
@@ -323,9 +340,10 @@ class InstallTest(K8sJujuConnTestCase):
         self.k8s_juju_conn.libjuju.add_model.assert_called_once()
         self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
             self.cs_bundle,
-            model_name=self.kdu_instance,
+            model_name=self.default_namespace,
             wait=True,
             timeout=1800,
+            instantiation_params=None,
         )
 
     def test_missing_bundle(self, mock_chdir):
@@ -361,9 +379,10 @@ class InstallTest(K8sJujuConnTestCase):
         self.k8s_juju_conn.libjuju.add_model.assert_called_once()
         self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
             "local:{}".format(self.local_bundle),
-            model_name=self.kdu_instance,
+            model_name=self.default_namespace,
             wait=True,
             timeout=1800,
+            instantiation_params=None,
         )
 
 
@@ -395,7 +414,6 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
         super(ExecPrimitivesTest, self).setUp()
         self.action_name = "touch"
         self.application_name = "myapp"
-        self.model_name = "model"
         self.k8s_juju_conn.libjuju.get_actions = AsyncMock()
         self.k8s_juju_conn.libjuju.execute_action = AsyncMock()
 
@@ -409,16 +427,22 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
 
         output = self.loop.run_until_complete(
             self.k8s_juju_conn.exec_primitive(
-                "cluster", self.model_name, self.action_name, params=params
+                "cluster", self.kdu_instance, self.action_name, params=params
             )
         )
 
         self.assertEqual(output, "success")
+        self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with(
+            kdu_instance=self.kdu_instance
+        )
         self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with(
-            self.application_name, self.model_name
+            application_name=self.application_name, model_name=self.default_namespace
         )
         self.k8s_juju_conn.libjuju.execute_action.assert_called_once_with(
-            self.application_name, self.model_name, self.action_name, **params
+            application_name=self.application_name,
+            model_name=self.default_namespace,
+            action_name=self.action_name,
+            **params
         )
 
     def test_exception(self):
@@ -430,16 +454,22 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
         with self.assertRaises(Exception):
             output = self.loop.run_until_complete(
                 self.k8s_juju_conn.exec_primitive(
-                    "cluster", self.model_name, self.action_name, params=params
+                    "cluster", self.kdu_instance, self.action_name, params=params
                 )
             )
 
         self.assertIsNone(output)
+        self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with(
+            kdu_instance=self.kdu_instance
+        )
         self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with(
-            self.application_name, self.model_name
+            application_name=self.application_name, model_name=self.default_namespace
         )
         self.k8s_juju_conn.libjuju.execute_action.assert_called_once_with(
-            self.application_name, self.model_name, self.action_name, **params
+            application_name=self.application_name,
+            model_name=self.default_namespace,
+            action_name=self.action_name,
+            **params
         )
 
     def test_missing_application_name_in_params(self):
@@ -449,7 +479,7 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
         with self.assertRaises(K8sException):
             output = self.loop.run_until_complete(
                 self.k8s_juju_conn.exec_primitive(
-                    "cluster", self.model_name, self.action_name, params=params
+                    "cluster", self.kdu_instance, self.action_name, params=params
                 )
             )
 
@@ -462,7 +492,7 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
         with self.assertRaises(K8sException):
             output = self.loop.run_until_complete(
                 self.k8s_juju_conn.exec_primitive(
-                    "cluster", self.model_name, self.action_name
+                    "cluster", self.kdu_instance, self.action_name
                 )
             )
 
@@ -481,13 +511,16 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
         with self.assertRaises(K8sException):
             output = self.loop.run_until_complete(
                 self.k8s_juju_conn.exec_primitive(
-                    "cluster", self.model_name, "non-existing-action", params=params
+                    "cluster", self.kdu_instance, "non-existing-action", params=params
                 )
             )
 
         self.assertIsNone(output)
+        self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with(
+            kdu_instance=self.kdu_instance
+        )
         self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with(
-            self.application_name, self.model_name
+            application_name=self.application_name, model_name=self.default_namespace
         )
         self.k8s_juju_conn.libjuju.execute_action.assert_not_called()
 
@@ -499,16 +532,22 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
         with self.assertRaises(K8sException):
             output = self.loop.run_until_complete(
                 self.k8s_juju_conn.exec_primitive(
-                    "cluster", self.model_name, self.action_name, params=params
+                    "cluster", self.kdu_instance, self.action_name, params=params
                 )
             )
 
         self.assertIsNone(output)
+        self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with(
+            kdu_instance=self.kdu_instance
+        )
         self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with(
-            self.application_name, self.model_name
+            application_name=self.application_name, model_name=self.default_namespace
         )
         self.k8s_juju_conn.libjuju.execute_action.assert_called_once_with(
-            self.application_name, self.model_name, self.action_name, **params
+            application_name=self.application_name,
+            model_name=self.default_namespace,
+            action_name=self.action_name,
+            **params
         )
 
 
@@ -647,8 +686,6 @@ class UpdateVcaStatusTest(K8sJujuConnTestCase):
     def setUp(self):
         super(UpdateVcaStatusTest, self).setUp()
         self.vcaStatus = {"model": {"applications": {"app": {"actions": {}}}}}
-        self.kdu_name = "kdu_name"
-        self.kdu_instance = "{}-{}".format(self.kdu_name, "id")
         self.k8s_juju_conn.libjuju.get_executed_actions = AsyncMock()
         self.k8s_juju_conn.libjuju.get_actions = AsyncMock()
         self.k8s_juju_conn.libjuju.get_application_configs = AsyncMock()
@@ -658,7 +695,6 @@ class UpdateVcaStatusTest(K8sJujuConnTestCase):
             self.k8s_juju_conn.update_vca_status(self.vcaStatus, self.kdu_instance)
         )
         self.k8s_juju_conn.libjuju.get_executed_actions.assert_called_once()
-        self.k8s_juju_conn.libjuju.get_actions.assert_called_once()
         self.k8s_juju_conn.libjuju.get_application_configs.assert_called_once()
 
     def test_exception(self):
@@ -669,7 +705,6 @@ class UpdateVcaStatusTest(K8sJujuConnTestCase):
                 self.k8s_juju_conn.update_vca_status(self.vcaStatus, self.kdu_instance)
             )
             self.k8s_juju_conn.libjuju.get_executed_actions.assert_not_called()
-            self.k8s_juju_conn.libjuju.get_actions.assert_not_called_once()
             self.k8s_juju_conn.libjuju.get_application_configs.assert_not_called_once()
 
 
@@ -732,14 +767,16 @@ class AddRelationTest(K8sJujuConnTestCase):
         self.k8s_juju_conn.libjuju.get_controller = AsyncMock()
         self.k8s_juju_conn.libjuju.consume = AsyncMock()
 
-    def test_standard_relation(self):
-        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint")
-        relation_endpoint_2 = RelationEndpoint("model-1.app2.1", None, "endpoint")
+    def test_standard_relation_same_model_and_controller(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint1")
+        relation_endpoint_2 = RelationEndpoint("model-1.app2.1", None, "endpoint2")
         self.loop.run_until_complete(
             self.k8s_juju_conn.add_relation(relation_endpoint_1, relation_endpoint_2)
         )
         self.k8s_juju_conn.libjuju.add_relation.assert_called_once_with(
-            model_name="model-1", endpoint_1="app1:endpoint", endpoint_2="app2:endpoint"
+            model_name="model-1",
+            endpoint_1="app1:endpoint1",
+            endpoint_2="app2:endpoint2",
         )
         self.k8s_juju_conn.libjuju.offer.assert_not_called()
         self.k8s_juju_conn.libjuju.consume.assert_not_called()
@@ -759,6 +796,24 @@ class AddRelationTest(K8sJujuConnTestCase):
             "model-2", "app2:endpoint", "saas"
         )
 
+    def test_cmr_relation_different_controller(self):
+        self.k8s_juju_conn._get_libjuju = AsyncMock(
+            return_value=self.k8s_juju_conn.libjuju
+        )
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", "vca-id-1", "endpoint")
+        relation_endpoint_2 = RelationEndpoint("model-1.app2.1", "vca-id-2", "endpoint")
+        offer = Offer("admin/model-1.app1")
+        self.k8s_juju_conn.libjuju.offer.return_value = offer
+        self.k8s_juju_conn.libjuju.consume.return_value = "saas"
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.k8s_juju_conn.libjuju.offer.assert_called_once_with(relation_endpoint_1)
+        self.k8s_juju_conn.libjuju.consume.assert_called_once()
+        self.k8s_juju_conn.libjuju.add_relation.assert_called_once_with(
+            "model-1", "app2:endpoint", "saas"
+        )
+
     def test_relation_exception(self):
         relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint")
         relation_endpoint_2 = RelationEndpoint("model-2.app2.1", None, "endpoint")
diff --git a/n2vc/tests/unit/test_kubectl.py b/n2vc/tests/unit/test_kubectl.py
index eb9b01d..a6d02ff 100644 (file)
 #     See the License for the specific language governing permissions and
 #     limitations under the License.
 
+import asynctest
+import yaml
+import os
 from unittest import TestCase, mock
-from n2vc.kubectl import Kubectl, CORE_CLIENT
+from n2vc.kubectl import Kubectl, CORE_CLIENT, CUSTOM_OBJECT_CLIENT
 from n2vc.utils import Dict
 from kubernetes.client.rest import ApiException
+from kubernetes.client import (
+    V1ObjectMeta,
+    V1Secret,
+    V1ServiceAccount,
+    V1SecretReference,
+    V1Role,
+    V1RoleBinding,
+    V1RoleRef,
+    V1Subject,
+    V1PolicyRule,
+    V1Namespace,
+)
 
 
 class FakeK8sResourceMetadata:
@@ -66,6 +81,56 @@ class FakeK8sStorageClassesList:
         return self._items
 
 
+class FakeK8sServiceAccountsList:
+    def __init__(self, items=[]):
+        self._items = items
+
+    @property
+    def items(self):
+        return self._items
+
+
+class FakeK8sSecretList:
+    def __init__(self, items=[]):
+        self._items = items
+
+    @property
+    def items(self):
+        return self._items
+
+
+class FakeK8sRoleList:
+    def __init__(self, items=[]):
+        self._items = items
+
+    @property
+    def items(self):
+        return self._items
+
+
+class FakeK8sRoleBindingList:
+    def __init__(self, items=[]):
+        self._items = items
+
+    @property
+    def items(self):
+        return self._items
+
+
+class FakeK8sVersionApiCode:
+    def __init__(self, major: str, minor: str):
+        self._major = major
+        self._minor = minor
+
+    @property
+    def major(self):
+        return self._major
+
+    @property
+    def minor(self):
+        return self._minor
+
+
 fake_list_services = Dict(
     {
         "items": [
@@ -248,3 +313,542 @@ class GetDefaultStorageClass(KubectlTestCase):
         sc_name = kubectl.get_default_storage_class()
         self.assertEqual(sc_name, self.default_sc_name)
         mock_list_storage_class.assert_called_once()
+
+
+@mock.patch("kubernetes.client.VersionApi.get_code")
+@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_secret")
+@mock.patch("kubernetes.client.CoreV1Api.create_namespaced_secret")
+@mock.patch("kubernetes.client.CoreV1Api.create_namespaced_service_account")
+@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_service_account")
+class CreateServiceAccountClass(KubectlTestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateServiceAccountClass, self).setUp()
+        self.service_account_name = "Service_account"
+        self.labels = {"Key1": "Value1", "Key2": "Value2"}
+        self.namespace = "kubernetes"
+        self.token_id = "abc12345"
+        self.kubectl = Kubectl()
+
+    def assert_create_secret(self, mock_create_secret, secret_name):
+        annotations = {"kubernetes.io/service-account.name": self.service_account_name}
+        secret_metadata = V1ObjectMeta(
+            name=secret_name, namespace=self.namespace, annotations=annotations
+        )
+        secret_type = "kubernetes.io/service-account-token"
+        secret = V1Secret(metadata=secret_metadata, type=secret_type)
+        mock_create_secret.assert_called_once_with(self.namespace, secret)
+
+    def assert_create_service_account_v_1_24(
+        self, mock_create_service_account, secret_name
+    ):
+        service_account_metadata = V1ObjectMeta(
+            name=self.service_account_name, labels=self.labels, namespace=self.namespace
+        )
+        secrets = [V1SecretReference(name=secret_name, namespace=self.namespace)]
+        service_account = V1ServiceAccount(
+            metadata=service_account_metadata, secrets=secrets
+        )
+        mock_create_service_account.assert_called_once_with(
+            self.namespace, service_account
+        )
+
+    def assert_create_service_account_v_1_23(self, mock_create_service_account):
+        metadata = V1ObjectMeta(
+            name=self.service_account_name, labels=self.labels, namespace=self.namespace
+        )
+        service_account = V1ServiceAccount(metadata=metadata)
+        mock_create_service_account.assert_called_once_with(
+            self.namespace, service_account
+        )
+
+    @mock.patch("n2vc.kubectl.uuid.uuid4")
+    def test_secret_is_created_when_k8s_1_24(
+        self,
+        mock_uuid4,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[])
+        mock_list_secret.return_value = FakeK8sSecretList(items=[])
+        mock_version.return_value = FakeK8sVersionApiCode("1", "24")
+        mock_uuid4.return_value = self.token_id
+        self.kubectl.create_service_account(
+            self.service_account_name, self.labels, self.namespace
+        )
+        secret_name = "{}-token-{}".format(self.service_account_name, self.token_id[:5])
+        self.assert_create_service_account_v_1_24(
+            mock_create_service_account, secret_name
+        )
+        self.assert_create_secret(mock_create_secret, secret_name)
+
+    def test_secret_is_not_created_when_k8s_1_23(
+        self,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[])
+        mock_version.return_value = FakeK8sVersionApiCode("1", "23+")
+        self.kubectl.create_service_account(
+            self.service_account_name, self.labels, self.namespace
+        )
+        self.assert_create_service_account_v_1_23(mock_create_service_account)
+        mock_create_secret.assert_not_called()
+        mock_list_secret.assert_not_called()
+
+    def test_raise_exception_if_service_account_already_exists(
+        self,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[1])
+        with self.assertRaises(Exception) as context:
+            self.kubectl.create_service_account(
+                self.service_account_name, self.labels, self.namespace
+            )
+        self.assertTrue(
+            "Service account with metadata.name={} already exists".format(
+                self.service_account_name
+            )
+            in str(context.exception)
+        )
+        mock_create_service_account.assert_not_called()
+        mock_create_secret.assert_not_called()
+
+    @mock.patch("n2vc.kubectl.uuid.uuid4")
+    def test_raise_exception_if_secret_already_exists(
+        self,
+        mock_uuid4,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[])
+        mock_list_secret.return_value = FakeK8sSecretList(items=[1])
+        mock_version.return_value = FakeK8sVersionApiCode("1", "24+")
+        mock_uuid4.return_value = self.token_id
+        with self.assertRaises(Exception) as context:
+            self.kubectl.create_service_account(
+                self.service_account_name, self.labels, self.namespace
+            )
+        self.assertTrue(
+            "Secret with metadata.name={}-token-{} already exists".format(
+                self.service_account_name, self.token_id[:5]
+            )
+            in str(context.exception)
+        )
+        mock_create_service_account.assert_called()
+        mock_create_secret.assert_not_called()
+
+
+@mock.patch("kubernetes.client.CustomObjectsApi.create_namespaced_custom_object")
+class CreateCertificateClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateCertificateClass, self).setUp()
+        self.namespace = "osm"
+        self.name = "test-cert"
+        self.dns_prefix = "*"
+        self.secret_name = "test-cert-secret"
+        self.usages = ["server auth"]
+        self.issuer_name = "ca-issuer"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_certificate_is_created(
+        self,
+        mock_create_certificate,
+    ):
+        with open(
+            os.path.join(
+                os.path.dirname(__file__), "testdata", "test_certificate.yaml"
+            ),
+            "r",
+        ) as test_certificate:
+            certificate_body = yaml.safe_load(test_certificate.read())
+            print(certificate_body)
+        await self.kubectl.create_certificate(
+            namespace=self.namespace,
+            name=self.name,
+            dns_prefix=self.dns_prefix,
+            secret_name=self.secret_name,
+            usages=self.usages,
+            issuer_name=self.issuer_name,
+        )
+        mock_create_certificate.assert_called_once_with(
+            group="cert-manager.io",
+            plural="certificates",
+            version="v1",
+            body=certificate_body,
+            namespace=self.namespace,
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_no_exception_if_alreadyexists(
+        self,
+        mock_create_certificate,
+    ):
+        api_exception = ApiException()
+        api_exception.body = '{"reason": "AlreadyExists"}'
+        self.kubectl.clients[
+            CUSTOM_OBJECT_CLIENT
+        ].create_namespaced_custom_object.side_effect = api_exception
+        raised = False
+        try:
+            await self.kubectl.create_certificate(
+                namespace=self.namespace,
+                name=self.name,
+                dns_prefix=self.dns_prefix,
+                secret_name=self.secret_name,
+                usages=self.usages,
+                issuer_name=self.issuer_name,
+            )
+        except Exception:
+            raised = True
+        self.assertFalse(raised, "An exception was raised")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_other_exceptions(
+        self,
+        mock_create_certificate,
+    ):
+        self.kubectl.clients[
+            CUSTOM_OBJECT_CLIENT
+        ].create_namespaced_custom_object.side_effect = Exception()
+        with self.assertRaises(Exception):
+            await self.kubectl.create_certificate(
+                namespace=self.namespace,
+                name=self.name,
+                dns_prefix=self.dns_prefix,
+                secret_name=self.secret_name,
+                usages=self.usages,
+                issuer_name=self.issuer_name,
+            )
+
+
+@mock.patch("kubernetes.client.CustomObjectsApi.delete_namespaced_custom_object")
+class DeleteCertificateClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(DeleteCertificateClass, self).setUp()
+        self.namespace = "osm"
+        self.object_name = "test-cert"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_no_exception_if_notfound(
+        self,
+        mock_create_certificate,
+    ):
+        api_exception = ApiException()
+        api_exception.body = '{"reason": "NotFound"}'
+        self.kubectl.clients[
+            CUSTOM_OBJECT_CLIENT
+        ].delete_namespaced_custom_object.side_effect = api_exception
+        raised = False
+        try:
+            await self.kubectl.delete_certificate(
+                namespace=self.namespace,
+                object_name=self.object_name,
+            )
+        except Exception:
+            raised = True
+        self.assertFalse(raised, "An exception was raised")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_other_exceptions(
+        self,
+        mock_create_certificate,
+    ):
+        self.kubectl.clients[
+            CUSTOM_OBJECT_CLIENT
+        ].delete_namespaced_custom_object.side_effect = Exception()
+        with self.assertRaises(Exception):
+            await self.kubectl.delete_certificate(
+                namespace=self.namespace,
+                object_name=self.object_name,
+            )
+
+
+@mock.patch("kubernetes.client.RbacAuthorizationV1Api.create_namespaced_role")
+@mock.patch("kubernetes.client.RbacAuthorizationV1Api.list_namespaced_role")
+class CreateRoleClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateRoleClass, self).setUp()
+        self.name = "role"
+        self.namespace = "osm"
+        self.resources = ["*"]
+        self.api_groups = ["*"]
+        self.verbs = ["*"]
+        self.labels = {}
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def assert_create_role(self, mock_create_role):
+        metadata = V1ObjectMeta(
+            name=self.name, labels=self.labels, namespace=self.namespace
+        )
+        role = V1Role(
+            metadata=metadata,
+            rules=[
+                V1PolicyRule(
+                    api_groups=self.api_groups,
+                    resources=self.resources,
+                    verbs=self.verbs,
+                ),
+            ],
+        )
+        await self.kubectl.create_role(
+            namespace=self.namespace,
+            api_groups=self.api_groups,
+            name=self.name,
+            resources=self.resources,
+            verbs=self.verbs,
+            labels=self.labels,
+        )
+        mock_create_role.assert_called_once_with(self.namespace, role)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_raise_exception_if_role_already_exists(
+        self,
+        mock_list_role,
+        mock_create_role,
+    ):
+        mock_list_role.return_value = FakeK8sRoleList(items=[1])
+        with self.assertRaises(Exception) as context:
+            await self.kubectl.create_role(
+                self.name,
+                self.labels,
+                self.api_groups,
+                self.resources,
+                self.verbs,
+                self.namespace,
+            )
+        self.assertTrue(
+            "Role with metadata.name={} already exists".format(self.name)
+            in str(context.exception)
+        )
+        mock_create_role.assert_not_called()
+
+
+@mock.patch("kubernetes.client.RbacAuthorizationV1Api.create_namespaced_role_binding")
+@mock.patch("kubernetes.client.RbacAuthorizationV1Api.list_namespaced_role_binding")
+class CreateRoleBindingClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateRoleBindingClass, self).setUp()
+        self.name = "rolebinding"
+        self.namespace = "osm"
+        self.role_name = "role"
+        self.sa_name = "Default"
+        self.labels = {}
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def assert_create_role_binding(self, mock_create_role_binding):
+        role_binding = V1RoleBinding(
+            metadata=V1ObjectMeta(name=self.name, labels=self.labels),
+            role_ref=V1RoleRef(kind="Role", name=self.role_name, api_group=""),
+            subjects=[
+                V1Subject(
+                    kind="ServiceAccount",
+                    name=self.sa_name,
+                    namespace=self.namespace,
+                )
+            ],
+        )
+        await self.kubectl.create_role_binding(
+            namespace=self.namespace,
+            role_name=self.role_name,
+            name=self.name,
+            sa_name=self.sa_name,
+            labels=self.labels,
+        )
+        mock_create_role_binding.assert_called_once_with(self.namespace, role_binding)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_raise_exception_if_role_binding_already_exists(
+        self,
+        mock_list_role_binding,
+        mock_create_role_binding,
+    ):
+        mock_list_role_binding.return_value = FakeK8sRoleBindingList(items=[1])
+        with self.assertRaises(Exception) as context:
+            await self.kubectl.create_role_binding(
+                self.name,
+                self.role_name,
+                self.sa_name,
+                self.labels,
+                self.namespace,
+            )
+        self.assertTrue(
+            "Role Binding with metadata.name={} already exists".format(self.name)
+            in str(context.exception)
+        )
+        mock_create_role_binding.assert_not_called()
+
+
+@mock.patch("kubernetes.client.CoreV1Api.create_namespaced_secret")
+class CreateSecretClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateSecretClass, self).setUp()
+        self.name = "secret"
+        self.namespace = "osm"
+        self.data = {"test": "1234"}
+        self.secret_type = "Opaque"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def assert_create_secret(self, mock_create_secret):
+        secret_metadata = V1ObjectMeta(name=self.name, namespace=self.namespace)
+        secret = V1Secret(
+            metadata=secret_metadata,
+            data=self.data,
+            type=self.secret_type,
+        )
+        await self.kubectl.create_secret(
+            namespace=self.namespace,
+            data=self.data,
+            name=self.name,
+            secret_type=self.secret_type,
+        )
+        mock_create_secret.assert_called_once_with(self.namespace, secret)
+
+
+@mock.patch("kubernetes.client.CoreV1Api.create_namespace")
+class CreateNamespaceClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateNamespaceClass, self).setUp()
+        self.namespace = "osm"
+        self.labels = {"key": "value"}
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_namespace_is_created(
+        self,
+        mock_create_namespace,
+    ):
+        metadata = V1ObjectMeta(name=self.namespace, labels=self.labels)
+        namespace = V1Namespace(
+            metadata=metadata,
+        )
+        await self.kubectl.create_namespace(
+            name=self.namespace,
+            labels=self.labels,
+        )
+        mock_create_namespace.assert_called_once_with(namespace)
+
+    async def test_namespace_is_created_default_labels(
+        self,
+        mock_create_namespace,
+    ):
+        metadata = V1ObjectMeta(name=self.namespace, labels=None)
+        namespace = V1Namespace(
+            metadata=metadata,
+        )
+        await self.kubectl.create_namespace(
+            name=self.namespace,
+        )
+        mock_create_namespace.assert_called_once_with(namespace)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_no_exception_if_alreadyexists(
+        self,
+        mock_create_namespace,
+    ):
+        api_exception = ApiException()
+        api_exception.body = '{"reason": "AlreadyExists"}'
+        self.kubectl.clients[CORE_CLIENT].create_namespace.side_effect = api_exception
+        raised = False
+        try:
+            await self.kubectl.create_namespace(
+                name=self.namespace,
+            )
+        except Exception:
+            raised = True
+        self.assertFalse(raised, "An exception was raised")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_other_exceptions(
+        self,
+        mock_create_namespace,
+    ):
+        self.kubectl.clients[CORE_CLIENT].create_namespace.side_effect = Exception()
+        with self.assertRaises(Exception):
+            await self.kubectl.create_namespace(
+                name=self.namespace,
+            )
+
+
+@mock.patch("kubernetes.client.CoreV1Api.delete_namespace")
+class DeleteNamespaceClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(DeleteNamespaceClass, self).setUp()
+        self.namespace = "osm"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_no_exception_if_notfound(
+        self,
+        mock_delete_namespace,
+    ):
+        api_exception = ApiException()
+        api_exception.body = '{"reason": "NotFound"}'
+        self.kubectl.clients[CORE_CLIENT].delete_namespace.side_effect = api_exception
+        raised = False
+        try:
+            await self.kubectl.delete_namespace(
+                name=self.namespace,
+            )
+        except Exception:
+            raised = True
+        self.assertFalse(raised, "An exception was raised")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_other_exceptions(
+        self,
+        mock_delete_namespace,
+    ):
+        self.kubectl.clients[CORE_CLIENT].delete_namespace.side_effect = Exception()
+        with self.assertRaises(Exception):
+            await self.kubectl.delete_namespace(
+                name=self.namespace,
+            )
+
+
+@mock.patch("kubernetes.client.CoreV1Api.read_namespaced_secret")
+class GetSecretContentClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(GetSecretContentClass, self).setUp()
+        self.name = "my_secret"
+        self.namespace = "osm"
+        self.data = {"my_key": "my_value"}
+        self.type = "Opaque"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_return_type_is_dict(
+        self,
+        mock_read_namespaced_secret,
+    ):
+        metadata = V1ObjectMeta(name=self.name, namespace=self.namespace)
+        secret = V1Secret(metadata=metadata, data=self.data, type=self.type)
+        mock_read_namespaced_secret.return_value = secret
+        content = await self.kubectl.get_secret_content(self.name, self.namespace)
+        assert type(content) is dict
diff --git a/n2vc/tests/unit/test_libjuju.py b/n2vc/tests/unit/test_libjuju.py
index 67cd19f..38d8d0e 100644 (file)
@@ -60,7 +60,7 @@ class LibjujuTestCase(asynctest.TestCase):
         self.loop = asyncio.get_event_loop()
         self.db = Mock()
         mock_base64_to_cacert.return_value = cacert
-        Connection._load_vca_connection_data = Mock()
+        Connection._load_vca_connection_data = Mock()
         vca_connection = Connection(AsyncMock())
         vca_connection._data = ConnectionData(
             **{
@@ -78,7 +78,7 @@ class LibjujuTestCase(asynctest.TestCase):
             }
         )
         logging.disable(logging.CRITICAL)
-        self.libjuju = Libjuju(vca_connection, self.loop)
+        self.libjuju = Libjuju(vca_connection)
         self.loop.run_until_complete(self.libjuju.disconnect())
 
 
@@ -496,70 +496,408 @@ class CreateMachineTest(LibjujuTestCase):
 # TODO test provision machine
 
 
+@asynctest.mock.patch("os.remove")
+@asynctest.mock.patch("n2vc.libjuju.yaml.dump")
+@asynctest.mock.patch("builtins.open", create=True)
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_controller")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_model")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.disconnect_model")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.disconnect_controller")
 @asynctest.mock.patch("n2vc.juju_watcher.JujuModelWatcher.wait_for_model")
 @asynctest.mock.patch("juju.model.Model.deploy")
+@asynctest.mock.patch("juju.model.CharmhubDeployType.resolve")
+@asynctest.mock.patch("n2vc.libjuju.BundleHandler")
+@asynctest.mock.patch("juju.url.URL.parse")
 class DeployTest(LibjujuTestCase):
     def setUp(self):
         super(DeployTest, self).setUp()
+        self.instantiation_params = {"applications": {"squid": {"scale": 2}}}
+        self.architecture = "amd64"
+        self.uri = "cs:osm"
+        self.url = AsyncMock()
+        self.url.schema = juju.url.Schema.CHARM_HUB
+        self.bundle_instance = None
+
+    def setup_bundle_download_mocks(
+        self, mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+    ):
+        mock_url_parse.return_value = self.url
+        mock_bundle.return_value = AsyncMock()
+        mock_resolve.return_value = AsyncMock()
+        mock_resolve.origin = AsyncMock()
+        mock_get_model.return_value = juju.model.Model()
+        self.bundle_instance = mock_bundle.return_value
+        self.bundle_instance.applications = {"squid"}
+
+    def assert_overlay_file_is_written(self, filename, mocked_file, mock_yaml, mock_os):
+        mocked_file.assert_called_once_with(filename, "w")
+        mock_yaml.assert_called_once_with(
+            self.instantiation_params, mocked_file.return_value.__enter__.return_value
+        )
+        mock_os.assert_called_once_with(filename)
+
+    def assert_overlay_file_is_not_written(self, mocked_file, mock_yaml, mock_os):
+        mocked_file.assert_not_called()
+        mock_yaml.assert_not_called()
+        mock_os.assert_not_called()
+
+    def assert_bundle_is_downloaded(self, mock_resolve, mock_url_parse):
+        mock_resolve.assert_called_once_with(
+            self.url, self.architecture, entity_url=self.uri
+        )
+        mock_url_parse.assert_called_once_with(self.uri)
+        self.bundle_instance.fetch_plan.assert_called_once_with(
+            self.url, mock_resolve.origin
+        )
+
+    def assert_bundle_is_not_downloaded(self, mock_resolve, mock_url_parse):
+        mock_resolve.assert_not_called()
+        mock_url_parse.assert_not_called()
+        self.bundle_instance.fetch_plan.assert_not_called()
 
     def test_deploy(
         self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
         mock_deploy,
         mock_wait_for_model,
         mock_disconnect_controller,
         mock_disconnect_model,
         mock_get_model,
         mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
     ):
-        mock_get_model.return_value = juju.model.Model()
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+        model_name = "model1"
+
         self.loop.run_until_complete(
         self.loop.run_until_complete(
-            self.libjuju.deploy("cs:osm", "model", wait=True, timeout=0)
+            self.libjuju.deploy(
+                "cs:osm",
+                model_name,
+                wait=True,
+                timeout=0,
+                instantiation_params=None,
+            )
         )
         )
-        mock_deploy.assert_called_once()
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with("cs:osm", trust=True, overlays=[])
         mock_wait_for_model.assert_called_once()
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
     def test_deploy_no_wait(
         self,
         mock_wait_for_model.assert_called_once()
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
     def test_deploy_no_wait(
         self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
         mock_deploy,
         mock_wait_for_model,
         mock_disconnect_controller,
         mock_disconnect_model,
         mock_get_model,
         mock_get_controller,
         mock_deploy,
         mock_wait_for_model,
         mock_disconnect_controller,
         mock_disconnect_model,
         mock_get_model,
         mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
     ):
     ):
-        mock_get_model.return_value = juju.model.Model()
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
         self.loop.run_until_complete(
         self.loop.run_until_complete(
-            self.libjuju.deploy("cs:osm", "model", wait=False, timeout=0)
+            self.libjuju.deploy(
+                "cs:osm", "model", wait=False, timeout=0, instantiation_params={}
+            )
         )
         )
-        mock_deploy.assert_called_once()
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with("cs:osm", trust=True, overlays=[])
         mock_wait_for_model.assert_not_called()
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
     def test_deploy_exception(
         self,
         mock_wait_for_model.assert_not_called()
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
     def test_deploy_exception(
         self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
         mock_deploy,
         mock_wait_for_model,
         mock_disconnect_controller,
         mock_disconnect_model,
         mock_get_model,
         mock_get_controller,
         mock_deploy,
         mock_wait_for_model,
         mock_disconnect_controller,
         mock_disconnect_model,
         mock_get_model,
         mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
     ):
     ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
         mock_deploy.side_effect = Exception()
         mock_deploy.side_effect = Exception()
-        mock_get_model.return_value = juju.model.Model()
         with self.assertRaises(Exception):
             self.loop.run_until_complete(self.libjuju.deploy("cs:osm", "model"))
         with self.assertRaises(Exception):
             self.loop.run_until_complete(self.libjuju.deploy("cs:osm", "model"))
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
         mock_deploy.assert_called_once()
         mock_wait_for_model.assert_not_called()
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
         mock_deploy.assert_called_once()
         mock_wait_for_model.assert_not_called()
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
+    def test_deploy_with_instantiation_params(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+        model_name = "model1"
+        expected_filename = "{}-overlay.yaml".format(model_name)
+        self.loop.run_until_complete(
+            self.libjuju.deploy(
+                self.uri,
+                model_name,
+                wait=True,
+                timeout=0,
+                instantiation_params=self.instantiation_params,
+            )
+        )
+        self.assert_overlay_file_is_written(
+            expected_filename, mocked_file, mock_yaml, mock_os
+        )
+        self.assert_bundle_is_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with(
+            self.uri, trust=True, overlays=[expected_filename]
+        )
+        mock_wait_for_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_with_instantiation_params_no_applications(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.instantiation_params = {"applications": {}}
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+
+        model_name = "model3"
+        expected_filename = "{}-overlay.yaml".format(model_name)
+        self.loop.run_until_complete(
+            self.libjuju.deploy(
+                self.uri,
+                model_name,
+                wait=False,
+                timeout=0,
+                instantiation_params=self.instantiation_params,
+            )
+        )
+
+        self.assert_overlay_file_is_written(
+            expected_filename, mocked_file, mock_yaml, mock_os
+        )
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with(
+            self.uri, trust=True, overlays=[expected_filename]
+        )
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_with_instantiation_params_applications_not_found(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.instantiation_params = {"some_key": {"squid": {"scale": 2}}}
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+
+        with self.assertRaises(JujuError):
+            self.loop.run_until_complete(
+                self.libjuju.deploy(
+                    self.uri,
+                    "model1",
+                    wait=True,
+                    timeout=0,
+                    instantiation_params=self.instantiation_params,
+                )
+            )
+
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_not_called()
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_overlay_contains_invalid_app(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+        self.bundle_instance.applications = {"new_app"}
+
+        with self.assertRaises(JujuApplicationNotFound) as error:
+            self.loop.run_until_complete(
+                self.libjuju.deploy(
+                    self.uri,
+                    "model2",
+                    wait=True,
+                    timeout=0,
+                    instantiation_params=self.instantiation_params,
+                )
+            )
+        error_msg = "Cannot find application ['squid'] in original bundle {'new_app'}"
+        self.assertEqual(str(error.exception), error_msg)
+
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_not_called()
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_exception_with_instantiation_params(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+
+        mock_deploy.side_effect = Exception()
+        model_name = "model2"
+        expected_filename = "{}-overlay.yaml".format(model_name)
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.libjuju.deploy(
+                    self.uri,
+                    model_name,
+                    instantiation_params=self.instantiation_params,
+                )
+            )
+
+        self.assert_overlay_file_is_written(
+            expected_filename, mocked_file, mock_yaml, mock_os
+        )
+        self.assert_bundle_is_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with(
+            self.uri, trust=True, overlays=[expected_filename]
+        )
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    @asynctest.mock.patch("logging.Logger.warning")
+    def test_deploy_exception_when_deleting_file_is_not_propagated(
+        self,
+        mock_warning,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+
+        mock_os.side_effect = OSError("Error")
+        model_name = "model2"
+        expected_filename = "{}-overlay.yaml".format(model_name)
+        self.loop.run_until_complete(
+            self.libjuju.deploy(
+                self.uri,
+                model_name,
+                instantiation_params=self.instantiation_params,
+            )
+        )
+
+        self.assert_overlay_file_is_written(
+            expected_filename, mocked_file, mock_yaml, mock_os
+        )
+        self.assert_bundle_is_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with(
+            self.uri, trust=True, overlays=[expected_filename]
+        )
+        mock_wait_for_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+        mock_warning.assert_called_with(
+            "Overlay file {} could not be removed: Error".format(expected_filename)
+        )
+
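The exception tests above also fix the overlay-file lifecycle: the file is named <model_name>-overlay.yaml, handed to deploy via overlays=[...] together with trust=True, and removed afterwards even when deploy raises, with removal failures logged but never propagated. A sketch of that flow, with names taken from the assertions rather than from the implementation:

    import logging
    import os

    import yaml

    log = logging.getLogger(__name__)

    async def deploy_with_overlay(model, uri, model_name, instantiation_params):
        overlay_file = "{}-overlay.yaml".format(model_name)
        with open(overlay_file, "w") as f:
            yaml.dump(instantiation_params, f)
        try:
            await model.deploy(uri, trust=True, overlays=[overlay_file])
        finally:
            try:
                os.remove(overlay_file)
            except OSError as e:
                # matches the warning asserted above; the error is swallowed
                log.warning(
                    "Overlay file {} could not be removed: {}".format(overlay_file, e)
                )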
 
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_controller")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_model")
 
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_controller")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_model")
@@ -778,7 +1116,6 @@ class ExecuteActionTest(LibjujuTestCase):
         mock_get_model,
         mock_get_controller,
     ):
-
         mock_get_model.return_value = juju.model.Model()
         mock__get_application.return_value = FakeApplication()
         output = None
@@ -1007,6 +1344,38 @@ class AddRelationTest(LibjujuTestCase):
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
+    @asynctest.mock.patch("logging.Logger.warning")
+    def test_not_found_in_error_code(
+        self,
+        mock_warning,
+        mock_add_relation,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        result = {
+            "error": "relation cannot be added",
+            "error-code": "not found",
+            "response": "response",
+            "request-id": 1,
+        }
+
+        mock_get_model.return_value = juju.model.Model()
+        mock_add_relation.side_effect = JujuAPIError(result)
+
+        self.loop.run_until_complete(
+            self.libjuju.add_relation(
+                "model",
+                "app1:relation1",
+                "app2:relation2",
+            )
+        )
+
+        mock_warning.assert_called_with("Relation not found: relation cannot be added")
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
     @asynctest.mock.patch("logging.Logger.warning")
     def test_already_exists(
         self,
     @asynctest.mock.patch("logging.Logger.warning")
     def test_already_exists(
         self,
@@ -1035,6 +1404,40 @@ class AddRelationTest(LibjujuTestCase):
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
+    @asynctest.mock.patch("logging.Logger.warning")
+    def test_already_exists_error_code(
+        self,
+        mock_warning,
+        mock_add_relation,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        result = {
+            "error": "relation cannot be added",
+            "error-code": "already exists",
+            "response": "response",
+            "request-id": 1,
+        }
+
+        mock_get_model.return_value = juju.model.Model()
+        mock_add_relation.side_effect = JujuAPIError(result)
+
+        self.loop.run_until_complete(
+            self.libjuju.add_relation(
+                "model",
+                "app1:relation1",
+                "app2:relation2",
+            )
+        )
+
+        mock_warning.assert_called_with(
+            "Relation already exists: relation cannot be added"
+        )
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
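These two cases extend the earlier message-based checks: add_relation must also treat the JujuAPIError as benign when the tolerated condition is carried in the error-code field rather than in the message (python-libjuju exposes the result dict as the message and error_code attributes). A sketch of the assumed handling:

    try:
        await model.add_relation(endpoint_1, endpoint_2)
    except JujuAPIError as e:
        condition = "{} {}".format(e.message, e.error_code or "")
        if "not found" in condition:
            self.log.warning("Relation not found: {}".format(e.message))
        elif "already exists" in condition:
            self.log.warning("Relation already exists: {}".format(e.message))
        else:
            raise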
     def test_exception(
         self,
         mock_add_relation,
@@ -1229,7 +1632,6 @@ class ConfigureApplicationTest(LibjujuTestCase):
         mock_get_model,
         mock_get_controller,
     ):
-
         mock_get_application.return_value = FakeApplication()
 
         self.loop.run_until_complete(
@@ -1251,7 +1653,6 @@ class ConfigureApplicationTest(LibjujuTestCase):
         mock_get_model,
         mock_get_controller,
     ):
-
         mock_get_application.side_effect = Exception()
 
         with self.assertRaises(Exception):
@@ -1273,7 +1674,6 @@ class ConfigureApplicationTest(LibjujuTestCase):
         mock_get_model,
         mock_get_controller,
     ):
-
         result = {"error": "not found", "response": "response", "request-id": 1}
 
         mock_get_controller.side_effect = JujuAPIError(result)
         result = {"error": "not found", "response": "response", "request-id": 1}
 
         mock_get_controller.side_effect = JujuAPIError(result)
@@ -1298,7 +1698,6 @@ class ConfigureApplicationTest(LibjujuTestCase):
         mock_get_model,
         mock_get_controller,
     ):
-
         result = {"error": "not found", "response": "response", "request-id": 1}
         mock_get_model.side_effect = JujuAPIError(result)
 
         result = {"error": "not found", "response": "response", "request-id": 1}
         mock_get_model.side_effect = JujuAPIError(result)
 
index 3caae03..2ce5024 100644 (file)
@@ -15,7 +15,7 @@
 
 import asyncio
 import logging
-from unittest.mock import Mock
+from unittest.mock import Mock, MagicMock
 from unittest.mock import patch
 
 
@@ -23,12 +23,16 @@ import asynctest
 from n2vc.definitions import Offer, RelationEndpoint
 from n2vc.n2vc_juju_conn import N2VCJujuConnector
 from osm_common import fslocal
+from osm_common.dbmemory import DbMemory
 from n2vc.exceptions import (
     N2VCBadArgumentsException,
     N2VCException,
 from n2vc.exceptions import (
     N2VCBadArgumentsException,
     N2VCException,
+    JujuApplicationNotFound,
 )
 from n2vc.tests.unit.utils import AsyncMock
 from n2vc.vca.connection_data import ConnectionData
 )
 from n2vc.tests.unit.utils import AsyncMock
 from n2vc.vca.connection_data import ConnectionData
+from n2vc.tests.unit.testdata import test_db_descriptors as descriptors
+import yaml
 
 
 class N2VCJujuConnTestCase(asynctest.TestCase):
 
 
 class N2VCJujuConnTestCase(asynctest.TestCase):
@@ -36,10 +40,7 @@ class N2VCJujuConnTestCase(asynctest.TestCase):
     @asynctest.mock.patch("n2vc.n2vc_juju_conn.get_connection")
     @asynctest.mock.patch("n2vc.vca.connection_data.base64_to_cacert")
     def setUp(
     @asynctest.mock.patch("n2vc.n2vc_juju_conn.get_connection")
     @asynctest.mock.patch("n2vc.vca.connection_data.base64_to_cacert")
     def setUp(
-        self,
-        mock_base64_to_cacert=None,
-        mock_get_connection=None,
-        mock_store=None,
+        self, mock_base64_to_cacert=None, mock_get_connection=None, mock_store=None
     ):
         self.loop = asyncio.get_event_loop()
         self.db = Mock()
@@ -72,7 +73,6 @@ class N2VCJujuConnTestCase(asynctest.TestCase):
             db=self.db,
             fs=fslocal.FsLocal(),
             log=None,
-            loop=self.loop,
             on_update_db=None,
         )
         N2VCJujuConnector.get_public_key.assert_not_called()
@@ -133,10 +133,6 @@ class UpdateVcaStatusTest(N2VCJujuConnTestCase):
             self.n2vc.libjuju.get_application_configs.assert_not_called_once()
 
 
-@asynctest.mock.patch("osm_common.fslocal.FsLocal.file_exists")
-@asynctest.mock.patch(
-    "osm_common.fslocal.FsLocal.path", new_callable=asynctest.PropertyMock, create=True
-)
 class K8sProxyCharmsTest(N2VCJujuConnTestCase):
     def setUp(self):
         super(K8sProxyCharmsTest, self).setUp()
@@ -144,51 +140,53 @@ class K8sProxyCharmsTest(N2VCJujuConnTestCase):
         self.n2vc.libjuju.add_model = AsyncMock()
         self.n2vc.libjuju.deploy_charm = AsyncMock()
         self.n2vc.libjuju.model_exists.return_value = False
+        self.db = DbMemory()
+        self.fs = fslocal.FsLocal()
+        self.fs.path = "/"
+        self.n2vc.fs = self.fs
+        self.n2vc.db = self.db
+        self.db.create_list("nsrs", yaml.safe_load(descriptors.db_nsrs_text))
+        self.db.create_list("vnfrs", yaml.safe_load(descriptors.db_vnfrs_text))
 
     @patch(
         "n2vc.n2vc_juju_conn.generate_random_alfanum_string",
         **{"return_value": "random"}
     )
 
     @patch(
         "n2vc.n2vc_juju_conn.generate_random_alfanum_string",
         **{"return_value": "random"}
     )
-    def test_success(
-        self,
-        mock_generate_random_alfanum_string,
-        mock_path,
-        mock_file_exists,
-    ):
-        mock_file_exists.return_value = True
-        mock_path.return_value = "/path"
+    def test_success(self, mock_generate_random_alfanum_string):
+        self.n2vc.fs.file_exists = MagicMock(create_autospec=True)
+        self.n2vc.fs.file_exists.return_value = True
         ee_id = self.loop.run_until_complete(
             self.n2vc.install_k8s_proxy_charm(
-                "charm",
-                "nsi-id.ns-id.vnf-id.vdu",
-                "////path/",
+                "simple",
+                ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
+                "path",
                 {},
             )
         )
 
         self.n2vc.libjuju.add_model.assert_called_once()
         self.n2vc.libjuju.deploy_charm.assert_called_once_with(
-            model_name="ns-id-k8s",
-            application_name="app-vnf-vnf-id-vdu-vdu-random",
-            path="/path/path/",
+            model_name="dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s",
+            application_name="simple-ee-z0-vnf1-vnf",
+            path="//path",
             machine_id=None,
             db_dict={},
             progress_timeout=None,
             total_timeout=None,
             config=None,
         )
-        self.assertEqual(ee_id, "ns-id-k8s.app-vnf-vnf-id-vdu-vdu-random.k8s")
+        self.assertEqual(
+            ee_id, "dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s.simple-ee-z0-vnf1-vnf.k8s"
+        )
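The updated expectations make the K8s proxy charm naming convention explicit: the Juju model is <ns-id>-k8s, the application name is derived from the descriptors instead of being random, and the returned execution environment id is the dot-joined triple <model>.<application>.k8s. A sketch of the assumed composition:

    def build_k8s_ee_id(ns_id, application_name):
        model_name = "{}-k8s".format(ns_id)
        # "k8s" stands in for the machine id for proxy charms on Kubernetes
        return "{}.{}.{}".format(model_name, application_name, "k8s")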
 
     def test_no_artifact_path(
         self,
-        mock_path,
-        mock_file_exists,
     ):
         with self.assertRaises(N2VCBadArgumentsException):
             ee_id = self.loop.run_until_complete(
                 self.n2vc.install_k8s_proxy_charm(
-                    "charm",
-                    "nsi-id.ns-id.vnf-id.vdu",
+                    "simple",
+                    ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
                     "",
                     {},
                 )
                     "",
                     {},
                 )
@@ -197,15 +195,13 @@ class K8sProxyCharmsTest(N2VCJujuConnTestCase):
 
     def test_no_db(
         self,
-        mock_path,
-        mock_file_exists,
     ):
         with self.assertRaises(N2VCBadArgumentsException):
             ee_id = self.loop.run_until_complete(
                 self.n2vc.install_k8s_proxy_charm(
-                    "charm",
-                    "nsi-id.ns-id.vnf-id.vdu",
-                    "/path/",
+                    "simple",
+                    ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
+                    "path",
                     None,
                 )
             )
@@ -213,16 +209,15 @@ class K8sProxyCharmsTest(N2VCJujuConnTestCase):
 
     def test_file_not_exists(
         self,
-        mock_path,
-        mock_file_exists,
     ):
-        mock_file_exists.return_value = False
+        self.n2vc.fs.file_exists = MagicMock(create_autospec=True)
+        self.n2vc.fs.file_exists.return_value = False
         with self.assertRaises(N2VCBadArgumentsException):
             ee_id = self.loop.run_until_complete(
                 self.n2vc.install_k8s_proxy_charm(
-                    "charm",
-                    "nsi-id.ns-id.vnf-id.vdu",
-                    "/path/",
+                    "simple",
+                    ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
+                    "path",
                     {},
                 )
             )
@@ -230,18 +225,18 @@ class K8sProxyCharmsTest(N2VCJujuConnTestCase):
 
     def test_exception(
         self,
-        mock_path,
-        mock_file_exists,
     ):
-        mock_file_exists.return_value = True
-        mock_path.return_value = "/path"
+        self.n2vc.fs.file_exists = MagicMock(create_autospec=True)
+        self.n2vc.fs.file_exists.return_value = True
+        self.n2vc.fs.path = MagicMock(create_autospec=True)
+        self.n2vc.fs.path.return_value = "path"
         self.n2vc.libjuju.deploy_charm.side_effect = Exception()
         with self.assertRaises(N2VCException):
             ee_id = self.loop.run_until_complete(
                 self.n2vc.install_k8s_proxy_charm(
-                    "charm",
-                    "nsi-id.ns-id.vnf-id.vdu",
-                    "path/",
+                    "simple",
+                    ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
+                    "path",
                     {},
                 )
             )
@@ -256,14 +251,16 @@ class AddRelationTest(N2VCJujuConnTestCase):
         self.n2vc.libjuju.get_controller = AsyncMock()
         self.n2vc.libjuju.consume = AsyncMock()
 
-    def test_standard_relation(self):
-        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint")
-        relation_endpoint_2 = RelationEndpoint("model-1.app2.1", None, "endpoint")
+    def test_standard_relation_same_model_and_controller(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint1")
+        relation_endpoint_2 = RelationEndpoint("model-1.app2.1", None, "endpoint2")
         self.loop.run_until_complete(
             self.n2vc.add_relation(relation_endpoint_1, relation_endpoint_2)
         )
         self.n2vc.libjuju.add_relation.assert_called_once_with(
-            model_name="model-1", endpoint_1="app1:endpoint", endpoint_2="app2:endpoint"
+            model_name="model-1",
+            endpoint_1="app1:endpoint1",
+            endpoint_2="app2:endpoint2",
         )
         self.n2vc.libjuju.offer.assert_not_called()
         self.n2vc.libjuju.consume.assert_not_called()
@@ -283,6 +280,26 @@ class AddRelationTest(N2VCJujuConnTestCase):
             "model-2", "app2:endpoint", "saas"
         )
 
             "model-2", "app2:endpoint", "saas"
         )
 
+    def test_cmr_relation_different_controller(self):
+        self.n2vc._get_libjuju = AsyncMock(return_value=self.n2vc.libjuju)
+        relation_endpoint_1 = RelationEndpoint(
+            "model-1.app1.0", "vca-id-1", "endpoint1"
+        )
+        relation_endpoint_2 = RelationEndpoint(
+            "model-1.app2.1", "vca-id-2", "endpoint2"
+        )
+        offer = Offer("admin/model-1.app1")
+        self.n2vc.libjuju.offer.return_value = offer
+        self.n2vc.libjuju.consume.return_value = "saas"
+        self.loop.run_until_complete(
+            self.n2vc.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.n2vc.libjuju.offer.assert_called_once_with(relation_endpoint_1)
+        self.n2vc.libjuju.consume.assert_called_once()
+        self.n2vc.libjuju.add_relation.assert_called_once_with(
+            "model-1", "app2:endpoint2", "saas"
+        )
+
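For endpoints on different controllers (distinct vca-ids), this test fixes the cross-model-relation sequence: offer the providing endpoint, consume the offer on the requiring side, then relate the requirer to the resulting SaaS name. A sketch of that sequence; the consume argument shapes and the RelationEndpoint attribute names are assumptions:

    async def add_cmr_relation(libjuju, provider, requirer):
        offer = await libjuju.offer(provider)  # e.g. "admin/model-1.app1"
        saas_name = await libjuju.consume(requirer.model_name, offer)
        await libjuju.add_relation(
            requirer.model_name,
            "{}:{}".format(requirer.application_name, requirer.endpoint),
            saas_name,
        )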
     def test_relation_exception(self):
         relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint")
         relation_endpoint_2 = RelationEndpoint("model-2.app2.1", None, "endpoint")
@@ -291,3 +308,1188 @@ class AddRelationTest(N2VCJujuConnTestCase):
             self.loop.run_until_complete(
                 self.n2vc.add_relation(relation_endpoint_1, relation_endpoint_2)
             )
+
+
+class UpgradeCharmTest(N2VCJujuConnTestCase):
+    def setUp(self):
+        super(UpgradeCharmTest, self).setUp()
+        self.n2vc._get_libjuju = AsyncMock(return_value=self.n2vc.libjuju)
+        N2VCJujuConnector._get_ee_id_components = Mock()
+        self.n2vc.libjuju.upgrade_charm = AsyncMock()
+
+    def test_empty_ee_id(self):
+        with self.assertRaises(N2VCBadArgumentsException):
+            self.loop.run_until_complete(
+                self.n2vc.upgrade_charm(
+                    "", "/sample_charm_path", "sample_charm_id", "native-charm", None
+                )
+            )
+        self.n2vc._get_libjuju.assert_called()
+        self.n2vc._get_ee_id_components.assert_not_called()
+        self.n2vc.libjuju.upgrade_charm.assert_not_called()
+
+    def test_wrong_ee_id(self):
+        N2VCJujuConnector._get_ee_id_components.side_effect = Exception
+        with self.assertRaises(N2VCBadArgumentsException):
+            self.loop.run_until_complete(
+                self.n2vc.upgrade_charm(
+                    "ns-id-k8s.app-vnf-vnf-id-vdu-vdu-random.k8s",
+                    "/sample_charm_path",
+                    "sample_charm_id",
+                    "native-charm",
+                    500,
+                )
+            )
+        self.n2vc._get_libjuju.assert_called()
+        self.n2vc._get_ee_id_components.assert_called()
+        self.n2vc.libjuju.upgrade_charm.assert_not_called()
+
+    def test_charm_upgrade_succeeded(self):
+        N2VCJujuConnector._get_ee_id_components.return_value = (
+            "sample_model",
+            "sample_app",
+            "sample_machine_id",
+        )
+        self.loop.run_until_complete(
+            self.n2vc.upgrade_charm(
+                "ns-id-k8s.app-vnf-vnf-id-vdu-vdu-random.k8s",
+                "/sample_charm_path",
+                "sample_charm_id",
+                "native-charm",
+                500,
+            )
+        )
+        self.n2vc._get_libjuju.assert_called()
+        self.n2vc._get_ee_id_components.assert_called()
+        self.n2vc.libjuju.upgrade_charm.assert_called_with(
+            application_name="sample_app",
+            path="/sample_charm_path",
+            model_name="sample_model",
+            total_timeout=500,
+        )
+
+    def test_charm_upgrade_failed(self):
+        N2VCJujuConnector._get_ee_id_components.return_value = (
+            "sample_model",
+            "sample_app",
+            "sample_machine_id",
+        )
+        self.n2vc.libjuju.upgrade_charm.side_effect = JujuApplicationNotFound
+        with self.assertRaises(N2VCException):
+            self.loop.run_until_complete(
+                self.n2vc.upgrade_charm(
+                    "ns-id-k8s.app-vnf-vnf-id-vdu-vdu-random.k8s",
+                    "/sample_charm_path",
+                    "sample_charm_id",
+                    "native-charm",
+                    None,
+                )
+            )
+        self.n2vc._get_libjuju.assert_called()
+        self.n2vc._get_ee_id_components.assert_called()
+        self.n2vc.libjuju.upgrade_charm.assert_called_with(
+            application_name="sample_app",
+            path="/sample_charm_path",
+            model_name="sample_model",
+            total_timeout=None,
+        )
+
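UpgradeCharmTest stubs _get_ee_id_components to yield a (model, application, machine) triple, i.e. the inverse of the dot-joined ee_id above; malformed ids make it raise, which upgrade_charm surfaces as N2VCBadArgumentsException. A sketch of the assumed decomposition:

    def get_ee_id_components(ee_id):
        # "ns-id-k8s.app-vnf-vnf-id-vdu-vdu-random.k8s" -> three parts;
        # a wrong number of dots raises ValueError
        model_name, application_name, machine_id = ee_id.split(".")
        return model_name, application_name, machine_id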
+
+class GenerateApplicationNameTest(N2VCJujuConnTestCase):
+    vnf_id = "dbfbd751-3de4-4e68-bd40-ec5ae0a53898"
+
+    def setUp(self):
+        super(GenerateApplicationNameTest, self).setUp()
+        self.db = MagicMock(DbMemory)
+
+    @patch(
+        "n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_generate_backward_compatible_application_name(
+        self, mock_generate_random_alfanum
+    ):
+        vdu_id = "mgmtVM"
+        vdu_count = "0"
+        expected_result = "app-vnf-ec5ae0a53898-vdu-mgmtVM-cnt-0-random"
+
+        application_name = self.n2vc._generate_backward_compatible_application_name(
+            GenerateApplicationNameTest.vnf_id, vdu_id, vdu_count
+        )
+        self.assertEqual(application_name, expected_result)
+
+    @patch(
+        "n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_generate_backward_compatible_application_name_without_vnf_id_vdu_id(
+        self, mock_generate_random_alfanum
+    ):
+        vnf_id = None
+        vdu_id = ""
+        vdu_count = None
+        expected_result = "app--random"
+        application_name = self.n2vc._generate_backward_compatible_application_name(
+            vnf_id, vdu_id, vdu_count
+        )
+
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_find_charm_level_with_vnf_id(self):
+        vdu_id = ""
+        expected_result = "vnf-level"
+        charm_level = self.n2vc._find_charm_level(
+            GenerateApplicationNameTest.vnf_id, vdu_id
+        )
+        self.assertEqual(charm_level, expected_result)
+
+    def test_find_charm_level_with_vdu_id(self):
+        vnf_id = ""
+        vdu_id = "mgmtVM"
+        with self.assertRaises(N2VCException):
+            self.n2vc._find_charm_level(vnf_id, vdu_id)
+
+    def test_find_charm_level_with_vnf_id_and_vdu_id(self):
+        vdu_id = "mgmtVM"
+        expected_result = "vdu-level"
+        charm_level = self.n2vc._find_charm_level(
+            GenerateApplicationNameTest.vnf_id, vdu_id
+        )
+        self.assertEqual(charm_level, expected_result)
+
+    def test_find_charm_level_without_vnf_id_and_vdu_id(self):
+        vnf_id = ""
+        vdu_id = ""
+        expected_result = "ns-level"
+        charm_level = self.n2vc._find_charm_level(vnf_id, vdu_id)
+        self.assertEqual(charm_level, expected_result)
+
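The four _find_charm_level cases define a small decision table: a vdu id is only meaningful inside a VNF, so vdu without vnf is an error, and otherwise the most specific identifier wins. A sketch of the equivalent logic (the exception message is illustrative):

    def find_charm_level(vnf_id, vdu_id):
        if vdu_id and not vnf_id:
            raise N2VCException("a vdu-level charm requires a vnf id")
        if vnf_id and vdu_id:
            return "vdu-level"
        if vnf_id:
            return "vnf-level"
        return "ns-level"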
+    def test_generate_application_name_ns_charm(self):
+        charm_level = "ns-level"
+        vnfrs = {}
+        vca_records = [
+            {
+                "target_element": "ns",
+                "member-vnf-index": "",
+                "vdu_id": None,
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": None,
+                "vdu_name": None,
+                "type": "proxy_charm",
+                "ee_descriptor_id": None,
+                "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh",
+                "ee_id": None,
+                "application": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = ""
+        vdu_count = ""
+        vdu_id = None
+        expected_result = "simple-ns-charm-abc-000-rrrr-nnnn-4444-h-ns"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_generate_application_name_ns_charm_empty_vca_records(self):
+        charm_level = "ns-level"
+        vnfrs = {}
+        vca_records = []
+        vnf_count = ""
+        vdu_count = ""
+        vdu_id = None
+        with self.assertRaises(N2VCException):
+            self.n2vc._generate_application_name(
+                charm_level,
+                vnfrs,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+
+    def test_generate_application_name_vnf_charm(self):
+        charm_level = "vnf-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "1"
+        vdu_count = ""
+        vdu_id = None
+        expected_result = "simple-ee-ab-1-vnf111-xxx-y-vnf"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_generate_application_name_vdu_charm_kdu_name_in_vca_record_is_none(self):
+        charm_level = "vdu-level"
+        vnfrs = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtvm",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "mgmtVM",
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            },
+            {
+                "target_element": "vnf/vnf1/dataVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "dataVM",
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "datavm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            },
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtVM"
+        expected_result = "simple-ee-ab-2-vnf111-xxx-y-mgmtVM-0-vdu"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_generate_application_name_vdu_charm_vdu_id_kdu_name_in_vca_record_are_both_set(
+        self,
+    ):
+        charm_level = "vdu-level"
+        vnfrs = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "mgmtVM",
+                "kdu_name": "mgmtVM",
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            },
+            {
+                "target_element": "vnf/vnf1/dataVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "dataVM",
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "datavm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            },
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtVM"
+        expected_result = "simple-ee-ab-2-vnf111-xxx-y-mgmtVM-0-vdu"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_generate_application_name_vdu_charm_both_vdu_id_kdu_name_in_vca_record_are_none(
+        self,
+    ):
+        charm_level = "vdu-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": None,
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtVM"
+        with self.assertRaises(KeyError):
+            self.n2vc._generate_application_name(
+                charm_level,
+                vnfrs,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+
+    def test_generate_application_name_vdu_charm_given_vdu_id_is_none(self):
+        charm_level = "vdu-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtvVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": None,
+                "kdu_name": "mgmtVM",
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = None
+        with self.assertRaises(N2VCException):
+            self.n2vc._generate_application_name(
+                charm_level,
+                vnfrs,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+
+    def test_generate_application_name_vdu_charm_vdu_id_does_not_match_with_the_key_in_vca_record(
+        self,
+    ):
+        charm_level = "vdu-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": None,
+                "kdu_name": "mgmtVM",
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtvm"
+        with self.assertRaises(KeyError):
+            self.n2vc._generate_application_name(
+                charm_level,
+                vnfrs,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+
+    def test_generate_application_name_vdu_charm_vdu_id_in_vca_record_is_none(self):
+        charm_level = "vdu-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": None,
+                "kdu_name": "mgmtVM",
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtVM"
+        expected_result = "simple-ee-ab-2-vnf111-xxx-y-mgmtVM-0-vdu"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_get_vnf_count_db_vnfr_ns_charm(self):
+        self.db.get_one.return_value = {"member-vnf-index-ref": "sample-ref"}
+        charm_level = "ns-level"
+        vnf_id_and_count = "m7fbd751-3de4-4e68-bd40-ec5ae0a53898-4"
+        with patch.object(self.n2vc, "db", self.db):
+            vnf_count, db_vnfr = self.n2vc._get_vnf_count_and_record(
+                charm_level, vnf_id_and_count
+            )
+        self.assertEqual(vnf_count, "")
+        self.assertEqual(db_vnfr, {})
+
+    def test_get_vnf_count_db_vnfr_vnf_charm(self):
+        self.db.get_one.return_value = {"member-vnf-index-ref": "sample-ref"}
+        charm_level = "vnf-level"
+        vnf_id_and_count = "m7fbd751-3de4-4e68-bd40-ec5ae0a53898-4"
+        with patch.object(self.n2vc, "db", self.db):
+            vnf_count, db_vnfr = self.n2vc._get_vnf_count_and_record(
+                charm_level, vnf_id_and_count
+            )
+        self.assertEqual(vnf_count, "4")
+        self.assertEqual(db_vnfr, {"member-vnf-index-ref": "sample-ref"})
+
+    def test_get_vnf_count_db_vnfr_vdu_charm(self):
+        self.db.get_one.return_value = {"member-vnf-index-ref": "sample-ref"}
+        charm_level = "vdu-level"
+        vnf_id_and_count = "m7fbd751-3de4-4e68-bd40-ec5ae0a53898-2"
+        with patch.object(self.n2vc, "db", self.db):
+            vnf_count, db_vnfr = self.n2vc._get_vnf_count_and_record(
+                charm_level, vnf_id_and_count
+            )
+        self.assertEqual(vnf_count, "2")
+        self.assertEqual(db_vnfr, {"member-vnf-index-ref": "sample-ref"})
+
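The three _get_vnf_count_and_record cases fix the split rule: the composite id is <vnf-uuid>-<count>, cut at the last hyphen so the hyphens inside the UUID survive, and ns-level charms get an empty count with no VNF record lookup. A sketch under those assumptions:

    def get_vnf_count_and_record(charm_level, vnf_id_and_count, db):
        if charm_level == "ns-level":
            return "", {}
        # rpartition keeps the hyphens inside the UUID intact
        vnf_id, _, vnf_count = vnf_id_and_count.rpartition("-")
        db_vnfr = db.get_one("vnfrs", {"_id": vnf_id})  # filter shape assumed
        return vnf_count, db_vnfr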
+    def test_get_vca_records_vdu_charm(self):
+        charm_level = "vdu-level"
+        db_vnfr = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        db_nsr = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "vnf/vnf2/datavm",
+                            "member-vnf-index": "vnf222-xxx-yyy-zzz",
+                            "vdu_id": "45512ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "datavm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        expected_result = [
+            {
+                "target_element": "vnf/vnf1/mgmtvm",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr)
+        self.assertEqual(vca_records, expected_result)
+
+    def test_get_vca_records_vnf_charm_member_vnf_index_mismatch(self):
+        charm_level = "vnf-level"
+        db_vnfr = {"member-vnf-index-ref": "vnf222-xxx-yyy-zzz"}
+        db_nsr = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "45512ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "datavm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        expected_result = []
+        vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr)
+        self.assertEqual(vca_records, expected_result)
+
+    def test_get_vca_records_ns_charm(self):
+        charm_level = "ns-level"
+        db_vnfr = {"member-vnf-index-ref": "vnf222-xxx-yyy-zzz"}
+        db_nsr = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        expected_result = [
+            {
+                "target_element": "ns",
+                "member-vnf-index": None,
+                "vdu_id": None,
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "",
+                "vdu_name": "",
+                "ee_descriptor_id": "",
+                "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr)
+        self.assertEqual(vca_records, expected_result)
+
+    def test_get_vca_records_ns_charm_empty_charm_name(self):
+        charm_level = "ns-level"
+        db_vnfr = {"member-vnf-index-ref": "vnf222-xxx-yyy-zzz"}
+        db_nsr = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        expected_result = [
+            {
+                "target_element": "ns",
+                "member-vnf-index": None,
+                "vdu_id": None,
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "",
+                "vdu_name": "",
+                "ee_descriptor_id": "",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr)
+        self.assertEqual(vca_records, expected_result)
+
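The _get_vca_records cases above reduce to a filter over db_nsr["_admin"]["deployed"]["VCA"]: ns-level keeps records whose target_element is "ns" (even with an empty charm_name), while vnf- and vdu-level keep records whose member-vnf-index matches the VNF record. A sketch of that filter:

    def get_vca_records(charm_level, db_nsr, db_vnfr):
        vca_records = db_nsr["_admin"]["deployed"]["VCA"]
        if charm_level == "ns-level":
            return [r for r in vca_records if r["target_element"] == "ns"]
        member_index = db_vnfr["member-vnf-index-ref"]
        return [r for r in vca_records if r["member-vnf-index"] == member_index]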
+    def test_get_application_name_vnf_charm(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "simple-ee-ab-z0-vnf111-xxx-y-vnf"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vnf-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
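Judging by the ids passed in and the calls asserted, the namespaces used throughout _get_application_name follow the pattern .<ns-id>.<vnf-id>-<count>[.<vdu-id>-<count>]. A sketch of the assumed decomposition:

    def split_namespace(namespace):
        # ".<ns-id>.<vnf-id>-<vnf-count>[.<vdu-id>-<vdu-count>]"
        _, ns_id, vnf_id_and_count, *rest = namespace.split(".")
        vdu_id_and_count = rest[0] if rest else None
        return ns_id, vnf_id_and_count, vdu_id_and_count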
+    @patch(
+        "n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_get_application_name_vnf_charm_old_naming(
+        self, mock_generate_random_alfanum
+    ):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "app-vnf-eb3161eec0-z0-random"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vnf-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_vnf_charm_vnf_index_ref_mismatch(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {"member-vnf-index-ref": "vnf222-xxx-yyy-zzz"}
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            with self.assertRaises(N2VCException):
+                self.n2vc._get_application_name(namespace)
+            # Assertions after the raising call must live outside the
+            # assertRaises block, otherwise they are never executed.
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vnf-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_vdu_charm(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0.mgmtVM-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "mgmtVM",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "simple-ee-ab-z0-vnf111-xxx-y-mgmtvm-z0-vdu"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vdu-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_kdu_charm(self):
+        namespace = ".82b11965-e580-47c0-9ee0-329f318a305b.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0.ldap"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/openldap/kdu/ldap",
+                            "member-vnf-index": "openldap",
+                            "vdu_id": None,
+                            "kdu_name": "ldap",
+                            "vdu_count_index": 0,
+                            "operational-status": "init",
+                            "detailed-status": "",
+                            "step": "initial-deploy",
+                            "vnfd_id": "openldap_knf",
+                            "vdu_name": None,
+                            "type": "lxc_proxy_charm",
+                            "ee_descriptor_id": "openldap-ee",
+                            "charm_name": "",
+                            "ee_id": "",
+                            "application": "openldap-ee-z0-openldap-vdu",
+                            "model": "82b11965-e580-47c0-9ee0-329f318a305b",
+                            "config_sw_installed": True,
+                        }
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {"member-vnf-index-ref": "openldap", "vdur": {}}
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "openldap-ee-z0-openldap-ldap-vdu"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vdu-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    @patch(
+        "n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_get_application_name_vdu_charm_old_naming(
+        self, mock_generate_random_alfanum
+    ):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0.mgmtVM-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtVM",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "mgmtVM",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "app-vnf-eb3161eec0-z0-vdu-mgmtvm-cnt-z0-random"
+
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vdu-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_ns_charm(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        }
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {}
+        vnf_count = ""
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "simple-ns-charm-abc-z000-rrrr-nnnn-z4444-h-ns"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with("ns-level", None)
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_ns_charm_empty_charm_name(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        }
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {}
+        vnf_count = ""
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            with self.assertRaises(N2VCException):
+                self.n2vc._get_application_name(namespace)
+            # Moved out of the assertRaises block so they actually run.
+            mock_vnf_count_and_record.assert_called_once_with("ns-level", None)
+            self.db.get_one.assert_called_once()
+
+    @patch(
+        "n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_get_application_name_ns_charm_old_naming(
+        self, mock_generate_random_alfanum
+    ):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        }
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {}
+        vnf_count = ""
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "app-random"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with("ns-level", None)
+            self.db.get_one.assert_called_once()
+
+
+class DeleteExecutionEnvironmentTest(N2VCJujuConnTestCase):
+    def setUp(self):
+        super(DeleteExecutionEnvironmentTest, self).setUp()
+        self.n2vc.libjuju.get_controller = AsyncMock()
+        self.n2vc.libjuju.destroy_model = AsyncMock()
+        self.n2vc.libjuju.destroy_application = AsyncMock()
+
+    def test_remove_ee__target_application_exists__model_is_deleted(self):
+        get_ee_id_components = MagicMock()
+        get_ee_id_components.return_value = ("my_model", "my_app", None)
+        model = MagicMock(create_autospec=True)
+        model.applications = {}
+        self.n2vc.libjuju.get_model = AsyncMock()
+        self.n2vc.libjuju.get_model.return_value = model
+        with patch.object(self.n2vc, "_get_ee_id_components", get_ee_id_components):
+            self.loop.run_until_complete(
+                self.n2vc.delete_execution_environment(
+                    "my_ee", application_to_delete="my_app"
+                )
+            )
+        self.n2vc.libjuju.destroy_application.assert_called_with(
+            model_name="my_model",
+            application_name="my_app",
+            total_timeout=None,
+        )
+        self.n2vc.libjuju.destroy_model.assert_called_with(
+            model_name="my_model",
+            total_timeout=None,
+        )
+
+    def test_remove_ee__multiple_applications_exist__model_is_not_deleted(self):
+        get_ee_id_components = MagicMock()
+        get_ee_id_components.return_value = ("my_model", "my_app", None)
+        model = MagicMock(create_autospec=True)
+        model.applications = {MagicMock(create_autospec=True)}
+        self.n2vc.libjuju.get_model = AsyncMock()
+        self.n2vc.libjuju.get_model.return_value = model
+        with patch.object(self.n2vc, "_get_ee_id_components", get_ee_id_components):
+            self.loop.run_until_complete(
+                self.n2vc.delete_execution_environment(
+                    "my_ee", application_to_delete="my_app"
+                )
+            )
+        self.n2vc.libjuju.destroy_application.assert_called_with(
+            model_name="my_model",
+            application_name="my_app",
+            total_timeout=None,
+        )
+        self.n2vc.libjuju.destroy_model.assert_not_called()
+
+    def test_remove_ee__target_application_does_not_exist__model_is_deleted(self):
+        get_ee_id_components = MagicMock()
+        get_ee_id_components.return_value = ("my_model", "my_app", None)
+        with patch.object(self.n2vc, "_get_ee_id_components", get_ee_id_components):
+            self.loop.run_until_complete(
+                self.n2vc.delete_execution_environment("my_ee")
+            )
+        self.n2vc.libjuju.destroy_model.assert_called_with(
+            model_name="my_model",
+            total_timeout=None,
+        )
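
The three tests above pin down the teardown rule for delete_execution_environment: the named application is always destroyed first, and the Juju model is destroyed only when no other application remains in it (or when no application_to_delete is given at all). A minimal sketch of that rule, reusing only the helper names mocked above; everything else is illustrative, not the literal n2vc implementation:

    async def delete_execution_environment(
        self, ee_id, application_to_delete=None, total_timeout=None
    ):
        model_name, application_name, _ = self._get_ee_id_components(ee_id)
        if application_to_delete:
            # Scoped delete: remove one application, keep the model while
            # other applications are still deployed in it.
            await self.libjuju.destroy_application(
                model_name=model_name,
                application_name=application_to_delete,
                total_timeout=total_timeout,
            )
            model = await self.libjuju.get_model(model_name)
            if not model.applications:
                await self.libjuju.destroy_model(
                    model_name=model_name, total_timeout=total_timeout
                )
        else:
            # Default: tear down the whole model backing this EE.
            await self.libjuju.destroy_model(
                model_name=model_name, total_timeout=total_timeout
            )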
index c7aa2d6..abc5e13 100644 (file)
@@ -138,12 +138,20 @@ class TestMotorStore(TestCase):
         self.vca_collection.find_one = AsyncMock()
         self.vca_collection.insert_one = AsyncMock()
         self.vca_collection.replace_one = AsyncMock()
+        self.encryption = Mock()
+        self.encryption.admin_collection = Mock()
+        self.encryption.admin_collection.find_one = AsyncMock()
         self.admin_collection = Mock()
         self.admin_collection.find_one = AsyncMock()
         self.admin_collection.insert_one = AsyncMock()
         self.admin_collection.replace_one = AsyncMock()
         self.vim_accounts_collection = Mock()
         self.vim_accounts_collection.find_one = AsyncMock()
+        self.store.encryption._client = {
+            "osm": {
+                "admin": self.encryption.admin_collection,
+            }
+        }
         self.store._client = {
             "osm": {
                 "vca": self.vca_collection,
@@ -152,7 +160,7 @@ class TestMotorStore(TestCase):
             }
         }
         self.store._config = {"database_commonkey": "osm"}
-        # self.store.decrypt_fields = Mock()
+        self.store.encryption._config = {"database_commonkey": "osm"}
         self.loop = asyncio.get_event_loop()
 
     @patch("n2vc.vca.connection_data.base64_to_cacert")
@@ -174,7 +182,7 @@ class TestMotorStore(TestCase):
         db_find_one = conn_data.copy()
         db_find_one.update({"schema_version": "1.1", "_id": "id"})
         self.vca_collection.find_one.return_value = db_find_one
-        self.store.decrypt_fields = AsyncMock()
+        self.store.encryption.decrypt_fields = AsyncMock()
         connection_data = self.loop.run_until_complete(
             self.store.get_vca_connection_data("vca_id")
         )
@@ -207,7 +215,6 @@ class TestMotorStore(TestCase):
         encrypted_secret = "kI46kRJh828ExSNpr16OG/q5a5/qTsE0bsHrv/W/2/g="
         cacert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ4ekNDQWx1Z0F3SUJBZ0lVRWlzTTBoQWxiYzQ0Z1ZhZWh6bS80ZUsyNnRZd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0lURU5NQXNHQTFVRUNoTUVTblZxZFRFUU1BNEdBMVVFQXhNSGFuVnFkUzFqWVRBZUZ3MHlNVEEwTWpNeApNRFV3TXpSYUZ3MHpNVEEwTWpNeE1EVTFNelJhTUNFeERUQUxCZ05WQkFvVEJFcDFhblV4RURBT0JnTlZCQU1UCkIycDFhblV0WTJFd2dnR2lNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJqd0F3Z2dHS0FvSUJnUUNhTmFvNGZab2gKTDJWYThtdy9LdCs3RG9tMHBYTlIvbEUxSHJyVmZvbmZqZFVQV01zSHpTSjJZZXlXcUNSd3BiaHlLaE82N1c1dgpUY2RsV3Y3WGFLTGtsdVkraDBZY3BQT3BFTmZZYmxrNGk0QkV1L0wzYVY5MFFkUFFrMG94S01CS2R5QlBNZVNNCkJmS2pPWXdyOGgzM0ZWUWhmVkJnMXVGZ2tGaDdTamNuNHczUFdvc1BCMjNiVHBCbGR3VE9zemN4Qm9TaDNSVTkKTzZjb3lQdDdEN0drOCtHRlA3RGRUQTdoV1RkaUM4cDBkeHp2RUNmY0psMXNFeFEyZVprS1QvVzZyelNtVDhUTApCM0ErM1FDRDhEOEVsQU1IVy9zS25SeHphYU8welpNVmVlQnRnNlFGZ1F3M0dJMGo2ZTY0K2w3VExoOW8wSkZVCjdpUitPY01xUzVDY0NROGpWV3JPSk9Xc2dEbDZ4T2FFREczYnR5SVJHY29jbVcvcEZFQjNZd1A2S1BRTUIrNXkKWDdnZExEWmFGRFBVakZmblhkMnhHdUZlMnpRTDNVbXZEUkZuUlBBaW02QlpQbWo1OFh2emFhZXROa3lyaUZLZwp4Z0Z1dVpTcDUwV2JWdjF0MkdzOTMrRE53NlhFZHRFYnlWWUNBa28xTTY0MkozczFnN3NoQnRFQ0F3RUFBYU1qCk1DRXdEZ1lEVlIwUEFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUwKQlFBRGdnR0JBRXYxM2o2ZGFVbDBqeERPSnNTV1ZJZS9JdXNXVTRpN2ZXSWlqMHAwRU1GNS9LTE8yemRndTR5SQoreVd2T3N5aVFPanEzMlRYVlo2bTRDSnBkR1dGVE5HK2lLdXVOU3M0N3g3Q3dmVUNBWm5VVzhyamd3ZWJyS3BmCkJMNEVQcTZTcW0rSmltN0VPankyMWJkY2cyUXdZb3A3eUhvaHcveWEvL0l6RTMzVzZxNHlJeEFvNDBVYUhPTEMKTGtGbnNVYitjcFZBeFlPZGp6bjFzNWhnclpuWXlETEl3WmtIdFdEWm94alUzeC9jdnZzZ1FzLytzTWYrRFU4RgpZMkJKRHJjQ1VQM2xzclc0QVpFMFplZkEwOTlncFEvb3dSN0REYnMwSjZUeFM4NGt6Tldjc1FuWnRraXZheHJNClkyVHNnaWVndFExVFdGRWpxLy9sUFV4emJCdmpnd1FBZm5CQXZGeVNKejdTa0VuVm5rUXJGaUlUQVArTHljQVIKMlg4UFI2ZGI1bEt0SitBSENDM3kvZmNQS2k0ZzNTL3djeXRRdmdvOXJ6ODRFalp5YUNTaGJXNG9jNzNrMS9RcAowQWtHRDU0ZGVDWWVPYVJNbW96c0w3ZzdxWkpFekhtODdOcVBYSy9EZFoweWNxaVFhMXY2T3QxNjdXNUlzMUkzCjBWb0IzUzloSlE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCgo="  # noqa: E501
         encrypted_cacert = "QeV4evTLXzcKwZZvmXQ/OvSHToXH3ISwfoLmU+Q9JlQWAFUHSJ9IhO0ewaQrJmx3NkfFb7NCxsQhh+wE57zDW4rWgn4w/SWkzvwSi1h2xYOO3ECEHzzVqgUm15Sk0xaj1Fv9Ed4hipf6PRijeOZ7A1G9zekr1w9WIvebMyJZrK+f6QJ8AP20NUZqG/3k+MeJr3kjrl+8uwU5aPOrHAexSQGAqSKTkWzW7glmlyMWTjwkuSgNVgFg0ctdWTZ5JnNwxXbpjwIKrC4E4sIHcxko2vsTeLF8pZFPk+3QUZIg8BrgtyM3lJC2kO1g3emPQhCIk3VDb5GBgssc/GyFyRXNS651d5BNgcABOKZ4Rv/gGnprB35zP7TKJKkST44XJTEBiugWMkSZg+T9H98/l3eE34O6thfTZXgIyG+ZM6uGlW2XOce0OoEIyJiEL039WJe3izjbD3b9sCCdgQc0MgS+hTaayJI6oCUWPsJLmRji19jLi/wjOsU5gPItCFWw3pBye/A4Zf8Hxm+hShvqBnk8R2yx1fPTiyw/Zx4Jn8m49XQJyjDSZnhIck0PVHR9xWzKCr++PKljLMLdkdFxVRVPFQk/FBbesqofjSXsq9DASY6ACTL3Jmignx2OXD6ac4SlBqCTjV2dIM0yEgZF7zwMNCtppRdXTV8S29JP4W2mfaiqXCUSRTggv8EYU+9diCE+8sPB6HjuLrsfiySbFlYR2m4ysDGXjsVx5CDAf0Nh4IRfcSceYnnBGIQ2sfgGcJFOZoJqr/QeE2NWz6jlWYbWT7MjS/0decpKxP7L88qrR+F48WXQvfsvjWgKjlMKw7lHmFF8FeY836VWWICTRZx+y6IlY1Ys2ML4kySF27Hal4OPhOOoBljMNMVwUEvBulOnKUWw4BGz8eGCl8Hw6tlyJdC7kcBj/aCyNCR/NnuDk4Wck6e//He8L6mS83OJi/hIFc8vYQxnCJMXj9Ou7wr5hxtBnvxXzZM3kFHxCDO24Cd5UyBV9GD8TiQJfBGAy7a2BCBMb5ESVX8NOkyyv2hXMHOjpnKhUM9yP3Ke4CBImO7mCKJNHdFVtAmuyVKJ+jT6ooAAArkX2xwEAvBEpvGNmW2jgs6wxSuKY0h5aUm0rA4v/s8fqSZhzdInB54sMldyAnt9G+9e+g933DfyA/tkc56Ed0vZ/XEvTkThVHyUbfYR/Gjsoab1RpnDBi4aZ2E7iceoBshy+L6NXdL0jlWEs4ZubiWlbVNWlN/MqJcjV/quLU7q4HtkG0MDEFm6To3o48x7xpv8otih6YBduNqBFnwQ6Qz9rM2chFgOR4IgNSZKPxHO0AGCi1gnK/CeCvrSfWYAMn+2rmw0hMZybqKMStG28+rXsKDdqmy6vAwL/+dJwkAW+ix68rWRXpeqHlWidu4SkIBELuwEkFIC/GJU/DRvcN2GG9uP1m+VFifCIS2UdiO4OVrP6PVoW1O+jBJvFH3K1YT7CRqevb9OzjS9fO1wjkOff0W8zZyJK9Mp25aynpf0k3oMpZDpjnlOsFXFUb3N6SvXD1Yi95szIlmsr5yRYaeGUJH7/SAmMr8R6RqsCR0ANptL2dtRoGPi/qcDQE15vnjJ+QMYCg9KbCdV+Qq5di93XAjmwPj6tKZv0aXQuaTZgYR7bdLmAnJaFLbHWcQG1k6F/vdKNEb7llLsoAD9KuKXPZT/LErIyKcI0RZySy9yvhTZb4jQWn17b83yfvqfd5/2NpcyaY4gNERhDRJHw7VhoS5Leai5ZnFaO3C1vU9tIJ85XgCUASTsBLoQWVCKPSQZGxzF7PVLnHui3YA5OsOQpVqAPtgGZ12tP9XkEKj+u2/Atj2bgYrqBF7zUL64X/AQpwr/UElWDhJLSD/KStVeDOUx3AwAVVi9eTUJr6NiNMutCE1sqUf9XVIddgZ/BaG5t3NV2L+T+11QzAl+Xrh8wH/XeUCTmnU3NGkvCz/9Y7PMS+qQL7T7WeGdYmEhb5s/5p/yjSYeqybr5sANOHs83OdeSXbop9cLWW+JksHmS//rHHcrrJhZgCb3P0EOpEoEMCarT6sJq0V1Hwf/YNFdJ9V7Ac654ALS+a9ffNthMUEJeY21QMtNOrEg3QH5RWBPn+yOYN/f38tzwlT1k6Ec94y/sBmeQVv8rRzkkiMSXeAL5ATdJntq8NQq5JbvLQDNnZnHQthZt+uhcUf08mWlRrxxBUaE6xLppgMqFdYSjLGvgn/d8FZ9y7UCg5ZBhgP1rrRQL1COpNKKlJLf5laqwiGAucIDmzSbhO+MidSauDLWuv+fsdd2QYk98PHxqNrPYLrlAlABFi3JEApBm4IlrGbHxKg6dRiy7L1c9xWnAD7E3XrZrSc6DXvGRsjMXWoQdlp4CX5H3cdH9sjIE6akWqiwwrOP6QTbJcxmJGv/MVhsDVrVKmrKSn2H0/Us1fyYCHCOyCSc2L96uId8i9wQO1NXj+1PJmUq3tJ8U0TUwTblOEQdYej99xEI8EzsXLjNJHCgbDygtHBYd/SHToXH3ISwfoLmU+Q9JlS1woaUpVa5sdvbsr4BXR6J"  # noqa: E501
         encrypted_secret = "kI46kRJh828ExSNpr16OG/q5a5/qTsE0bsHrv/W/2/g="
         cacert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ4ekNDQWx1Z0F3SUJBZ0lVRWlzTTBoQWxiYzQ0Z1ZhZWh6bS80ZUsyNnRZd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0lURU5NQXNHQTFVRUNoTUVTblZxZFRFUU1BNEdBMVVFQXhNSGFuVnFkUzFqWVRBZUZ3MHlNVEEwTWpNeApNRFV3TXpSYUZ3MHpNVEEwTWpNeE1EVTFNelJhTUNFeERUQUxCZ05WQkFvVEJFcDFhblV4RURBT0JnTlZCQU1UCkIycDFhblV0WTJFd2dnR2lNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJqd0F3Z2dHS0FvSUJnUUNhTmFvNGZab2gKTDJWYThtdy9LdCs3RG9tMHBYTlIvbEUxSHJyVmZvbmZqZFVQV01zSHpTSjJZZXlXcUNSd3BiaHlLaE82N1c1dgpUY2RsV3Y3WGFLTGtsdVkraDBZY3BQT3BFTmZZYmxrNGk0QkV1L0wzYVY5MFFkUFFrMG94S01CS2R5QlBNZVNNCkJmS2pPWXdyOGgzM0ZWUWhmVkJnMXVGZ2tGaDdTamNuNHczUFdvc1BCMjNiVHBCbGR3VE9zemN4Qm9TaDNSVTkKTzZjb3lQdDdEN0drOCtHRlA3RGRUQTdoV1RkaUM4cDBkeHp2RUNmY0psMXNFeFEyZVprS1QvVzZyelNtVDhUTApCM0ErM1FDRDhEOEVsQU1IVy9zS25SeHphYU8welpNVmVlQnRnNlFGZ1F3M0dJMGo2ZTY0K2w3VExoOW8wSkZVCjdpUitPY01xUzVDY0NROGpWV3JPSk9Xc2dEbDZ4T2FFREczYnR5SVJHY29jbVcvcEZFQjNZd1A2S1BRTUIrNXkKWDdnZExEWmFGRFBVakZmblhkMnhHdUZlMnpRTDNVbXZEUkZuUlBBaW02QlpQbWo1OFh2emFhZXROa3lyaUZLZwp4Z0Z1dVpTcDUwV2JWdjF0MkdzOTMrRE53NlhFZHRFYnlWWUNBa28xTTY0MkozczFnN3NoQnRFQ0F3RUFBYU1qCk1DRXdEZ1lEVlIwUEFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUwKQlFBRGdnR0JBRXYxM2o2ZGFVbDBqeERPSnNTV1ZJZS9JdXNXVTRpN2ZXSWlqMHAwRU1GNS9LTE8yemRndTR5SQoreVd2T3N5aVFPanEzMlRYVlo2bTRDSnBkR1dGVE5HK2lLdXVOU3M0N3g3Q3dmVUNBWm5VVzhyamd3ZWJyS3BmCkJMNEVQcTZTcW0rSmltN0VPankyMWJkY2cyUXdZb3A3eUhvaHcveWEvL0l6RTMzVzZxNHlJeEFvNDBVYUhPTEMKTGtGbnNVYitjcFZBeFlPZGp6bjFzNWhnclpuWXlETEl3WmtIdFdEWm94alUzeC9jdnZzZ1FzLytzTWYrRFU4RgpZMkJKRHJjQ1VQM2xzclc0QVpFMFplZkEwOTlncFEvb3dSN0REYnMwSjZUeFM4NGt6Tldjc1FuWnRraXZheHJNClkyVHNnaWVndFExVFdGRWpxLy9sUFV4emJCdmpnd1FBZm5CQXZGeVNKejdTa0VuVm5rUXJGaUlUQVArTHljQVIKMlg4UFI2ZGI1bEt0SitBSENDM3kvZmNQS2k0ZzNTL3djeXRRdmdvOXJ6ODRFalp5YUNTaGJXNG9jNzNrMS9RcAowQWtHRDU0ZGVDWWVPYVJNbW96c0w3ZzdxWkpFekhtODdOcVBYSy9EZFoweWNxaVFhMXY2T3QxNjdXNUlzMUkzCjBWb0IzUzloSlE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCgo="  # noqa: E501
         encrypted_cacert = "QeV4evTLXzcKwZZvmXQ/OvSHToXH3ISwfoLmU+Q9JlQWAFUHSJ9IhO0ewaQrJmx3NkfFb7NCxsQhh+wE57zDW4rWgn4w/SWkzvwSi1h2xYOO3ECEHzzVqgUm15Sk0xaj1Fv9Ed4hipf6PRijeOZ7A1G9zekr1w9WIvebMyJZrK+f6QJ8AP20NUZqG/3k+MeJr3kjrl+8uwU5aPOrHAexSQGAqSKTkWzW7glmlyMWTjwkuSgNVgFg0ctdWTZ5JnNwxXbpjwIKrC4E4sIHcxko2vsTeLF8pZFPk+3QUZIg8BrgtyM3lJC2kO1g3emPQhCIk3VDb5GBgssc/GyFyRXNS651d5BNgcABOKZ4Rv/gGnprB35zP7TKJKkST44XJTEBiugWMkSZg+T9H98/l3eE34O6thfTZXgIyG+ZM6uGlW2XOce0OoEIyJiEL039WJe3izjbD3b9sCCdgQc0MgS+hTaayJI6oCUWPsJLmRji19jLi/wjOsU5gPItCFWw3pBye/A4Zf8Hxm+hShvqBnk8R2yx1fPTiyw/Zx4Jn8m49XQJyjDSZnhIck0PVHR9xWzKCr++PKljLMLdkdFxVRVPFQk/FBbesqofjSXsq9DASY6ACTL3Jmignx2OXD6ac4SlBqCTjV2dIM0yEgZF7zwMNCtppRdXTV8S29JP4W2mfaiqXCUSRTggv8EYU+9diCE+8sPB6HjuLrsfiySbFlYR2m4ysDGXjsVx5CDAf0Nh4IRfcSceYnnBGIQ2sfgGcJFOZoJqr/QeE2NWz6jlWYbWT7MjS/0decpKxP7L88qrR+F48WXQvfsvjWgKjlMKw7lHmFF8FeY836VWWICTRZx+y6IlY1Ys2ML4kySF27Hal4OPhOOoBljMNMVwUEvBulOnKUWw4BGz8eGCl8Hw6tlyJdC7kcBj/aCyNCR/NnuDk4Wck6e//He8L6mS83OJi/hIFc8vYQxnCJMXj9Ou7wr5hxtBnvxXzZM3kFHxCDO24Cd5UyBV9GD8TiQJfBGAy7a2BCBMb5ESVX8NOkyyv2hXMHOjpnKhUM9yP3Ke4CBImO7mCKJNHdFVtAmuyVKJ+jT6ooAAArkX2xwEAvBEpvGNmW2jgs6wxSuKY0h5aUm0rA4v/s8fqSZhzdInB54sMldyAnt9G+9e+g933DfyA/tkc56Ed0vZ/XEvTkThVHyUbfYR/Gjsoab1RpnDBi4aZ2E7iceoBshy+L6NXdL0jlWEs4ZubiWlbVNWlN/MqJcjV/quLU7q4HtkG0MDEFm6To3o48x7xpv8otih6YBduNqBFnwQ6Qz9rM2chFgOR4IgNSZKPxHO0AGCi1gnK/CeCvrSfWYAMn+2rmw0hMZybqKMStG28+rXsKDdqmy6vAwL/+dJwkAW+ix68rWRXpeqHlWidu4SkIBELuwEkFIC/GJU/DRvcN2GG9uP1m+VFifCIS2UdiO4OVrP6PVoW1O+jBJvFH3K1YT7CRqevb9OzjS9fO1wjkOff0W8zZyJK9Mp25aynpf0k3oMpZDpjnlOsFXFUb3N6SvXD1Yi95szIlmsr5yRYaeGUJH7/SAmMr8R6RqsCR0ANptL2dtRoGPi/qcDQE15vnjJ+QMYCg9KbCdV+Qq5di93XAjmwPj6tKZv0aXQuaTZgYR7bdLmAnJaFLbHWcQG1k6F/vdKNEb7llLsoAD9KuKXPZT/LErIyKcI0RZySy9yvhTZb4jQWn17b83yfvqfd5/2NpcyaY4gNERhDRJHw7VhoS5Leai5ZnFaO3C1vU9tIJ85XgCUASTsBLoQWVCKPSQZGxzF7PVLnHui3YA5OsOQpVqAPtgGZ12tP9XkEKj+u2/Atj2bgYrqBF7zUL64X/AQpwr/UElWDhJLSD/KStVeDOUx3AwAVVi9eTUJr6NiNMutCE1sqUf9XVIddgZ/BaG5t3NV2L+T+11QzAl+Xrh8wH/XeUCTmnU3NGkvCz/9Y7PMS+qQL7T7WeGdYmEhb5s/5p/yjSYeqybr5sANOHs83OdeSXbop9cLWW+JksHmS//rHHcrrJhZgCb3P0EOpEoEMCarT6sJq0V1Hwf/YNFdJ9V7Ac654ALS+a9ffNthMUEJeY21QMtNOrEg3QH5RWBPn+yOYN/f38tzwlT1k6Ec94y/sBmeQVv8rRzkkiMSXeAL5ATdJntq8NQq5JbvLQDNnZnHQthZt+uhcUf08mWlRrxxBUaE6xLppgMqFdYSjLGvgn/d8FZ9y7UCg5ZBhgP1rrRQL1COpNKKlJLf5laqwiGAucIDmzSbhO+MidSauDLWuv+fsdd2QYk98PHxqNrPYLrlAlABFi3JEApBm4IlrGbHxKg6dRiy7L1c9xWnAD7E3XrZrSc6DXvGRsjMXWoQdlp4CX5H3cdH9sjIE6akWqiwwrOP6QTbJcxmJGv/MVhsDVrVKmrKSn2H0/Us1fyYCHCOyCSc2L96uId8i9wQO1NXj+1PJmUq3tJ8U0TUwTblOEQdYej99xEI8EzsXLjNJHCgbDygtHBYd/SHToXH3ISwfoLmU+Q9JlS1woaUpVa5sdvbsr4BXR6J"  # noqa: E501
-
         self.vca_collection.find_one.return_value = {
             "_id": "2ade7f0e-9b58-4dbd-93a3-4ec076185d39",
             "schema_version": "1.11",
@@ -216,7 +223,7 @@ class TestMotorStore(TestCase):
             "secret": encrypted_secret,
             "cacert": encrypted_cacert,
         }
             "secret": encrypted_secret,
             "cacert": encrypted_cacert,
         }
-        self.admin_collection.find_one.return_value = {
+        self.encryption.admin_collection.find_one.return_value = {
             "serial": b"l+U3HDp9td+UjQ+AN+Ypj/Uh7n3C+rMJueQNNxkIpWI="
         }
         connection_data = self.loop.run_until_complete(
             "serial": b"l+U3HDp9td+UjQ+AN+Ypj/Uh7n3C+rMJueQNNxkIpWI="
         }
         connection_data = self.loop.run_until_complete(
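
The setUp and assertion changes above reflect the store refactor in this commit: field decryption moves off MotorStore onto a separate encryption helper that owns its own Motor client and reads the commonkey serial from the admin collection. Roughly, the mocked wiring corresponds to the following; the filter and the decrypt_fields argument list are assumptions for illustration, not taken from this diff:

    # Illustrative use of the mocked structure, not the literal store code.
    serial_doc = await store.encryption._client["osm"]["admin"].find_one(
        {"serial": {"$exists": True}}  # hypothetical lookup of the serial key
    )
    await store.encryption.decrypt_fields(
        vca_record,               # document read from the vca collection
        ["secret", "cacert"],     # the encrypted fields seen in the fixture above
        schema_version=vca_record["schema_version"],
    )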
diff --git a/n2vc/tests/unit/testdata/test_certificate.yaml b/n2vc/tests/unit/testdata/test_certificate.yaml
new file mode 100644 (file)
index 0000000..d21e022
--- /dev/null
@@ -0,0 +1,42 @@
+# Copyright 2022 Whitestack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: test-cert
+  namespace: osm
+spec:
+  secretName: test-cert-secret
+  privateKey:
+    rotationPolicy: Always
+    algorithm: ECDSA
+    size: 256
+  duration: 8760h
+  renewBefore: 2208h
+  subject:
+    organizations:
+      - osm
+  commonName: osm
+  isCA: false
+  usages:
+    - server auth
+  dnsNames:
+    - "*.osm"
+    - "*.osm.svc"
+    - "*.osm.svc.cluster"
+    - "*.osm.svc.cluster.local"
+  issuerRef:
+    name: ca-issuer
+    kind: ClusterIssuer
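
A cert-manager Certificate is a custom resource, so code exercised against this fixture would go through the generic CustomObjectsApi rather than a typed client. A self-contained sketch of loading and applying the manifest (the real entry point in n2vc/kubectl.py is not shown in this hunk, so treat this as illustrative):

    import yaml
    from kubernetes import client, config

    with open("n2vc/tests/unit/testdata/test_certificate.yaml") as f:
        manifest = yaml.safe_load(f)

    config.load_kube_config()  # assumes a reachable cluster/kubeconfig
    client.CustomObjectsApi().create_namespaced_custom_object(
        group="cert-manager.io",
        version="v1",
        namespace=manifest["metadata"]["namespace"],
        plural="certificates",
        body=manifest,
    )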
diff --git a/n2vc/tests/unit/testdata/test_db_descriptors.py b/n2vc/tests/unit/testdata/test_db_descriptors.py
new file mode 100644 (file)
index 0000000..c6f3670
--- /dev/null
@@ -0,0 +1,414 @@
+# Copyright 2022 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+db_nsrs_text = """
+---
+-   _id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+    name: k8s-ns
+    name-ref: k8s-ns
+    short-name: k8s-ns
+    admin-status: ENABLED
+    nsState: READY
+    currentOperation: IDLE
+    currentOperationID: null
+    errorDescription: null
+    errorDetail: null
+    deploymentStatus: null
+    configurationStatus:
+      - elementType: VNF
+        elementUnderConfiguration: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+        status: READY
+      - elementType: VNF
+        elementUnderConfiguration: 17892d73-aa19-4b87-9a00-1d094f07a6b3
+        status: READY
+    vcaStatus: null
+    nsd:
+      _id: 12f320b5-2a57-40f4-82b5-020a6b1171d7
+      id: k8s_proxy_charm-ns
+      version: '1.0'
+      name: k8s_proxy_charm-ns
+      vnfd-id:
+        - k8s_proxy_charm-vnf
+      virtual-link-desc:
+        - id: mgmtnet
+          mgmt-network: true
+        - id: datanet
+      df:
+        - id: default-df
+          vnf-profile:
+            - id: vnf1
+              virtual-link-connectivity:
+                - constituent-cpd-id:
+                    - constituent-base-element-id: vnf1
+                      constituent-cpd-id: vnf-mgmt-ext
+                  virtual-link-profile-id: mgmtnet
+                - constituent-cpd-id:
+                    - constituent-base-element-id: vnf1
+                      constituent-cpd-id: vnf-data-ext
+                  virtual-link-profile-id: datanet
+              vnfd-id: k8s_proxy_charm-vnf
+            - id: vnf2
+              virtual-link-connectivity:
+                - constituent-cpd-id:
+                    - constituent-base-element-id: vnf2
+                      constituent-cpd-id: vnf-mgmt-ext
+                  virtual-link-profile-id: mgmtnet
+                - constituent-cpd-id:
+                    - constituent-base-element-id: vnf2
+                      constituent-cpd-id: vnf-data-ext
+                  virtual-link-profile-id: datanet
+              vnfd-id: k8s_proxy_charm-vnf
+      description: NS with 2 VNFs with cloudinit connected by datanet and mgmtnet VLs
+      _admin:
+        userDefinedData: {}
+        revision: 1
+        created: 1658990740.88281
+        modified: 1658990741.09266
+        projects_read:
+          - 51e0e80fe533469d98766caa16552a3e
+        projects_write:
+          - 51e0e80fe533469d98766caa16552a3e
+        onboardingState: ONBOARDED
+        operationalState: ENABLED
+        usageState: NOT_IN_USE
+        storage:
+          fs: mongo
+          path: /app/storage/
+          folder: '12f320b5-2a57-40f4-82b5-020a6b1171d7:1'
+          pkg-dir: k8s_proxy_charm_ns
+          descriptor: k8s_proxy_charm_ns/k8s_proxy_charm_nsd.yaml
+          zipfile: k8s_proxy_charm_ns.tar.gz
+    datacenter: bad7338b-ae46-43d4-a434-c3337a8054ac
+    resource-orchestrator: osmopenmano
+    description: default description
+    constituent-vnfr-ref:
+      - 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+      - 17892d73-aa19-4b87-9a00-1d094f07a6b3
+    operational-status: running
+    config-status: configured
+    detailed-status: Done
+    orchestration-progress: {}
+    create-time: 1658998097.57611
+    nsd-name-ref: k8s_proxy_charm-ns
+    operational-events: []
+    nsd-ref: k8s_proxy_charm-ns
+    nsd-id: 12f320b5-2a57-40f4-82b5-020a6b1171d7
+    vnfd-id:
+      - 6d9e1ca1-f387-4d01-9876-066fc7311e0f
+    instantiate_params:
+      nsdId: 12f320b5-2a57-40f4-82b5-020a6b1171d7
+      nsName: k8s-ns
+      nsDescription: default description
+      vimAccountId: bad7338b-ae46-43d4-a434-c3337a8054ac
+      vld:
+        - name: mgmtnet
+          vim-network-name: osm-ext
+    additionalParamsForNs: null
+    ns-instance-config-ref: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+    id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+    ssh-authorized-key: null
+    flavor:
+      - id: '0'
+        memory-mb: 1024
+        name: mgmtVM-flv
+        storage-gb: '10'
+        vcpu-count: 1
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            vim_details: null
+            vim_id: 17a9ba76-beb7-4ad4-a481-97de37174866
+            vim_status: DONE
+      - vcpu-count: 1
+        memory-mb: 1024
+        storage-gb: '10'
+        name: mgmtVM-flv
+        id: '1'
+    image:
+      - id: '0'
+        image: ubuntu18.04
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            vim_details: null
+            vim_id: 919fc71a-6acd-4ee3-8123-739a9abbc2e7
+            vim_status: DONE
+      - image: 'Canonical:UbuntuServer:18.04-LTS:latest'
+        vim-type: azure
+        id: '1'
+      - image: 'ubuntu-os-cloud:image-family:ubuntu-1804-lts'
+        vim-type: gcp
+        id: '2'
+      - image: ubuntu/images/hvm-ssd/ubuntu-artful-17.10-amd64-server-20180509
+        vim-type: aws
+        id: '3'
+    affinity-or-anti-affinity-group: []
+    revision: 1
+    vld:
+      - id: mgmtnet
+        mgmt-network: true
+        name: mgmtnet
+        type: null
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            vim_account_id: bad7338b-ae46-43d4-a434-c3337a8054ac
+            vim_network_name: osm-ext
+            vim_details: >
+              {admin_state_up: true, availability_zone_hints: [],
+              availability_zones: [nova], created_at: '2019-10-17T23:44:03Z',
+              description: '', encapsulation: vlan, encapsulation_id: 2148,
+              encapsulation_type: vlan, id: 21ea5d92-24f1-40ab-8d28-83230e277a49,
+              ipv4_address_scope: null,
+                ipv6_address_scope: null, is_default: false, mtu: 1500, name: osm-ext, port_security_enabled: true, project_id: 456b6471010b4737b47a0dd599c920c5, 'provider:network_type': vlan, 'provider:physical_network': physnet1, 'provider:segmentation_id': 2148, revision_number: 1009,
+                'router:external': true, segmentation_id: 2148, shared: true, status: ACTIVE, subnets: [{subnet: {allocation_pools: [{end: 172.21.249.255, start: 172.21.248.1}], cidr: 172.21.248.0/22, created_at: '2019-10-17T23:44:07Z', description: '', dns_nameservers: [],
+                      enable_dhcp: true, gateway_ip: 172.21.251.254, host_routes: [], id: d14f68b7-8287-41fe-b533-dafb2240680a, ip_version: 4, ipv6_address_mode: null, ipv6_ra_mode: null, name: osm-ext-subnet, network_id: 21ea5d92-24f1-40ab-8d28-83230e277a49, project_id: 456b6471010b4737b47a0dd599c920c5,
+                      revision_number: 5, service_types: [], subnetpool_id: null, tags: [], tenant_id: 456b6471010b4737b47a0dd599c920c5, updated_at: '2020-09-14T15:15:06Z'}}], tags: [], tenant_id: 456b6471010b4737b47a0dd599c920c5, type: data, updated_at: '2022-07-05T18:39:02Z'}
+            vim_id: 21ea5d92-24f1-40ab-8d28-83230e277a49
+            vim_status: ACTIVE
+      - id: datanet
+        mgmt-network: false
+        name: datanet
+        type: null
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            vim_account_id: bad7338b-ae46-43d4-a434-c3337a8054ac
+            vim_network_name: null
+            vim_details: >
+              {admin_state_up: true, availability_zone_hints: [],
+              availability_zones: [nova], created_at: '2022-07-28T08:41:59Z',
+              description: '', encapsulation: vxlan, encapsulation_id: 27,
+              encapsulation_type: vxlan, id: 34056287-3cd5-42cb-92d3-413382b50813,
+              ipv4_address_scope: null,
+                ipv6_address_scope: null, mtu: 1450, name: k8s-ns-datanet, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, 'provider:network_type': vxlan, 'provider:physical_network': null, 'provider:segmentation_id': 27, revision_number: 2, 'router:external': false,
+                segmentation_id: 27, shared: false, status: ACTIVE, subnets: [{subnet: {allocation_pools: [{end: 192.168.181.254, start: 192.168.181.1}], cidr: 192.168.181.0/24, created_at: '2022-07-28T08:41:59Z', description: '', dns_nameservers: [], enable_dhcp: true, gateway_ip: null,
+                      host_routes: [], id: ab2920f8-881b-4bef-82a5-9582a7930786, ip_version: 4, ipv6_address_mode: null, ipv6_ra_mode: null, name: k8s-ns-datanet-subnet, network_id: 34056287-3cd5-42cb-92d3-413382b50813, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 0,
+                      service_types: [], subnetpool_id: null, tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2022-07-28T08:41:59Z'}}], tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, type: bridge, updated_at: '2022-07-28T08:41:59Z'}
+            vim_id: 34056287-3cd5-42cb-92d3-413382b50813
+            vim_status: ACTIVE
+    _admin:
+      created: 1658998097.58182
+      modified: 1658998193.42562
+      projects_read:
+        - 51e0e80fe533469d98766caa16552a3e
+      projects_write:
+        - 51e0e80fe533469d98766caa16552a3e
+      nsState: INSTANTIATED
+      current-operation: null
+      nslcmop: null
+      operation-type: null
+      deployed:
+        RO:
+          vnfd: []
+          operational-status: running
+        VCA:
+          - target_element: vnf/vnf1
+            member-vnf-index: vnf1
+            vdu_id: null
+            kdu_name: null
+            vdu_count_index: 0
+            operational-status: init
+            detailed-status: ''
+            step: initial-deploy
+            vnfd_id: k8s_proxy_charm-vnf
+            vdu_name: null
+            type: k8s_proxy_charm
+            ee_descriptor_id: simple-ee
+            charm_name: ''
+            ee_id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s.simple-ee-z0-vnf1-vnf.k8s
+            application: simple-ee-z0-vnf1-vnf
+            model: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s
+            config_sw_installed: true
+          - target_element: vnf/vnf2
+            member-vnf-index: vnf2
+            vdu_id: null
+            kdu_name: null
+            vdu_count_index: 0
+            operational-status: init
+            detailed-status: ''
+            step: initial-deploy
+            vnfd_id: k8s_proxy_charm-vnf
+            vdu_name: null
+            type: k8s_proxy_charm
+            ee_descriptor_id: simple-ee
+            charm_name: ''
+            ee_id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s.simple-ee-z0-vnf2-vnf.k8s
+            application: simple-ee-z0-vnf2-vnf
+            model: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s
+            config_sw_installed: true
+        K8s: []
+"""
+
+db_vnfrs_text = """
+-   _id: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+    id: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+    nsr-id-ref: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+    member-vnf-index-ref: vnf1
+    additionalParamsForVnf: null
+    created-time: 1658998097.58036
+    vnfd-ref: k8s_proxy_charm-vnf
+    vnfd-id: 6d9e1ca1-f387-4d01-9876-066fc7311e0f
+    vim-account-id: bad7338b-ae46-43d4-a434-c3337a8054ac
+    vca-id: null
+    vdur:
+      - _id: 38912ff7-5bdd-4228-911f-c2bee259c44a
+        additionalParams:
+          OSM:
+            count_index: 0
+            member_vnf_index: vnf1
+            ns_id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+            vdu:
+              mgmtVM-0:
+                count_index: 0
+                interfaces:
+                  dataVM-xe0:
+                    name: dataVM-xe0
+                  mgmtVM-eth0:
+                    name: mgmtVM-eth0
+                vdu_id: mgmtVM
+            vdu_id: mgmtVM
+            vim_account_id: bad7338b-ae46-43d4-a434-c3337a8054ac
+            vnf_id: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+            vnfd_id: 6d9e1ca1-f387-4d01-9876-066fc7311e0f
+            vnfd_ref: k8s_proxy_charm-vnf
+        affinity-or-anti-affinity-group-id: []
+        alt-image-ids:
+          - '1'
+          - '2'
+          - '3'
+        cloud-init: '6d9e1ca1-f387-4d01-9876-066fc7311e0f:file:cloud-config.txt'
+        count-index: 0
+        id: 38912ff7-5bdd-4228-911f-c2bee259c44a
+        interfaces:
+          - external-connection-point-ref: vnf-mgmt-ext
+            internal-connection-point-ref: mgmtVM-eth0-int
+            mgmt-interface: true
+            mgmt-vnf: true
+            name: mgmtVM-eth0
+            ns-vld-id: mgmtnet
+            position: 1
+            type: PARAVIRT
+            compute_node: nfvisrv11
+            ip-address: 172.21.248.199
+            mac-address: 'fa:16:3e:4d:65:e9'
+            pci: null
+            vlan: 2148
+          - external-connection-point-ref: vnf-data-ext
+            internal-connection-point-ref: dataVM-xe0-int
+            name: dataVM-xe0
+            ns-vld-id: datanet
+            position: 2
+            type: PARAVIRT
+            compute_node: nfvisrv11
+            ip-address: 192.168.181.179
+            mac-address: 'fa:16:3e:ca:b5:d3'
+            pci: null
+            vlan: null
+        internal-connection-point:
+          - connection-point-id: mgmtVM-eth0-int
+            id: mgmtVM-eth0-int
+            name: mgmtVM-eth0-int
+          - connection-point-id: dataVM-xe0-int
+            id: dataVM-xe0-int
+            name: dataVM-xe0-int
+        ip-address: 172.21.248.199
+        ns-flavor-id: '0'
+        ns-image-id: '0'
+        ssh-access-required: true
+        ssh-keys:
+          - >
+            ssh-rsa
+            AAAAB3NzaC1yc2EAAAADAQABAAACAQDW3dtEDKfwZL0WZp6LeJUZFlZzYAHP7M4AsJwl2YFO/wmblfrTpWZ8tRyGwyjQacB7Zb7J07wD5AZACE71A3Nc9zjI22/gWN7N8X+ZxH6ywcr1GdXBqZDBeOdzD4pRb11E9mydGZ9l++KtFRtlF4G7IFYuxkOiSCJrkgiKuVDGodtQ/6VUKwxuI8U6N7MxtIBN2L3IfvMwuNyTo1daiUabQMwQKt/Q8Zpp78zsZ6SoxU+eYAHzbeTjAfNwhA88nRzRZn7tQW+gWl9wbSINbr2+JetTN+BTot/CMPmKzzul9tZrzhSzck1QSM3UDrD36ctRdaLABnWCoxpm0wJthNt693xVrFP+bMgK2BR0fyu9WwVEcHkC9CZ8yoi37k5rGVtoDw6sW6lxQ5QKS+Plv/YjGKqK3Ro/UoIEhgxcW53uz4PveyMBss4geB9ad/1T8dtugd288qfCWJRBpJBrE497EalhHolF3L/2bEu3uCKN0TY4POzqP/5cuAUc/uTJ2mjZewJdlJtrn7IyFtSUypeuVmXRx5LwByQw9EwPhUZlKVjYEHYmu5YTKlFSWyorWgRLBBIK7LLPj+bCGgLeT+fXmip6eFquAyVtoQfDofQ/gc0OXEA1uKfK2VFKg1le+joz1WA/XieGSvKRQ4aZorYgi/FzbpxKj2a60cZubJMq5w==
+            root@lcm-7b6bcf7cdd-5h2ql
+          - >-
+            ssh-rsa
+            AAAAB3NzaC1yc2EAAAADAQABAAABAQDtg65/Jh3KDWC9+YzkTz8Md/uhalkjPo15DSxlUNWzYQNFUzaG5Pt0trDwQ29UOQIUy1CB9HpWSZMTA1ESet/+cyXWkZ9MznAmGLQBdnwqWU792UQf6rv74Zpned8MbnKQXfs8gog1ZFFKRMcwitNRqs8xs8XsPLE/l1Jo2QemhM0fIRofjJiLKYaKeGP59Fb8UlIeGDaxmIFgLs8bAZvrmjbae3o4b1fZDNboqlQbHb9rakxI9uCnsaBrCmelXpP9EFmENx85vdHEwCAfCRvSWKnbXuOojJJzFM5odoWFZo8AuIhEb5ZiLkGet3CvCfWZZPpQc4TuNDaY0t1XUegH
+            juju-client-key
+        vdu-id-ref: mgmtVM
+        vdu-name: mgmtVM
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            interfaces:
+              - vim_info: >
+                  {admin_state_up: true, allowed_address_pairs: [],
+                  'binding:host_id': nfvisrv11, 'binding:profile': {},
+                  'binding:vif_details': {bridge_name: br-int, connectivity: l2,
+                  datapath_type: system, ovs_hybrid_plug: true, port_filter: true},
+                  'binding:vif_type': ovs, 'binding:vnic_type': normal,
+                    created_at: '2022-07-28T08:42:04Z', description: '', device_id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7, device_owner: 'compute:nova', extra_dhcp_opts: [], fixed_ips: [{ip_address: 172.21.248.199, subnet_id: d14f68b7-8287-41fe-b533-dafb2240680a}], id: e053d44f-1d67-4274-b85d-1cef243353d6,
+                    mac_address: 'fa:16:3e:4d:65:e9', name: mgmtVM-eth0, network_id: 21ea5d92-24f1-40ab-8d28-83230e277a49, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 4, security_groups: [1de4b2c2-e4be-4e91-985c-d887e2715949], status: ACTIVE,
+                    tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2022-07-28T08:42:16Z'}
+                mac_address: 'fa:16:3e:4d:65:e9'
+                vim_net_id: 21ea5d92-24f1-40ab-8d28-83230e277a49
+                vim_interface_id: e053d44f-1d67-4274-b85d-1cef243353d6
+                compute_node: nfvisrv11
+                pci: null
+                vlan: 2148
+                ip_address: 172.21.248.199
+                mgmt_vnf_interface: true
+                mgmt_vdu_interface: true
+              - vim_info: >
+                  {admin_state_up: true, allowed_address_pairs: [],
+                  'binding:host_id': nfvisrv11, 'binding:profile': {},
+                  'binding:vif_details': {bridge_name: br-int, connectivity: l2,
+                  datapath_type: system, ovs_hybrid_plug: true, port_filter: true},
+                  'binding:vif_type': ovs, 'binding:vnic_type': normal,
+                    created_at: '2022-07-28T08:42:04Z', description: '', device_id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7, device_owner: 'compute:nova', extra_dhcp_opts: [], fixed_ips: [{ip_address: 192.168.181.179, subnet_id: ab2920f8-881b-4bef-82a5-9582a7930786}], id: 8a34c944-0fc1-41ae-9dbc-9743e5988162,
+                    mac_address: 'fa:16:3e:ca:b5:d3', name: dataVM-xe0, network_id: 34056287-3cd5-42cb-92d3-413382b50813, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 4, security_groups: [1de4b2c2-e4be-4e91-985c-d887e2715949], status: ACTIVE,
+                    tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2022-07-28T08:42:15Z'}
+                mac_address: 'fa:16:3e:ca:b5:d3'
+                vim_net_id: 34056287-3cd5-42cb-92d3-413382b50813
+                vim_interface_id: 8a34c944-0fc1-41ae-9dbc-9743e5988162
+                compute_node: nfvisrv11
+                pci: null
+                vlan: null
+                ip_address: 192.168.181.179
+            vim_details: >
+              {'OS-DCF:diskConfig': MANUAL, 'OS-EXT-AZ:availability_zone': nova,
+              'OS-EXT-SRV-ATTR:host': nfvisrv11,
+              'OS-EXT-SRV-ATTR:hypervisor_hostname': nfvisrv11,
+              'OS-EXT-SRV-ATTR:instance_name': instance-0002967a,
+              'OS-EXT-STS:power_state': 1, 'OS-EXT-STS:task_state': null,
+                'OS-EXT-STS:vm_state': active, 'OS-SRV-USG:launched_at': '2022-07-28T08:42:17.000000', 'OS-SRV-USG:terminated_at': null, accessIPv4: '', accessIPv6: '', addresses: {k8s-ns-datanet: [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ca:b5:d3', 'OS-EXT-IPS:type': fixed,
+                      addr: 192.168.181.179, version: 4}], osm-ext: [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:4d:65:e9', 'OS-EXT-IPS:type': fixed, addr: 172.21.248.199, version: 4}]}, config_drive: '', created: '2022-07-28T08:42:06Z', flavor: {id: 17a9ba76-beb7-4ad4-a481-97de37174866,
+                  links: [{href: 'http://172.21.247.1:8774/flavors/17a9ba76-beb7-4ad4-a481-97de37174866', rel: bookmark}]}, hostId: 2aa7155bd281bd308d8e3776af56d428210c21aab788a8cbdf5ef500, id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7, image: {id: 919fc71a-6acd-4ee3-8123-739a9abbc2e7,
+                  links: [{href: 'http://172.21.247.1:8774/images/919fc71a-6acd-4ee3-8123-739a9abbc2e7', rel: bookmark}]}, key_name: null, links: [{href: 'http://172.21.247.1:8774/v2.1/servers/1fabddca-0dcf-4702-a5f3-5cc028c2aba7', rel: self}, {href: 'http://172.21.247.1:8774/servers/1fabddca-0dcf-4702-a5f3-5cc028c2aba7',
+                    rel: bookmark}], metadata: {}, name: k8s-ns-vnf1-mgmtVM-0, 'os-extended-volumes:volumes_attached': [], progress: 0, security_groups: [{name: default}, {name: default}], status: ACTIVE, tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated: '2022-07-28T08:42:17Z',
+                user_id: f043c84f940b4fc8a01a98714ea97c80}
+            vim_id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7
+            vim_status: ACTIVE
+            vim_name: k8s-ns-vnf1-mgmtVM-0
+        virtual-storages:
+          - id: mgmtVM-storage
+            size-of-storage: '10'
+        status: ACTIVE
+        vim-id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7
+        name: k8s-ns-vnf1-mgmtVM-0
+    connection-point:
+      - name: vnf-mgmt-ext
+        connection-point-id: mgmtVM-eth0-int
+        connection-point-vdu-id: mgmtVM
+        id: vnf-mgmt-ext
+      - name: vnf-data-ext
+        connection-point-id: dataVM-xe0-int
+        connection-point-vdu-id: mgmtVM
+        id: vnf-data-ext
+    ip-address: 172.21.248.199
+    revision: 1
+    _admin:
+      created: 1658998097.58048
+      modified: 1658998097.58048
+      projects_read:
+        - 51e0e80fe533469d98766caa16552a3e
+      projects_write:
+        - 51e0e80fe533469d98766caa16552a3e
+      nsState: INSTANTIATED
+"""
index b2d5c60..807c892 100644 (file)
@@ -130,7 +130,6 @@ class FakeManualMachine(MagicMock):
 
 
 class FakeWatcher(AsyncMock):
-
     delta_to_return = None
 
     async def Next(self):
index 286f0fc..a4a6a23 100644 (file)
@@ -149,7 +149,7 @@ def obj_to_dict(obj: object) -> dict:
     # convert obj to yaml
     yaml_text = obj_to_yaml(obj)
     # parse to dict
-    return yaml.load(yaml_text, Loader=yaml.Loader)
+    return yaml.load(yaml_text, Loader=yaml.SafeLoader)
 
 
 def get_ee_id_components(ee_id: str) -> Tuple[str, str, str]:
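
The ee_id strings this helper takes apart follow the "<model>.<application>.<machine>" shape visible in the fixtures above (e.g. "dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s.simple-ee-z0-vnf1-vnf.k8s"). A sketch consistent with the Tuple[str, str, str] signature:

    def get_ee_id_components(ee_id: str):
        # Split an execution environment id into model, application, machine.
        model_name, application_name, machine_id = ee_id.split(".")
        return model_name, application_name, machine_id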
index 21f6ba6..cc6daf0 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #######################################################################################
-aiokafka==0.7.2
+aiokafka==0.8.1
     # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+async-timeout==4.0.3
+    # via
+    #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+    #   aiokafka
 dataclasses==0.6
     # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+dnspython==2.4.2
+    # via
+    #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+    #   pymongo
 kafka-python==2.0.2
     # via
     #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
     #   aiokafka
+motor==3.3.1
+    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
 osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git@master
     # via -r requirements-dev.in
-pycrypto==2.6.1
-    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-pymongo==3.12.3
+packaging==23.1
+    # via
+    #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+    #   aiokafka
+pycryptodome==3.19.0
     # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-pyyaml==5.4.1
+pymongo==4.5.0
+    # via
+    #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+    #   motor
+pyyaml==6.0.1
     # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
index 08df82d..a0d68c4 100644 (file)
@@ -13,8 +13,9 @@
 #     limitations under the License.
 
 asynctest
+charset-normalizer
 coverage
-flake8
+flake8<5.0.0
 mock
 nose2
 requests-mock
index c6718ee..57e30a7 100644 (file)
 #######################################################################################
 asynctest==0.13.0
     # via -r requirements-test.in
-certifi==2021.10.8
+certifi==2023.7.22
     # via requests
-charset-normalizer==2.0.10
-    # via requests
-coverage==6.2
+charset-normalizer==3.2.0
     # via
     #   -r requirements-test.in
-    #   nose2
+    #   requests
+coverage==7.3.1
+    # via -r requirements-test.in
 flake8==4.0.1
     # via -r requirements-test.in
-idna==3.3
+idna==3.4
     # via requests
 mccabe==0.6.1
     # via flake8
-mock==4.0.3
+mock==5.1.0
     # via -r requirements-test.in
-nose2==0.10.0
+nose2==0.13.0
     # via -r requirements-test.in
 pycodestyle==2.8.0
     # via flake8
 pyflakes==2.4.0
     # via flake8
-requests==2.27.1
+requests==2.31.0
     # via requests-mock
-requests-mock==1.9.3
+requests-mock==1.11.0
     # via -r requirements-test.in
 six==1.16.0
-    # via
-    #   nose2
-    #   requests-mock
-urllib3==1.26.8
+    # via requests-mock
+urllib3==2.0.5
     # via requests
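
Note: the test pins move to requests-mock 1.11.0 on top of requests 2.31.0 and urllib3 2.0.5. A minimal sketch of the requests-mock Mocker context manager as typically used in unit tests; the URL and payload are illustrative only:

    # Minimal requests-mock sketch; URL and body are illustrative.
    import requests
    import requests_mock

    with requests_mock.Mocker() as m:
        m.get("http://osm.example/api/status", json={"status": "ok"})
        resp = requests.get("http://osm.example/api/status")
        assert resp.json() == {"status": "ok"}
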
diff --git a/requirements.in b/requirements.in
index 576d1ff..95605f5 100644
--- a/requirements.in
+++ b/requirements.in
 #     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #     See the License for the specific language governing permissions and
 #     limitations under the License.
-
-async-timeout<4
-juju==2.9.8
-kubernetes
-motor==1.3.1
+charset-normalizer
+google-auth<2.18.0
+juju==2.9.44.0
+kubernetes==26.1.0
+motor
 pyasn1
-pyyaml<6
+pyyaml>6
 retrying-async
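
Note: requirements.in swaps the old pyyaml<6 cap for pyyaml>6. PyYAML 6 makes the Loader argument to yaml.load() mandatory, so callers should use safe_load() (or pass an explicit Loader). A minimal sketch; the YAML document is illustrative only:

    # Minimal PyYAML 6 sketch; the document is illustrative.
    import yaml

    doc = "replicas: 3\nimage: nginx"

    data = yaml.safe_load(doc)  # yaml.load(doc) alone raises TypeError in PyYAML 6
    assert data["replicas"] == 3
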
diff --git a/requirements.txt b/requirements.txt
index ef0d3a9..215ac92 100644
--- a/requirements.txt
+++ b/requirements.txt
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #######################################################################################
-async-timeout==3.0.1
-    # via
-    #   -r requirements.in
-    #   retrying-async
-bcrypt==3.2.0
+async-timeout==4.0.3
+    # via retrying-async
+bcrypt==4.0.1
     # via paramiko
-cachetools==4.2.4
+cachetools==5.3.1
     # via google-auth
-certifi==2021.10.8
+certifi==2023.7.22
     # via
     #   kubernetes
     #   requests
-cffi==1.15.0
+cffi==1.16.0
     # via
-    #   bcrypt
     #   cryptography
     #   pynacl
-charset-normalizer==2.0.10
-    # via requests
-cryptography==36.0.1
+charset-normalizer==3.2.0
+    # via
+    #   -r requirements.in
+    #   requests
+cryptography==41.0.4
     # via paramiko
-google-auth==2.3.3
-    # via kubernetes
-idna==3.3
+dnspython==2.4.2
+    # via pymongo
+google-auth==2.17.3
+    # via
+    #   -r requirements.in
+    #   kubernetes
+idna==3.4
     # via requests
-juju==2.9.8
+juju==2.9.44.0
     # via -r requirements.in
 jujubundlelib==0.5.7
     # via theblues
-kubernetes==21.7.0
+kubernetes==26.1.0
     # via
     #   -r requirements.in
     #   juju
@@ -51,29 +54,29 @@ macaroonbakery==1.3.1
     # via
     #   juju
     #   theblues
-motor==1.3.1
+motor==3.3.1
     # via -r requirements.in
-mypy-extensions==0.4.3
+mypy-extensions==1.0.0
     # via typing-inspect
-oauthlib==3.1.1
+oauthlib==3.2.2
     # via requests-oauthlib
-paramiko==2.9.2
+paramiko==2.12.0
     # via juju
-protobuf==3.19.3
+protobuf==3.20.3
     # via macaroonbakery
-pyasn1==0.4.8
+pyasn1==0.5.0
     # via
     #   -r requirements.in
     #   juju
     #   pyasn1-modules
     #   rsa
-pyasn1-modules==0.2.8
+pyasn1-modules==0.3.0
     # via google-auth
 pycparser==2.21
     # via cffi
 pymacaroons==0.13.0
     # via macaroonbakery
-pymongo==3.12.3
+pymongo==4.5.0
     # via motor
 pynacl==1.5.0
     # via
@@ -86,49 +89,49 @@ pyrfc3339==1.1
     #   macaroonbakery
 python-dateutil==2.8.2
     # via kubernetes
-pytz==2021.3
+pytz==2023.3.post1
     # via pyrfc3339
-pyyaml==5.4.1
+pyyaml==6.0.1
     # via
     #   -r requirements.in
     #   juju
     #   jujubundlelib
     #   kubernetes
-requests==2.27.1
+requests==2.31.0
     # via
     #   kubernetes
     #   macaroonbakery
     #   requests-oauthlib
     #   theblues
-requests-oauthlib==1.3.0
+requests-oauthlib==1.3.1
     # via kubernetes
-retrying-async==1.2.0
+retrying-async==2.0.0
     # via -r requirements.in
-rsa==4.8
+rsa==4.9
     # via google-auth
 six==1.16.0
     # via
-    #   bcrypt
     #   google-auth
     #   kubernetes
     #   macaroonbakery
+    #   paramiko
     #   pymacaroons
     #   python-dateutil
 theblues==0.5.2
     # via juju
-toposort==1.7
+toposort==1.10
     # via juju
-typing-extensions==4.0.1
+typing-extensions==4.8.0
     # via typing-inspect
-typing-inspect==0.7.1
+typing-inspect==0.9.0
     # via juju
-urllib3==1.26.8
+urllib3==2.0.5
     # via
     #   kubernetes
     #   requests
-websocket-client==1.2.3
+websocket-client==1.6.3
     # via kubernetes
-websockets==7.0
+websockets==11.0.3
     # via juju
 
 # The following packages are considered to be unsafe in a requirements file:
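
Note: the kubernetes client jumps from 21.7.0 to 26.1.0 (now pinned in requirements.in as well), with urllib3 2.0.5 and websocket-client 1.6.3 underneath. A minimal sketch of listing namespaces, assuming a kubeconfig at the default location pointing at a reachable cluster:

    # Minimal kubernetes-client sketch; assumes ~/.kube/config targets a live cluster.
    from kubernetes import client, config

    config.load_kube_config()  # inside a pod, use config.load_incluster_config()
    v1 = client.CoreV1Api()
    for ns in v1.list_namespace().items:
        print(ns.metadata.name)
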
diff --git a/tox.ini b/tox.ini
index 5aaf2a4..63aaf7a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -23,7 +23,7 @@ toxworkdir = /tmp/.tox
 
 [testenv]
 usedevelop = True
-basepython = python3.8
+basepython = python3.10
 setenv = VIRTUAL_ENV={envdir}
          PYTHONDONTWRITEBYTECODE = 1
 deps =  -r{toxinidir}/requirements.txt
@@ -31,7 +31,7 @@ deps =  -r{toxinidir}/requirements.txt
 
 #######################################################################################
 [testenv:black]
-deps = black
+deps = black==23.12.1
 skip_install = true
 commands =
         black --check --diff n2vc/
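
Note: pinning black to 23.12.1 keeps the formatting gate reproducible across builds. A minimal sketch of what the pinned style does to a snippet, equivalent in spirit to the --check invocation above; the source string is illustrative only:

    # Minimal black sketch; the snippet is illustrative.
    import black

    src = "x = { 'a':1 }\n"
    formatted = black.format_str(src, mode=black.Mode())
    print(formatted)         # x = {"a": 1}
    print(formatted != src)  # True, so `black --check` would flag src
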
@@ -50,7 +50,7 @@ commands =
         coverage report --omit='*tests*'
         coverage html -d ./cover --omit='*tests*'
         coverage xml -o coverage.xml --omit=*tests*
-whitelist_externals = sh
+allowlist_externals = sh
 
 
 #######################################################################################
@@ -83,15 +83,15 @@ commands =
 
 #######################################################################################
 [testenv:pip-compile]
-deps =  pip-tools==6.4.0
+deps =  pip-tools==6.13.0
 skip_install = true
-whitelist_externals = bash
+allowlist_externals = bash
         [
 commands =
         - bash -c "for file in requirements*.in ; do \
         UNSAFE="" ; \
         if [[ $file =~ 'dist' ]] ; then UNSAFE='--allow-unsafe' ; fi ; \
-        pip-compile -rU --no-header $UNSAFE $file ;\
+        pip-compile --resolver=backtracking -rU --no-header $UNSAFE $file ;\
         out=`echo $file | sed 's/.in/.txt/'` ; \
         sed -i -e '1 e head -16 tox.ini' $out ;\
         done"
@@ -109,7 +109,7 @@ commands =
         python3 setup.py --command-packages=stdeb.command sdist_dsc
         sh -c 'cd deb_dist/n2vc*/ && dpkg-buildpackage -rfakeroot -uc -us'
         sh -c 'rm n2vc/requirements.txt'
-whitelist_externals = sh
+allowlist_externals = sh
 
 #######################################################################################
 [flake8]
@@ -121,7 +121,8 @@ ignore =
         E125,
         E203,
         E226,
-        E241
+        E241,
+        E501
 exclude =
         .git,
         __pycache__,