Pin black version in tox.ini to 23.12.1 (Gerrit change 78/14178/1, branch master)
author    garciadeblas <gerardo.garciadeblas@telefonica.com>  Mon, 29 Jan 2024 17:26:53 +0000 (18:26 +0100)
committer garciadeblas <gerardo.garciadeblas@telefonica.com>  Mon, 29 Jan 2024 17:26:53 +0000 (18:26 +0100)
Change-Id: Ica4b7a2ec310cffa116f319818f755c5062f0787
Signed-off-by: garciadeblas <gerardo.garciadeblas@telefonica.com>
43 files changed:
Dockerfile
Jenkinsfile
devops-stages/stage-archive.sh
n2vc/definitions.py [new file with mode: 0644]
n2vc/juju_watcher.py
n2vc/k8s_conn.py
n2vc/k8s_helm3_conn.py
n2vc/k8s_helm_base_conn.py
n2vc/k8s_helm_conn.py [deleted file]
n2vc/k8s_juju_conn.py
n2vc/kubectl.py
n2vc/libjuju.py
n2vc/loggable.py
n2vc/n2vc_conn.py
n2vc/n2vc_juju_conn.py
n2vc/store.py
n2vc/tests/unit/test_definitions.py [new file with mode: 0644]
n2vc/tests/unit/test_juju_watcher.py
n2vc/tests/unit/test_k8s_helm3_conn.py
n2vc/tests/unit/test_k8s_helm_conn.py [deleted file]
n2vc/tests/unit/test_k8s_juju_conn.py
n2vc/tests/unit/test_kubectl.py
n2vc/tests/unit/test_libjuju.py
n2vc/tests/unit/test_n2vc_juju_conn.py
n2vc/tests/unit/test_store.py
n2vc/tests/unit/test_utils.py
n2vc/tests/unit/testdata/test_certificate.yaml [new file with mode: 0644]
n2vc/tests/unit/testdata/test_db_descriptors.py [new file with mode: 0644]
n2vc/tests/unit/testdata/upgrade-machine.log [new file with mode: 0644]
n2vc/tests/unit/testdata/upgrade-operator.log [new file with mode: 0644]
n2vc/tests/unit/testdata/upgrade-podspec-stateful.log [new file with mode: 0644]
n2vc/tests/unit/testdata/upgrade-podspec-stateless.log [new file with mode: 0644]
n2vc/tests/unit/testdata/upgrade-sidecar.log [new file with mode: 0644]
n2vc/tests/unit/utils.py
n2vc/utils.py
requirements-dev.txt
requirements-dist.in
requirements-dist.txt
requirements-test.in
requirements-test.txt
requirements.in
requirements.txt
tox.ini

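The tox.ini hunk that gives this commit its subject appears further down in the full diff and is not reproduced in this excerpt. Assuming the usual OSM tox layout, the pin presumably amounts to a one-line change in the black testenv; a hypothetical reconstruction in the same diff notation, not copied from the change itself:

 [testenv:black]
 deps =
-        black
+        black==23.12.1
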
diff --git a/Dockerfile b/Dockerfile
index 3fad212..27ab273 100644
 #   devops-stages/stage-build.sh
 #
 
-FROM ubuntu:18.04
+FROM ubuntu:22.04
+
+ARG APT_PROXY
+RUN if [ ! -z $APT_PROXY ] ; then \
+    echo "Acquire::http::Proxy \"$APT_PROXY\";" > /etc/apt/apt.conf.d/proxy.conf ;\
+    echo "Acquire::https::Proxy \"$APT_PROXY\";" >> /etc/apt/apt.conf.d/proxy.conf ;\
+    fi
 
 RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install \
         debhelper \
+        dh-python \
         git \
         python3 \
         python3-all \
         python3-dev \
-        python3-setuptools
+        python3-setuptools \
+        python3-pip \
+        tox
 
-RUN python3 -m easy_install pip==21.0.1
-RUN pip3 install tox==3.22.0
+ENV LC_ALL C.UTF-8
+ENV LANG C.UTF-8
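The new APT_PROXY build argument writes an apt proxy configuration only when a value is supplied, so builds without it are unaffected. A build behind a proxy would presumably be invoked like this (proxy URL and image tag illustrative):

    docker build --build-arg APT_PROXY=http://proxy.example.com:3128 -t osm/n2vc .
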
diff --git a/Jenkinsfile b/Jenkinsfile
index e384cbd..d8e7474 100644
@@ -1,17 +1,19 @@
-/*
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-  implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-*/
+/* Copyright ETSI OSM and others
+ *
+ * All Rights Reserved.
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *   not use this file except in compliance with the License. You may obtain
+ *   a copy of the License at
+ *
+ *        http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *   License for the specific language governing permissions and limitations
+ *   under the License.
+ */
 
 properties([
     parameters([
@@ -31,7 +33,7 @@ def devops_checkout() {
     }
 }
 
-node('docker') {
+node('stage_2') {
     checkout scm
     devops_checkout()
 
diff --git a/devops-stages/stage-archive.sh b/devops-stages/stage-archive.sh
index 662616c..eead613 100755
@@ -18,7 +18,4 @@ rm -rf pool
 rm -rf dists
 mkdir -p pool/$MDG
 mv deb_dist/*.deb pool/$MDG/
-mkdir -p dists/unstable/$MDG/binary-amd64/
-apt-ftparchive packages pool/$MDG > dists/unstable/$MDG/binary-amd64/Packages
-gzip -9fk dists/unstable/$MDG/binary-amd64/Packages
-echo "dists/**,pool/$MDG/*.deb"
+
diff --git a/n2vc/definitions.py b/n2vc/definitions.py
new file mode 100644
index 0000000..92d4f51
--- /dev/null
+++ b/n2vc/definitions.py
@@ -0,0 +1,108 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+from typing import NoReturn
+
+from n2vc.utils import get_ee_id_components
+
+
+class RelationEndpoint:
+    """Represents an endpoint of an application"""
+
+    def __init__(self, ee_id: str, vca_id: str, endpoint_name: str) -> NoReturn:
+        """
+        Args:
+            ee_id: Execution environment id.
+                   Format: "<model>.<application_name>.<machine_id>".
+            vca_id: Id of the VCA. Identifies the Juju Controller
+                    where the application is deployed
+            endpoint_name: Name of the endpoint for the relation
+        """
+        ee_components = get_ee_id_components(ee_id)
+        self._model_name = ee_components[0]
+        self._application_name = ee_components[1]
+        self._vca_id = vca_id
+        self._endpoint_name = endpoint_name
+
+    @property
+    def application_name(self) -> str:
+        """Returns the application name"""
+        return self._application_name
+
+    @property
+    def endpoint(self) -> str:
+        """Returns the application name and the endpoint. Format: <application>:<endpoint>"""
+        return f"{self.application_name}:{self._endpoint_name}"
+
+    @property
+    def endpoint_name(self) -> str:
+        """Returns the endpoint name"""
+        return self._endpoint_name
+
+    @property
+    def model_name(self) -> str:
+        """Returns the model name"""
+        return self._model_name
+
+    @property
+    def vca_id(self) -> str:
+        """Returns the vca id"""
+        return self._vca_id
+
+    def __str__(self) -> str:
+        app = self.application_name
+        endpoint = self.endpoint_name
+        model = self.model_name
+        vca = self.vca_id
+        return f"{app}:{endpoint} (model: {model}, vca: {vca})"
+
+
+class Offer:
+    """Represents a juju offer"""
+
+    def __init__(self, url: str, vca_id: str = None) -> NoReturn:
+        """
+        Args:
+            url: Offer url. Format: <user>/<model>.<offer-name>.
+        """
+        self._url = url
+        self._username = url.split(".")[0].split("/")[0]
+        self._model_name = url.split(".")[0].split("/")[1]
+        self._name = url.split(".")[1]
+        self._vca_id = vca_id
+
+    @property
+    def model_name(self) -> str:
+        """Returns the model name"""
+        return self._model_name
+
+    @property
+    def name(self) -> str:
+        """Returns the offer name"""
+        return self._name
+
+    @property
+    def username(self) -> str:
+        """Returns the username"""
+        return self._username
+
+    @property
+    def url(self) -> str:
+        """Returns the offer url"""
+        return self._url
+
+    @property
+    def vca_id(self) -> str:
+        """Returns the vca id"""
+        return self._vca_id
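A minimal usage sketch for the two new classes, derived from the constructors and properties above (all identifiers illustrative):

    from n2vc.definitions import Offer, RelationEndpoint

    # ee_id follows "<model>.<application_name>.<machine_id>"
    ep = RelationEndpoint("mymodel.myapp.0", "vca-id-1", "mysql")
    print(ep.endpoint)  # myapp:mysql
    print(ep)           # myapp:mysql (model: mymodel, vca: vca-id-1)

    # offer url follows "<user>/<model>.<offer-name>"
    offer = Offer("admin/mymodel.my-offer")
    print(offer.username, offer.model_name, offer.name)  # admin mymodel my-offer
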
diff --git a/n2vc/juju_watcher.py b/n2vc/juju_watcher.py
index 9f9520f..747f08e 100644
@@ -14,6 +14,7 @@
 
 import asyncio
 import time
+
 from juju.client import client
 from n2vc.exceptions import EntityInvalidException
 from n2vc.n2vc_conn import N2VCConnector
@@ -42,6 +43,7 @@ def entity_ready(entity: ModelEntity) -> bool:
 
     :returns: boolean saying if the entity is ready or not
     """
+
     entity_type = entity.entity_type
     if entity_type == "machine":
         return entity.agent_status in ["started"]
@@ -50,6 +52,8 @@ def entity_ready(entity: ModelEntity) -> bool:
     elif entity_type == "application":
         # Workaround for bug: https://github.com/juju/python-libjuju/issues/441
         return entity.status in ["active", "blocked"]
+    elif entity_type == "unit":
+        return entity.agent_status in ["idle"]
     else:
         raise EntityInvalidException("Unknown entity type: {}".format(entity_type))
 
@@ -143,7 +147,7 @@ class JujuModelWatcher:
             total_timeout = 3600.0
 
         entity_type = entity.entity_type
-        if entity_type not in ["application", "action", "machine"]:
+        if entity_type not in ["application", "action", "machine", "unit"]:
             raise EntityInvalidException("Unknown entity type: {}".format(entity_type))
 
         # Coroutine to wait until the entity reaches the final state
@@ -177,6 +181,113 @@ class JujuModelWatcher:
             for task in tasks:
                 task.cancel()
 
+    @staticmethod
+    async def wait_for_units_idle(
+        model: Model, application: Application, timeout: float = 60
+    ):
+        """
+        Waits for the application and all its units to transition back to idle
+
+        :param: model:          Model to observe
+        :param: application:    The application to be observed
+        :param: timeout:        Maximum time between two updates in the model
+
+        :raises: asyncio.TimeoutError when timeout reaches
+        """
+
+        ensure_units_idle = asyncio.ensure_future(
+            asyncio.wait_for(
+                JujuModelWatcher.ensure_units_idle(model, application), timeout
+            )
+        )
+        tasks = [
+            ensure_units_idle,
+        ]
+        (done, pending) = await asyncio.wait(
+            tasks, timeout=timeout, return_when=asyncio.FIRST_COMPLETED
+        )
+
+        if ensure_units_idle in pending:
+            ensure_units_idle.cancel()
+            raise TimeoutError(
+                "Application's units failed to return to idle after {} seconds".format(
+                    timeout
+                )
+            )
+        if ensure_units_idle.result():
+            pass
+
+    @staticmethod
+    async def ensure_units_idle(model: Model, application: Application):
+        """
+        Waits forever until the application's units to transition back to idle
+
+        :param: model:          Model to observe
+        :param: application:    The application to be observed
+        """
+
+        try:
+            allwatcher = client.AllWatcherFacade.from_connection(model.connection())
+            unit_wanted_state = "executing"
+            final_state_reached = False
+
+            units = application.units
+            final_state_seen = {unit.entity_id: False for unit in units}
+            agent_state_seen = {unit.entity_id: False for unit in units}
+            workload_state = {unit.entity_id: False for unit in units}
+
+            try:
+                while not final_state_reached:
+                    change = await allwatcher.Next()
+
+                    # Keep checking to see if new units were added during the change
+                    for unit in units:
+                        if unit.entity_id not in final_state_seen:
+                            final_state_seen[unit.entity_id] = False
+                            agent_state_seen[unit.entity_id] = False
+                            workload_state[unit.entity_id] = False
+
+                    for delta in change.deltas:
+                        await asyncio.sleep(0)
+                        if delta.entity != units[0].entity_type:
+                            continue
+
+                        final_state_reached = True
+                        for unit in units:
+                            if delta.data["name"] == unit.entity_id:
+                                status = delta.data["agent-status"]["current"]
+                                workload_state[unit.entity_id] = delta.data[
+                                    "workload-status"
+                                ]["current"]
+
+                                if status == unit_wanted_state:
+                                    agent_state_seen[unit.entity_id] = True
+                                    final_state_seen[unit.entity_id] = False
+
+                                if (
+                                    status == "idle"
+                                    and agent_state_seen[unit.entity_id]
+                                ):
+                                    final_state_seen[unit.entity_id] = True
+
+                            final_state_reached = (
+                                final_state_reached
+                                and final_state_seen[unit.entity_id]
+                                and workload_state[unit.entity_id]
+                                in [
+                                    "active",
+                                    "error",
+                                ]
+                            )
+
+            except ConnectionClosed:
+                pass
+                # This is expected to happen when the
+                # entity reaches its final state, because
+                # the model connection is closed afterwards
+        except Exception as e:
+            raise e
+
     @staticmethod
     async def model_watcher(
         model: Model,
@@ -201,69 +312,76 @@ class JujuModelWatcher:
         :raises: asyncio.TimeoutError when timeout reaches
         """
 
-        allwatcher = client.AllWatcherFacade.from_connection(model.connection())
+        try:
+            allwatcher = client.AllWatcherFacade.from_connection(model.connection())
 
-        # Genenerate array with entity types to listen
-        entity_types = (
-            [entity_type, "unit"]
-            if entity_type == "application"  # TODO: Add "action" too
-            else [entity_type]
-        )
+            # Genenerate array with entity types to listen
+            entity_types = (
+                [entity_type, "unit"]
+                if entity_type == "application"  # TODO: Add "action" too
+                else [entity_type]
+            )
 
-        # Get time when it should timeout
-        timeout_end = time.time() + timeout
+            # Get time when it should timeout
+            timeout_end = time.time() + timeout
 
-        try:
-            while True:
-                change = await allwatcher.Next()
-                for delta in change.deltas:
-                    write = False
-                    delta_entity = None
-
-                    # Get delta EntityType
-                    delta_entity = delta.entity
-
-                    if delta_entity in entity_types:
-                        # Get entity id
-                        if entity_type == "application":
-                            id = (
-                                delta.data["application"]
-                                if delta_entity == "unit"
-                                else delta.data["name"]
-                            )
-                        else:
-                            id = delta.data["id"]
-
-                        # Write if the entity id match
-                        write = True if id == entity_id else False
-
-                        # Update timeout
-                        timeout_end = time.time() + timeout
-                        (
-                            status,
-                            status_message,
-                            vca_status,
-                        ) = JujuModelWatcher.get_status(delta)
-
-                        if write and n2vc is not None and db_dict:
-                            # Write status to DB
-                            status = n2vc.osm_status(delta_entity, status)
-                            await n2vc.write_app_status_to_db(
-                                db_dict=db_dict,
-                                status=status,
-                                detailed_status=status_message,
-                                vca_status=vca_status,
-                                entity_type=delta_entity,
-                                vca_id=vca_id,
-                            )
-                # Check if timeout
-                if time.time() > timeout_end:
-                    raise asyncio.TimeoutError()
-        except ConnectionClosed:
-            pass
-            # This is expected to happen when the
-            # entity reaches its final state, because
-            # the model connection is closed afterwards
+            try:
+                while True:
+                    change = await allwatcher.Next()
+                    for delta in change.deltas:
+                        write = False
+                        delta_entity = None
+
+                        # Get delta EntityType
+                        delta_entity = delta.entity
+
+                        if delta_entity in entity_types:
+                            # Get entity id
+                            id = None
+                            if entity_type == "application":
+                                id = (
+                                    delta.data["application"]
+                                    if delta_entity == "unit"
+                                    else delta.data["name"]
+                                )
+                            else:
+                                if "id" in delta.data:
+                                    id = delta.data["id"]
+                                else:
+                                    print("No id {}".format(delta.data))
+
+                            # Write if the entity id match
+                            write = True if id == entity_id else False
+
+                            # Update timeout
+                            timeout_end = time.time() + timeout
+                            (
+                                status,
+                                status_message,
+                                vca_status,
+                            ) = JujuModelWatcher.get_status(delta)
+
+                            if write and n2vc is not None and db_dict:
+                                # Write status to DB
+                                status = n2vc.osm_status(delta_entity, status)
+                                await n2vc.write_app_status_to_db(
+                                    db_dict=db_dict,
+                                    status=status,
+                                    detailed_status=status_message,
+                                    vca_status=vca_status,
+                                    entity_type=delta_entity,
+                                    vca_id=vca_id,
+                                )
+                    # Check if timeout
+                    if time.time() > timeout_end:
+                        raise asyncio.TimeoutError()
+            except ConnectionClosed:
+                pass
+                # This is expected to happen when the
+                # entity reaches its final state, because
+                # the model connection is closed afterwards
+        except Exception as e:
+            raise e
 
     @staticmethod
     def get_status(delta: Delta) -> (str, str, str):
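The new wait_for_units_idle entry point keeps the same static-method style as the existing watcher API; a hedged sketch of a call site (model and application are python-libjuju objects, names illustrative):

    # Block until every unit of the application is back to idle, or fail
    # once 60 seconds pass without the units reaching that state.
    await JujuModelWatcher.wait_for_units_idle(model, application, timeout=60)
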
diff --git a/n2vc/k8s_conn.py b/n2vc/k8s_conn.py
index 89805b3..3a1a5ef 100644
@@ -22,6 +22,7 @@
 
 import abc
 import asyncio
+from typing import Union
 import time
 
 from n2vc.loggable import Loggable
@@ -78,7 +79,14 @@ class K8sConnector(abc.ABC, Loggable):
 
     @abc.abstractmethod
     async def repo_add(
-        self, cluster_uuid: str, name: str, url: str, repo_type: str = "chart"
+        self,
+        cluster_uuid: str,
+        name: str,
+        url: str,
+        repo_type: str = "chart",
+        cert: str = None,
+        user: str = None,
+        password: str = None,
     ):
         """
         Add a new repository to OSM database
@@ -186,6 +194,7 @@ class K8sConnector(abc.ABC, Loggable):
         timeout: float = 300,
         params: dict = None,
         db_dict: dict = None,
+        force: bool = False,
     ):
         """
         Upgrades an existing KDU instance. It would implicitly use the `upgrade` call
@@ -205,6 +214,7 @@ class K8sConnector(abc.ABC, Loggable):
                         path: <str>},
                             e.g. {collection: "nsrs", filter:
                             {_id: <nsd-id>, path: "_admin.deployed.K8S.3"}
+        :param force: force recreation of resources if necessary
         :return: reference to the new revision number of the KDU instance
         """
 
@@ -215,19 +225,30 @@ class K8sConnector(abc.ABC, Loggable):
         scale: int,
         resource_name: str,
         total_timeout: float = 1800,
+        cluster_uuid: str = None,
+        kdu_model: str = None,
+        atomic: bool = True,
+        db_dict: dict = None,
         **kwargs,
     ) -> bool:
-        """
-        Scales an application in KDU instance.
-
-        :param: kdu_instance str:        KDU instance name
-        :param: scale int:               Scale to which to set this application
-        :param: resource_name str:       Resource name (Application name)
-        :param: timeout float:           The time, in seconds, to wait for the install
-                                         to finish
-        :param kwargs:                   Additional parameters
-
-        :return: If successful, returns True
+        """Scale a resource in a KDU instance.
+
+        Args:
+            kdu_instance: KDU instance name
+            scale: Scale to which to set the resource
+            resource_name: Resource name
+            total_timeout: The time, in seconds, to wait for the install
+                to finish
+            cluster_uuid: The UUID of the cluster
+            kdu_model: The chart/bundle reference
+            atomic: if set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            db_dict: Dictionary for any additional data
+            kwargs: Additional parameters
+                vca_id (str): VCA ID
+
+        Returns:
+            True if successful, False otherwise
         """
 
     @abc.abstractmethod
         """
 
     @abc.abstractmethod
@@ -235,16 +256,23 @@ class K8sConnector(abc.ABC, Loggable):
         self,
         resource_name: str,
         kdu_instance: str,
         self,
         resource_name: str,
         kdu_instance: str,
+        cluster_uuid: str,
+        kdu_model: str,
+        timeout: float = 300,
         **kwargs,
     ) -> int:
         **kwargs,
     ) -> int:
-        """
-        Get an application scale count.
+        """Get a resource scale count in a KDU instance.
 
-        :param: resource_name str:       Resource name (Application name)
-        :param: kdu_instance str:        KDU instance name
-        :param kwargs:                   Additional parameters
+        Args:
+            resource_name: Resource name
+            kdu_instance: KDU instance name
+            cluster_uuid: The UUID of the cluster
+            kdu_model:    chart/bundle reference
+            timeout:  The time, in seconds, to wait
+            kwargs: Additional parameters
 
-        :return: Return application instance count
+        Returns:
+            Resource instance count
         """
 
     @abc.abstractmethod
         """
 
     @abc.abstractmethod
@@ -303,6 +331,28 @@ class K8sConnector(abc.ABC, Loggable):
         :return: Returns the output of the action
         """
 
+    @abc.abstractmethod
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrade charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if status equals to "completed"
+        """
+
     @abc.abstractmethod
     async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:
         """
@@ -333,7 +383,9 @@ class K8sConnector(abc.ABC, Loggable):
         """
 
     @abc.abstractmethod
         """
 
     @abc.abstractmethod
-    async def status_kdu(self, cluster_uuid: str, kdu_instance: str) -> str:
+    async def status_kdu(
+        self, cluster_uuid: str, kdu_instance: str, yaml_format: str
+    ) -> Union[str, dict]:
         """
         This call would retrieve tha current state of a given KDU instance. It would be
         would allow to retrieve the _composition_ (i.e. K8s objects) and _specific
         """
         This call would retrieve tha current state of a given KDU instance. It would be
         would allow to retrieve the _composition_ (i.e. K8s objects) and _specific
@@ -342,6 +394,8 @@ class K8sConnector(abc.ABC, Loggable):
 
         :param cluster_uuid: UUID of a K8s cluster known by OSM
         :param kdu_instance: unique name for the KDU instance
+        :param yaml_format: if the return shall be returned as an YAML string or as a
+                                dictionary
         :return: If successful, it will return the following vector of arguments:
         - K8s `namespace` in the cluster where the KDU lives
         - `state` of the KDU instance. It can be:
@@ -406,6 +460,18 @@ class K8sConnector(abc.ABC, Loggable):
     async def write_app_status_to_db(
         self, db_dict: dict, status: str, detailed_status: str, operation: str
     ) -> bool:
+        """
+        This method will write the status of the application to the database.
+
+        :param db_dict: A dictionary with the database necessary information. It shall contain the values for the keys:
+            - "collection": The Mongo DB collection to write to
+            - "filter": The query filter to use in the update process
+            - "path": The dot separated keys which targets the object to be updated
+        :param status: Status of the application
+        :param detailed_status: Detailed status of the application
+        :param operation: Operation that is being performed on the application
+        :return: True if successful
+        """
 
         if not self.db:
             self.warning("No db => No database write")
@@ -418,7 +484,6 @@ class K8sConnector(abc.ABC, Loggable):
         self.log.debug("status={}".format(status))
 
         try:
-
             the_table = db_dict["collection"]
             the_filter = db_dict["filter"]
             the_path = db_dict["path"]
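The db_dict contract spelled out in the new write_app_status_to_db docstring matches the example already quoted in the upgrade docstring above; an illustrative value (record id hypothetical):

    db_dict = {
        "collection": "nsrs",
        "filter": {"_id": "0123456789abcdef01234567"},
        "path": "_admin.deployed.K8S.3",
    }
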
diff --git a/n2vc/k8s_helm3_conn.py b/n2vc/k8s_helm3_conn.py
index edefc86..14f7fe0 100644
@@ -19,6 +19,8 @@
 # For those usages not covered by the Apache License, Version 2.0 please
 # contact with: nfvlabs@tid.es
 ##
+from typing import Union
+from shlex import quote
 import os
 import yaml
 
@@ -83,7 +85,12 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         """Install a helm chart
 
         :param cluster_uuid str: The UUID of the cluster to install to
         """Install a helm chart
 
         :param cluster_uuid str: The UUID of the cluster to install to
-        :param kdu_model str: The name or path of a bundle to install
+        :param kdu_model str: chart/reference (string), which can be either
+            of these options:
+            - a name of chart available via the repos known by OSM
+              (e.g. stable/openldap, stable/openldap:1.2.4)
+            - a path to a packaged chart (e.g. mychart.tgz)
+            - a path to an unpacked chart directory or a URL (e.g. mychart)
         :param kdu_instance: Kdu instance name
         :param atomic bool: If set, waits until the model is active and resets
                             the cluster on failure.
@@ -97,24 +104,25 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
 
         :return: True if successful
         """
-        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
-        self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_id))
+
+        self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid))
 
         # sync local dir
-        self.fs.sync(from_path=cluster_id)
+        self.fs.sync(from_path=cluster_uuid)
 
         # init env, paths
         paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
+            cluster_name=cluster_uuid, create_if_not_exist=True
         )
 
         # for helm3 if namespace does not exist must create it
         if namespace and namespace != "kube-system":
-            if not await self._namespace_exists(cluster_id, namespace):
+            if not await self._namespace_exists(cluster_uuid, namespace):
                 try:
-                    await self._create_namespace(cluster_id, namespace)
+                    # TODO: refactor to use kubernetes API client
+                    await self._create_namespace(cluster_uuid, namespace)
                 except Exception as e:
-                    if not await self._namespace_exists(cluster_id, namespace):
+                    if not await self._namespace_exists(cluster_uuid, namespace):
                         err_msg = (
                             "namespace {} does not exist in cluster_id {} "
                             "error message: ".format(namespace, e)
@@ -123,7 +131,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
                         raise K8sException(err_msg)
 
         await self._install_impl(
-            cluster_id,
+            cluster_uuid,
             kdu_model,
             paths,
             env,
@@ -137,18 +145,17 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         )
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_id)
+        self.fs.reverse_sync(from_path=cluster_uuid)
 
         self.log.debug("Returning kdu_instance {}".format(kdu_instance))
         return True
 
     async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:
-
         self.log.debug(
             "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url)
         )
 
-        return await self._exec_inspect_comand(
+        return await self._exec_inspect_command(
             inspect_command="all", kdu_model=kdu_model, repo_url=repo_url
         )
 
@@ -244,7 +251,6 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         return namespace in namespaces if namespaces else False
 
     async def _get_namespaces(self, cluster_id: str):
-
         self.log.debug("get namespaces cluster_id {}".format(cluster_id))
 
         # init config, env
@@ -253,7 +259,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         )
 
         command = "{} --kubeconfig={} get namespaces -o=yaml".format(
-            self.kubectl_command, paths["kube_config"]
+            self.kubectl_command, quote(paths["kube_config"])
         )
         output, _rc = await self._local_async_exec(
             command=command, raise_exception_on_error=True, env=env
@@ -266,7 +272,6 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         return namespaces
 
     async def _create_namespace(self, cluster_id: str, namespace: str):
-
         self.log.debug(f"create namespace: {cluster_id} for cluster_id: {namespace}")
 
         # init config, env
@@ -275,7 +280,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         )
 
         command = "{} --kubeconfig={} create namespace {}".format(
-            self.kubectl_command, paths["kube_config"], namespace
+            self.kubectl_command, quote(paths["kube_config"]), quote(namespace)
         )
         _, _rc = await self._local_async_exec(
             command=command, raise_exception_on_error=True, env=env
@@ -284,17 +289,20 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
 
         return _rc
 
-    async def _get_services(self, cluster_id: str, kdu_instance: str, namespace: str):
-
+    async def _get_services(
+        self, cluster_id: str, kdu_instance: str, namespace: str, kubeconfig: str
+    ):
         # init config, env
         paths, env = self._init_paths_env(
             cluster_name=cluster_id, create_if_not_exist=True
         )
 
-        command1 = "{} get manifest {} --namespace={}".format(
-            self._helm_command, kdu_instance, namespace
+        command1 = "env KUBECONFIG={} {} get manifest {} --namespace={}".format(
+            kubeconfig, self._helm_command, quote(kdu_instance), quote(namespace)
+        )
+        command2 = "{} get --namespace={} -f -".format(
+            self.kubectl_command, quote(namespace)
         )
-        command2 = "{} get --namespace={} -f -".format(self.kubectl_command, namespace)
         output, _rc = await self._local_async_exec_pipe(
             command1, command2, env=env, raise_exception_on_error=True
         )
@@ -310,14 +318,13 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         if namespace != "kube-system":
             namespaces = await self._get_namespaces(cluster_id)
             if namespace not in namespaces:
+                # TODO: refactor to use kubernetes API client
                 await self._create_namespace(cluster_id, namespace)
 
-        # If default repo is not included add
-        cluster_uuid = "{}:{}".format(namespace, cluster_id)
-        repo_list = await self.repo_list(cluster_uuid)
+        repo_list = await self.repo_list(cluster_id)
         stable_repo = [repo for repo in repo_list if repo["name"] == "stable"]
         if not stable_repo and self._stable_repo_url:
-            await self.repo_add(cluster_uuid, "stable", self._stable_repo_url)
+            await self.repo_add(cluster_id, "stable", self._stable_repo_url)
 
         # Returns False as no software needs to be uninstalled
         return False
@@ -327,7 +334,6 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         pass
 
     async def _instances_list(self, cluster_id: str):
-
         # init paths, env
         paths, env = self._init_paths_env(
             cluster_name=cluster_id, create_if_not_exist=True
@@ -345,22 +351,48 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
             return []
 
     def _get_inspect_command(
-        self, inspect_command: str, kdu_model: str, repo_str: str, version: str
+        self, show_command: str, kdu_model: str, repo_str: str, version: str
     ):
+        """Generates the command to obtain the information about an Helm Chart package
+            (´helm show ...´ command)
+
+        Args:
+            show_command: the second part of the command (`helm show <show_command>`)
+            kdu_model: The name or path of a Helm Chart
+            repo_str: Helm Chart repository url
+            version: constraint with specific version of the Chart to use
+
+        Returns:
+            str: the generated Helm Chart command
+        """
+
         inspect_command = "{} show {} {}{} {}".format(
-            self._helm_command, inspect_command, kdu_model, repo_str, version
+            self._helm_command, show_command, quote(kdu_model), repo_str, version
         )
         return inspect_command
 
+    def _get_get_command(
+        self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
+    ):
+        get_command = (
+            "env KUBECONFIG={} {} get {} {} --namespace={} --output yaml".format(
+                kubeconfig,
+                self._helm_command,
+                get_command,
+                quote(kdu_instance),
+                quote(namespace),
+            )
+        )
+        return get_command
+
     async def _status_kdu(
         self,
         cluster_id: str,
         kdu_instance: str,
         namespace: str = None,
+        yaml_format: bool = False,
         show_error_log: bool = False,
-        return_text: bool = False,
-    ):
-
+    ) -> Union[str, dict]:
         self.log.debug(
             "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace)
         )
@@ -372,8 +404,11 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         paths, env = self._init_paths_env(
             cluster_name=cluster_id, create_if_not_exist=True
         )
-        command = "{} status {} --namespace={} --output yaml".format(
-            self._helm_command, kdu_instance, namespace
+        command = "env KUBECONFIG={} {} status {} --namespace={} --output yaml".format(
+            paths["kube_config"],
+            self._helm_command,
+            quote(kdu_instance),
+            quote(namespace),
         )
 
         output, rc = await self._local_async_exec(
@@ -383,7 +418,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
             env=env,
         )
 
-        if return_text:
+        if yaml_format:
             return str(output)
 
         if rc != 0:
@@ -394,11 +429,18 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         # remove field 'notes' and manifest
         try:
             del data.get("info")["notes"]
-            del data["manifest"]
         except KeyError:
             pass
 
-        # unable to parse 'resources' as currently it is not included in helm3
+        # parse the manifest to a list of dictionaries
+        if "manifest" in data:
+            manifest_str = data.get("manifest")
+            manifest_docs = yaml.load_all(manifest_str, Loader=yaml.SafeLoader)
+
+            data["manifest"] = []
+            for doc in manifest_docs:
+                data["manifest"].append(doc)
+
         return data
 
     def _get_install_command(
@@ -410,8 +452,8 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         version: str,
         atomic: bool,
         timeout: float,
+        kubeconfig: str,
     ) -> str:
-
         timeout_str = ""
         if timeout:
             timeout_str = "--timeout {}s".format(timeout)
@@ -423,7 +465,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         # namespace
         namespace_str = ""
         if namespace:
-            namespace_str = "--namespace {}".format(namespace)
+            namespace_str = "--namespace {}".format(quote(namespace))
 
         # version
         version_str = ""
@@ -431,20 +473,72 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
             version_str = "--version {}".format(version)
 
         command = (
-            "{helm} install {name} {atomic} --output yaml  "
+            "env KUBECONFIG={kubeconfig} {helm} install {name} {atomic} --output yaml  "
             "{params} {timeout} {ns} {model} {ver}".format(
             "{params} {timeout} {ns} {model} {ver}".format(
+                kubeconfig=kubeconfig,
                 helm=self._helm_command,
-                name=kdu_instance,
+                name=quote(kdu_instance),
                 atomic=atomic_str,
                 params=params_str,
                 timeout=timeout_str,
                 ns=namespace_str,
-                model=kdu_model,
+                model=quote(kdu_model),
                 ver=version_str,
             )
         )
         return command
 
+    def _get_upgrade_scale_command(
+        self,
+        kdu_model: str,
+        kdu_instance: str,
+        namespace: str,
+        scale: int,
+        version: str,
+        atomic: bool,
+        replica_str: str,
+        timeout: float,
+        resource_name: str,
+        kubeconfig: str,
+    ) -> str:
+        """Generates the command to scale a Helm Chart release
+
+        Args:
+            kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
+            kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
+            namespace (str): Namespace where this KDU instance is deployed
+            scale (int): Scale count
+            version (str): Constraint with specific version of the Chart to use
+            atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            replica_str (str): The key under resource_name key where the scale count is stored
+            timeout (float): The time, in seconds, to wait
+            resource_name (str): The KDU's resource to scale
+            kubeconfig (str): Kubeconfig file path
+
+        Returns:
+            str: command to scale a Helm Chart release
+        """
+
+        # scale
+        if resource_name:
+            scale_dict = {"{}.{}".format(resource_name, replica_str): scale}
+        else:
+            scale_dict = {replica_str: scale}
+
+        scale_str = self._params_to_set_option(scale_dict)
+
+        return self._get_upgrade_command(
+            kdu_model=kdu_model,
+            kdu_instance=kdu_instance,
+            namespace=namespace,
+            params_str=scale_str,
+            version=version,
+            atomic=atomic,
+            timeout=timeout,
+            kubeconfig=kubeconfig,
+        )
+
     def _get_upgrade_command(
         self,
         kdu_model: str,
@@ -454,7 +548,25 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         version: str,
         atomic: bool,
         timeout: float,
+        kubeconfig: str,
+        force: bool = False,
     ) -> str:
+        """Generates the command to upgrade a Helm Chart release
+
+        Args:
+            kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
+            kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
+            namespace (str): Namespace where this KDU instance is deployed
+            params_str (str): Params used to upgrade the Helm Chart release
+            version (str): Constraint with specific version of the Chart to use
+            atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            timeout (float): The time, in seconds, to wait
+            kubeconfig (str): Kubeconfig file path
+            force (bool): If set, helm forces resource updates through a replacement strategy. This may recreate pods.
+        Returns:
+            str: command to upgrade a Helm Chart release
+        """
 
         timeout_str = ""
         if timeout:
@@ -465,42 +577,54 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         if atomic:
             atomic_str = "--atomic"
 
+        # force
+        force_str = ""
+        if force:
+            force_str = "--force "
+
         # version
         version_str = ""
         if version:
-            version_str = "--version {}".format(version)
+            version_str = "--version {}".format(quote(version))
 
         # namespace
         namespace_str = ""
         if namespace:
-            namespace_str = "--namespace {}".format(namespace)
+            namespace_str = "--namespace {}".format(quote(namespace))
 
         command = (
-            "{helm} upgrade {name} {model} {namespace} {atomic} --output yaml {params} "
-            "{timeout}  {ver}".format(
-                helm=self._helm_command,
-                name=kdu_instance,
-                namespace=namespace_str,
-                atomic=atomic_str,
-                params=params_str,
-                timeout=timeout_str,
-                model=kdu_model,
-                ver=version_str,
-            )
+            "env KUBECONFIG={kubeconfig} {helm} upgrade {name} {model} {namespace} {atomic} {force}"
+            "--output yaml {params} {timeout} --reuse-values {ver}"
+        ).format(
+            kubeconfig=kubeconfig,
+            helm=self._helm_command,
+            name=quote(kdu_instance),
+            namespace=namespace_str,
+            atomic=atomic_str,
+            force=force_str,
+            params=params_str,
+            timeout=timeout_str,
+            model=quote(kdu_model),
+            ver=version_str,
         )
         return command
 
     def _get_rollback_command(
-        self, kdu_instance: str, namespace: str, revision: float
+        self, kdu_instance: str, namespace: str, revision: float, kubeconfig: str
     ) -> str:
-        return "{} rollback {} {} --namespace={} --wait".format(
-            self._helm_command, kdu_instance, revision, namespace
+        return "env KUBECONFIG={} {} rollback {} {} --namespace={} --wait".format(
+            kubeconfig,
+            self._helm_command,
+            quote(kdu_instance),
+            revision,
+            quote(namespace),
         )
 
-    def _get_uninstall_command(self, kdu_instance: str, namespace: str) -> str:
-
-        return "{} uninstall {} --namespace={}".format(
-            self._helm_command, kdu_instance, namespace
+    def _get_uninstall_command(
+        self, kdu_instance: str, namespace: str, kubeconfig: str
+    ) -> str:
+        return "env KUBECONFIG={} {} uninstall {} --namespace={}".format(
+            kubeconfig, self._helm_command, quote(kdu_instance), quote(namespace)
         )
 
     def _get_helm_chart_repos_ids(self, cluster_uuid) -> list:
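Judging from the reworked format string, _get_upgrade_command now emits a single shell line of roughly this shape (kubeconfig path, release and chart names illustrative; params omitted):

    env KUBECONFIG=/app/storage/mycluster/.kube/config helm upgrade myrelease stable/openldap --namespace osm --atomic --output yaml --timeout 300s --reuse-values --version 1.2.4
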
diff --git a/n2vc/k8s_helm_base_conn.py b/n2vc/k8s_helm_base_conn.py
index cca0cab..5f004b3 100644
@@ -21,6 +21,8 @@
 ##
 import abc
 import asyncio
+from typing import Union
+from shlex import quote
 import random
 import time
 import shlex
@@ -29,10 +31,12 @@ import stat
 import os
 import yaml
 from uuid import uuid4
+from urllib.parse import urlparse
 
 from n2vc.config import EnvironConfig
 from n2vc.exceptions import K8sException
 from n2vc.k8s_conn import K8sConnector
+from n2vc.kubectl import Kubectl
 
 
 class K8sHelmBaseConnector(K8sConnector):
@@ -89,14 +93,21 @@ class K8sHelmBaseConnector(K8sConnector):
         if self._stable_repo_url == "None":
             self._stable_repo_url = None
 
-    @staticmethod
-    def _get_namespace_cluster_id(cluster_uuid: str) -> (str, str):
+        # Lock to avoid concurrent execution of helm commands
+        self.cmd_lock = asyncio.Lock()
+
+    def _get_namespace(self, cluster_uuid: str) -> str:
         """
         """
-        Parses cluster_uuid stored at database that can be either 'namespace:cluster_id' or only
-        cluster_id for backward compatibility
+        Obtains the namespace used by the cluster with the uuid passed by argument
+
+        param: cluster_uuid: cluster's uuid
         """
         """
-        namespace, _, cluster_id = cluster_uuid.rpartition(":")
-        return namespace, cluster_id
+
+        # first, obtain the cluster corresponding to the uuid passed by argument
+        k8scluster = self.db.get_one(
+            "k8sclusters", q_filter={"_id": cluster_uuid}, fail_on_empty=False
+        )
+        return k8scluster.get("namespace")
 
     async def init_env(
         self,
@@ -104,7 +115,7 @@ class K8sHelmBaseConnector(K8sConnector):
         namespace: str = "kube-system",
         reuse_cluster_uuid=None,
         **kwargs,
-    ) -> (str, bool):
+    ) -> tuple[str, bool]:
         """
         It prepares a given K8s cluster environment to run Charts
 
         """
         It prepares a given K8s cluster environment to run Charts
 
@@ -120,11 +131,9 @@ class K8sHelmBaseConnector(K8sConnector):
         """
 
         if reuse_cluster_uuid:
         """
 
         if reuse_cluster_uuid:
-            namespace_, cluster_id = self._get_namespace_cluster_id(reuse_cluster_uuid)
-            namespace = namespace_ or namespace
+            cluster_id = reuse_cluster_uuid
         else:
             cluster_id = str(uuid4())
-        cluster_uuid = "{}:{}".format(namespace, cluster_id)
 
         self.log.debug(
             "Initializing K8S Cluster {}. namespace: {}".format(cluster_id, namespace)
@@ -146,42 +155,108 @@ class K8sHelmBaseConnector(K8sConnector):
 
         self.log.info("Cluster {} initialized".format(cluster_id))
 
 
         self.log.info("Cluster {} initialized".format(cluster_id))
 
-        return cluster_uuid, n2vc_installed_sw
+        return cluster_id, n2vc_installed_sw
 
     async def repo_add(
-        self, cluster_uuid: str, name: str, url: str, repo_type: str = "chart"
+        self,
+        cluster_uuid: str,
+        name: str,
+        url: str,
+        repo_type: str = "chart",
+        cert: str = None,
+        user: str = None,
+        password: str = None,
+        oci: bool = False,
     ):
-        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
         self.log.debug(
             "Cluster {}, adding {} repository {}. URL: {}".format(
-                cluster_id, repo_type, name, url
+                cluster_uuid, repo_type, name, url
             )
         )
 
+        # init_env
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
         # sync local dir
-        self.fs.sync(from_path=cluster_id)
+        self.fs.sync(from_path=cluster_uuid)
+
+        if oci:
+            if user and password:
+                host_port = urlparse(url).netloc if url.startswith("oci://") else url
+                # helm registry login url
+                command = "env KUBECONFIG={} {} registry login {}".format(
+                    paths["kube_config"], self._helm_command, quote(host_port)
+                )
+            else:
+                self.log.debug(
+                    "OCI registry login is not needed for repo: {}".format(name)
+                )
+                return
+        else:
+            # helm repo add name url
+            command = "env KUBECONFIG={} {} repo add {} {}".format(
+                paths["kube_config"], self._helm_command, quote(name), quote(url)
+            )
+
+        if cert:
+            temp_cert_file = os.path.join(
+                self.fs.path, "{}/helmcerts/".format(cluster_uuid), "temp.crt"
+            )
+            os.makedirs(os.path.dirname(temp_cert_file), exist_ok=True)
+            with open(temp_cert_file, "w") as the_cert:
+                the_cert.write(cert)
+            command += " --ca-file {}".format(quote(temp_cert_file))
+
+        if user:
+            command += " --username={}".format(quote(user))
+
+        if password:
+            command += " --password={}".format(quote(password))
+
+        self.log.debug("adding repo: {}".format(command))
+        await self._local_async_exec(
+            command=command, raise_exception_on_error=True, env=env
+        )
+
+        if not oci:
+            # helm repo update
+            command = "env KUBECONFIG={} {} repo update {}".format(
+                paths["kube_config"], self._helm_command, quote(name)
+            )
+            self.log.debug("updating repo: {}".format(command))
+            await self._local_async_exec(
+                command=command, raise_exception_on_error=False, env=env
+            )
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+    async def repo_update(self, cluster_uuid: str, name: str, repo_type: str = "chart"):
+        self.log.debug(
+            "Cluster {}, updating {} repository {}".format(
+                cluster_uuid, repo_type, name
+            )
+        )
 
         # init_env
         paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
+            cluster_name=cluster_uuid, create_if_not_exist=True
         )
 
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
         # helm repo update
-        command = "{} repo update".format(self._helm_command)
+        command = "{} repo update {}".format(self._helm_command, quote(name))
         self.log.debug("updating repo: {}".format(command))
         await self._local_async_exec(
             command=command, raise_exception_on_error=False, env=env
         )
 
-        # helm repo add name url
-        command = "{} repo add {} {}".format(self._helm_command, name, url)
-        self.log.debug("adding repo: {}".format(command))
-        await self._local_async_exec(
-            command=command, raise_exception_on_error=True, env=env
-        )
-
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_id)
+        self.fs.reverse_sync(from_path=cluster_uuid)
 
     async def repo_list(self, cluster_uuid: str) -> list:
         """
@@ -190,18 +265,19 @@ class K8sHelmBaseConnector(K8sConnector):
         :return: list of registered repositories: [ (name, url) .... ]
         """
 
-        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
-        self.log.debug("list repositories for cluster {}".format(cluster_id))
-
-        # sync local dir
-        self.fs.sync(from_path=cluster_id)
+        self.log.debug("list repositories for cluster {}".format(cluster_uuid))
 
         # config filename
         paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
+            cluster_name=cluster_uuid, create_if_not_exist=True
         )
 
-        command = "{} repo list --output yaml".format(self._helm_command)
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        command = "env KUBECONFIG={} {} repo list --output yaml".format(
+            paths["kube_config"], self._helm_command
+        )
 
         # Set exception to false because if there are no repos just want an empty list
         output, _rc = await self._local_async_exec(
@@ -209,7 +285,7 @@ class K8sHelmBaseConnector(K8sConnector):
         )
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_id)
+        self.fs.reverse_sync(from_path=cluster_uuid)
 
         if _rc == 0:
             if output and len(output) > 0:
@@ -222,25 +298,27 @@ class K8sHelmBaseConnector(K8sConnector):
             return []
 
     async def repo_remove(self, cluster_uuid: str, name: str):
-
-        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
-        self.log.debug("remove {} repositories for cluster {}".format(name, cluster_id))
-
-        # sync local dir
-        self.fs.sync(from_path=cluster_id)
+        self.log.debug(
+            "remove {} repositories for cluster {}".format(name, cluster_uuid)
+        )
 
         # init env, paths
         paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
+            cluster_name=cluster_uuid, create_if_not_exist=True
         )
 
-        command = "{} repo remove {}".format(self._helm_command, name)
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        command = "env KUBECONFIG={} {} repo remove {}".format(
+            paths["kube_config"], self._helm_command, quote(name)
+        )
         await self._local_async_exec(
             command=command, raise_exception_on_error=True, env=env
         )
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_id)
+        self.fs.reverse_sync(from_path=cluster_uuid)
 
     async def reset(
         self,
@@ -259,15 +337,15 @@ class K8sHelmBaseConnector(K8sConnector):
         :param kwargs: Additional parameters (None yet)
         :return: Returns True if successful or raises an exception.
         """
-        namespace, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+        namespace = self._get_namespace(cluster_uuid=cluster_uuid)
         self.log.debug(
             "Resetting K8s environment. cluster uuid: {} uninstall={}".format(
-                cluster_id, uninstall_sw
+                cluster_uuid, uninstall_sw
             )
         )
 
         # sync local dir
-        self.fs.sync(from_path=cluster_id)
+        self.fs.sync(from_path=cluster_uuid)
 
         # uninstall releases if needed.
         if uninstall_sw:
@@ -296,24 +374,32 @@ class K8sHelmBaseConnector(K8sConnector):
                 else:
                     msg = (
                         "Cluster uuid: {} has releases and not force. Leaving K8s helm environment"
-                    ).format(cluster_id)
+                    ).format(cluster_uuid)
                     self.log.warn(msg)
                     uninstall_sw = (
                         False  # Allow to remove k8s cluster without removing Tiller
                     )
 
         if uninstall_sw:
-            await self._uninstall_sw(cluster_id, namespace)
+            await self._uninstall_sw(cluster_id=cluster_uuid, namespace=namespace)
 
         # delete cluster directory
-        self.log.debug("Removing directory {}".format(cluster_id))
-        self.fs.file_delete(cluster_id, ignore_non_exist=True)
+        self.log.debug("Removing directory {}".format(cluster_uuid))
+        self.fs.file_delete(cluster_uuid, ignore_non_exist=True)
         # Also remove the local directory if it still exists
-        direct = self.fs.path + "/" + cluster_id
+        direct = self.fs.path + "/" + cluster_uuid
         shutil.rmtree(direct, ignore_errors=True)
 
         return True
 
+    def _is_helm_chart_a_file(self, chart_name: str):
+        return chart_name.count("/") > 1
+
+    @staticmethod
+    def _is_helm_chart_a_url(chart_name: str):
+        result = urlparse(chart_name)
+        return all([result.scheme, result.netloc])
+
     async def _install_impl(
         self,
         cluster_id: str,
@@ -328,21 +414,27 @@ class K8sHelmBaseConnector(K8sConnector):
         kdu_name: str = None,
         namespace: str = None,
     ):
+        # init env, paths
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_id, create_if_not_exist=True
+        )
+
         # params to str
         params_str, file_to_delete = self._params_to_file_option(
             cluster_id=cluster_id, params=params
         )
 
-        # version
-        version = None
-        if ":" in kdu_model:
-            parts = kdu_model.split(sep=":")
-            if len(parts) == 2:
-                version = str(parts[1])
-                kdu_model = parts[0]
+        kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_id)
 
         command = self._get_install_command(
-            kdu_model, kdu_instance, namespace, params_str, version, atomic, timeout
+            kdu_model,
+            kdu_instance,
+            namespace,
+            params_str,
+            version,
+            atomic,
+            timeout,
+            paths["kube_config"],
         )
 
         self.log.debug("installing: {}".format(command))
         )
 
         self.log.debug("installing: {}".format(command))
@@ -363,7 +455,6 @@ class K8sHelmBaseConnector(K8sConnector):
                     namespace=namespace,
                     db_dict=db_dict,
                     operation="install",
-                    run_once=False,
                 )
             )
 
@@ -376,7 +467,6 @@ class K8sHelmBaseConnector(K8sConnector):
             output, rc = exec_task.result()
 
         else:
-
             output, rc = await self._local_async_exec(
                 command=command, raise_exception_on_error=False, env=env
             )
@@ -392,8 +482,6 @@ class K8sHelmBaseConnector(K8sConnector):
             namespace=namespace,
             db_dict=db_dict,
             operation="install",
-            run_once=True,
-            check_every=0,
         )
 
         if rc != 0:
@@ -410,50 +498,53 @@ class K8sHelmBaseConnector(K8sConnector):
         timeout: float = 300,
         params: dict = None,
         db_dict: dict = None,
+        namespace: str = None,
+        force: bool = False,
     ):
-        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
-        self.log.debug("upgrading {} in cluster {}".format(kdu_model, cluster_id))
+        self.log.debug("upgrading {} in cluster {}".format(kdu_model, cluster_uuid))
 
         # sync local dir
-        self.fs.sync(from_path=cluster_id)
+        self.fs.sync(from_path=cluster_uuid)
 
         # look for instance to obtain namespace
-        instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
-        if not instance_info:
-            raise K8sException("kdu_instance {} not found".format(kdu_instance))
+
+        # set namespace
+        if not namespace:
+            instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
+            if not instance_info:
+                raise K8sException("kdu_instance {} not found".format(kdu_instance))
+            namespace = instance_info["namespace"]
 
         # init env, paths
         paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
+            cluster_name=cluster_uuid, create_if_not_exist=True
         )
 
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
         # params to str
         params_str, file_to_delete = self._params_to_file_option(
-            cluster_id=cluster_id, params=params
+            cluster_id=cluster_uuid, params=params
         )
 
-        # version
-        version = None
-        if ":" in kdu_model:
-            parts = kdu_model.split(sep=":")
-            if len(parts) == 2:
-                version = str(parts[1])
-                kdu_model = parts[0]
+        kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_uuid)
 
         command = self._get_upgrade_command(
             kdu_model,
             kdu_instance,
 
         command = self._get_upgrade_command(
             kdu_model,
             kdu_instance,
-            instance_info["namespace"],
+            namespace,
             params_str,
             version,
             atomic,
             timeout,
+            paths["kube_config"],
+            force,
         )
 
         self.log.debug("upgrading: {}".format(command))
 
         if atomic:
         )
 
         self.log.debug("upgrading: {}".format(command))
 
         if atomic:
-
             # exec helm in a task
             exec_task = asyncio.ensure_future(
                 coro_or_future=self._local_async_exec(
@@ -463,12 +554,11 @@ class K8sHelmBaseConnector(K8sConnector):
             # write status in another task
             status_task = asyncio.ensure_future(
                 coro_or_future=self._store_status(
-                    cluster_id=cluster_id,
+                    cluster_id=cluster_uuid,
                     kdu_instance=kdu_instance,
-                    namespace=instance_info["namespace"],
+                    namespace=namespace,
                     db_dict=db_dict,
                     operation="upgrade",
-                    run_once=False,
                 )
             )
 
@@ -480,7 +570,6 @@ class K8sHelmBaseConnector(K8sConnector):
             output, rc = exec_task.result()
 
         else:
-
             output, rc = await self._local_async_exec(
                 command=command, raise_exception_on_error=False, env=env
             )
@@ -491,13 +580,11 @@ class K8sHelmBaseConnector(K8sConnector):
 
         # write final status
         await self._store_status(
-            cluster_id=cluster_id,
+            cluster_id=cluster_uuid,
             kdu_instance=kdu_instance,
-            namespace=instance_info["namespace"],
+            namespace=namespace,
             db_dict=db_dict,
             operation="upgrade",
-            run_once=True,
-            check_every=0,
         )
 
         if rc != 0:
@@ -506,7 +593,7 @@ class K8sHelmBaseConnector(K8sConnector):
             raise K8sException(msg)
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_id)
+        self.fs.reverse_sync(from_path=cluster_uuid)
 
         # return new revision number
         instance = await self.get_instance_info(
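The atomic branch above pairs the helm command with a status-reporting task and cancels the latter when the former completes. A self-contained sketch of that pattern, where the fake coroutines stand in for _local_async_exec and _store_status:

    import asyncio

    async def run_with_status(exec_coro, status_coro):
        exec_task = asyncio.ensure_future(exec_coro)
        status_task = asyncio.ensure_future(status_coro)
        await asyncio.wait([exec_task])  # wait only for the helm command
        status_task.cancel()             # then stop the status writer
        return exec_task.result()

    async def demo():
        async def fake_exec():
            await asyncio.sleep(0.1)
            return "output", 0

        async def fake_status():
            while True:
                await asyncio.sleep(0.02)  # would write status to the DB here

        print(await run_with_status(fake_exec(), fake_status()))

    asyncio.run(demo())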
@@ -525,31 +612,203 @@ class K8sHelmBaseConnector(K8sConnector):
         scale: int,
         resource_name: str,
         total_timeout: float = 1800,
+        cluster_uuid: str = None,
+        kdu_model: str = None,
+        atomic: bool = True,
+        db_dict: dict = None,
         **kwargs,
     ):
-        raise NotImplementedError("Method not implemented")
+        """Scale a resource in a Helm Chart.
+
+        Args:
+            kdu_instance: KDU instance name
+            scale: Scale to which to set the resource
+            resource_name: Resource name
+            total_timeout: The time, in seconds, to wait
+            cluster_uuid: The UUID of the cluster
+            kdu_model: The chart reference
+            atomic: if set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            db_dict: Dictionary for any additional data
+            kwargs: Additional parameters
+
+        Returns:
+            True if successful, False otherwise
+        """
+
+        debug_mgs = "scaling {} in cluster {}".format(kdu_model, cluster_uuid)
+        if resource_name:
+            debug_mgs = "scaling resource {} in model {} (cluster {})".format(
+                resource_name, kdu_model, cluster_uuid
+            )
+
+        self.log.debug(debug_mgs)
+
+        # look for instance to obtain namespace
+        # get_instance_info function calls the sync command
+        instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
+        if not instance_info:
+            raise K8sException("kdu_instance {} not found".format(kdu_instance))
+
+        # init env, paths
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        # version
+        kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_uuid)
+
+        repo_url = await self._find_repo(kdu_model, cluster_uuid)
+
+        _, replica_str = await self._get_replica_count_url(
+            kdu_model, repo_url, resource_name
+        )
+
+        command = self._get_upgrade_scale_command(
+            kdu_model,
+            kdu_instance,
+            instance_info["namespace"],
+            scale,
+            version,
+            atomic,
+            replica_str,
+            total_timeout,
+            resource_name,
+            paths["kube_config"],
+        )
+
+        self.log.debug("scaling: {}".format(command))
+
+        if atomic:
+            # exec helm in a task
+            exec_task = asyncio.ensure_future(
+                coro_or_future=self._local_async_exec(
+                    command=command, raise_exception_on_error=False, env=env
+                )
+            )
+            # write status in another task
+            status_task = asyncio.ensure_future(
+                coro_or_future=self._store_status(
+                    cluster_id=cluster_uuid,
+                    kdu_instance=kdu_instance,
+                    namespace=instance_info["namespace"],
+                    db_dict=db_dict,
+                    operation="scale",
+                )
+            )
+
+            # wait for execution task
+            await asyncio.wait([exec_task])
+
+            # cancel status task
+            status_task.cancel()
+            output, rc = exec_task.result()
+
+        else:
+            output, rc = await self._local_async_exec(
+                command=command, raise_exception_on_error=False, env=env
+            )
+
+        # write final status
+        await self._store_status(
+            cluster_id=cluster_uuid,
+            kdu_instance=kdu_instance,
+            namespace=instance_info["namespace"],
+            db_dict=db_dict,
+            operation="scale",
+        )
+
+        if rc != 0:
+            msg = "Error executing command: {}\nOutput: {}".format(command, output)
+            self.log.error(msg)
+            raise K8sException(msg)
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+        return True
 
     async def get_scale_count(
         self,
         resource_name: str,
         kdu_instance: str,
+        cluster_uuid: str,
+        kdu_model: str,
         **kwargs,
-    ):
-        raise NotImplementedError("Method not implemented")
+    ) -> int:
+        """Get a resource scale count.
+
+        Args:
+            cluster_uuid: The UUID of the cluster
+            resource_name: Resource name
+            kdu_instance: KDU instance name
+            kdu_model: The name or path of a Helm Chart
+            kwargs: Additional parameters
+
+        Returns:
+            Resource instance count
+        """
+
+        self.log.debug(
+            "getting scale count for {} in cluster {}".format(kdu_model, cluster_uuid)
+        )
+
+        # look for instance to obtain namespace
+        instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
+        if not instance_info:
+            raise K8sException("kdu_instance {} not found".format(kdu_instance))
+
+        # init env, paths
+        paths, _ = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        replicas = await self._get_replica_count_instance(
+            kdu_instance=kdu_instance,
+            namespace=instance_info["namespace"],
+            kubeconfig=paths["kube_config"],
+            resource_name=resource_name,
+        )
+
+        self.log.debug(
+            f"Number of replicas of the KDU instance {kdu_instance} and resource {resource_name} obtained: {replicas}"
+        )
+
+        # Get default value if scale count is not found from provided values
+        # Important note: this piece of code shall only be executed in the first scaling operation,
+        # since it is expected that the _get_replica_count_instance is able to obtain the number of
+        # replicas when a scale operation was already conducted previously for this KDU/resource!
+        if replicas is None:
+            repo_url = await self._find_repo(
+                kdu_model=kdu_model, cluster_uuid=cluster_uuid
+            )
+            replicas, _ = await self._get_replica_count_url(
+                kdu_model=kdu_model, repo_url=repo_url, resource_name=resource_name
+            )
+
+            self.log.debug(
+                f"Number of replicas of the Helm Chart package for KDU instance {kdu_instance} and resource "
+                f"{resource_name} obtained: {replicas}"
+            )
+
+            if replicas is None:
+                msg = "Replica count not found. Cannot be scaled"
+                self.log.error(msg)
+                raise K8sException(msg)
+
+        return int(replicas)
 
     async def rollback(
         self, cluster_uuid: str, kdu_instance: str, revision=0, db_dict: dict = None
     ):
 
     async def rollback(
         self, cluster_uuid: str, kdu_instance: str, revision=0, db_dict: dict = None
     ):
-
-        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
         self.log.debug(
             "rollback kdu_instance {} to revision {} from cluster {}".format(
         self.log.debug(
             "rollback kdu_instance {} to revision {} from cluster {}".format(
-                kdu_instance, revision, cluster_id
+                kdu_instance, revision, cluster_uuid
             )
         )
 
         # sync local dir
             )
         )
 
         # sync local dir
-        self.fs.sync(from_path=cluster_id)
+        self.fs.sync(from_path=cluster_uuid)
 
         # look for instance to obtain namespace
         instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
 
         # look for instance to obtain namespace
         instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
@@ -558,11 +817,14 @@ class K8sHelmBaseConnector(K8sConnector):
 
         # init env, paths
         paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
+            cluster_name=cluster_uuid, create_if_not_exist=True
         )
 
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
         command = self._get_rollback_command(
-            kdu_instance, instance_info["namespace"], revision
+            kdu_instance, instance_info["namespace"], revision, paths["kube_config"]
         )
 
         self.log.debug("rolling_back: {}".format(command))
         )
 
         self.log.debug("rolling_back: {}".format(command))
@@ -576,12 +838,11 @@ class K8sHelmBaseConnector(K8sConnector):
         # write status in another task
         status_task = asyncio.ensure_future(
             coro_or_future=self._store_status(
-                cluster_id=cluster_id,
+                cluster_id=cluster_uuid,
                 kdu_instance=kdu_instance,
                 namespace=instance_info["namespace"],
                 db_dict=db_dict,
                 operation="rollback",
-                run_once=False,
             )
         )
 
@@ -595,13 +856,11 @@ class K8sHelmBaseConnector(K8sConnector):
 
         # write final status
         await self._store_status(
-            cluster_id=cluster_id,
+            cluster_id=cluster_uuid,
             kdu_instance=kdu_instance,
             namespace=instance_info["namespace"],
             db_dict=db_dict,
             operation="rollback",
-            run_once=True,
-            check_every=0,
         )
 
         if rc != 0:
@@ -610,7 +869,7 @@ class K8sHelmBaseConnector(K8sConnector):
             raise K8sException(msg)
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_id)
+        self.fs.reverse_sync(from_path=cluster_uuid)
 
         # return new revision number
         instance = await self.get_instance_info(
@@ -635,13 +894,14 @@ class K8sHelmBaseConnector(K8sConnector):
         :return: True if successful
         """
 
-        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
         self.log.debug(
-            "uninstall kdu_instance {} from cluster {}".format(kdu_instance, cluster_id)
+            "uninstall kdu_instance {} from cluster {}".format(
+                kdu_instance, cluster_uuid
+            )
         )
 
         # sync local dir
-        self.fs.sync(from_path=cluster_id)
+        self.fs.sync(from_path=cluster_uuid)
 
         # look for instance to obtain namespace
         instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
@@ -650,16 +910,21 @@ class K8sHelmBaseConnector(K8sConnector):
             return True
         # init env, paths
         paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
+            cluster_name=cluster_uuid, create_if_not_exist=True
         )
 
-        command = self._get_uninstall_command(kdu_instance, instance_info["namespace"])
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        command = self._get_uninstall_command(
+            kdu_instance, instance_info["namespace"], paths["kube_config"]
+        )
         output, _rc = await self._local_async_exec(
             command=command, raise_exception_on_error=True, env=env
         )
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_id)
+        self.fs.reverse_sync(from_path=cluster_uuid)
 
         return self._output_to_table(output)
 
@@ -671,17 +936,16 @@ class K8sHelmBaseConnector(K8sConnector):
         :return:
         """
 
-        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
-        self.log.debug("list releases for cluster {}".format(cluster_id))
+        self.log.debug("list releases for cluster {}".format(cluster_uuid))
 
         # sync local dir
-        self.fs.sync(from_path=cluster_id)
+        self.fs.sync(from_path=cluster_uuid)
 
         # execute internal command
-        result = await self._instances_list(cluster_id)
+        result = await self._instances_list(cluster_uuid)
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_id)
+        self.fs.reverse_sync(from_path=cluster_uuid)
 
         return result
 
@@ -693,6 +957,28 @@ class K8sHelmBaseConnector(K8sConnector):
         self.log.debug("Instance {} not found".format(kdu_instance))
         return None
 
         self.log.debug("Instance {} not found".format(kdu_instance))
         return None
 
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrade charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if status equals to "completed"
+        """
+        raise K8sException("KDUs deployed with Helm do not support charm upgrade")
+
     async def exec_primitive(
         self,
         cluster_uuid: str = None,
     async def exec_primitive(
         self,
         cluster_uuid: str = None,
@@ -739,52 +1025,57 @@ class K8sHelmBaseConnector(K8sConnector):
         - `external_ip` List of external ips (in case they are available)
         """
 
         - `external_ip` List of external ips (in case they are available)
         """
 
-        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
         self.log.debug(
             "get_services: cluster_uuid: {}, kdu_instance: {}".format(
                 cluster_uuid, kdu_instance
             )
         )
 
         self.log.debug(
             "get_services: cluster_uuid: {}, kdu_instance: {}".format(
                 cluster_uuid, kdu_instance
             )
         )
 
+        # init env, paths
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
         # sync local dir
         # sync local dir
-        self.fs.sync(from_path=cluster_id)
+        self.fs.sync(from_path=cluster_uuid)
 
         # get list of services names for kdu
 
         # get list of services names for kdu
-        service_names = await self._get_services(cluster_id, kdu_instance, namespace)
+        service_names = await self._get_services(
+            cluster_uuid, kdu_instance, namespace, paths["kube_config"]
+        )
 
         service_list = []
         for service in service_names:
 
         service_list = []
         for service in service_names:
-            service = await self._get_service(cluster_id, service, namespace)
+            service = await self._get_service(cluster_uuid, service, namespace)
             service_list.append(service)
 
         # sync fs
             service_list.append(service)
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_id)
+        self.fs.reverse_sync(from_path=cluster_uuid)
 
         return service_list
 
     async def get_service(
         self, cluster_uuid: str, service_name: str, namespace: str
     ) -> object:
 
         return service_list
 
     async def get_service(
         self, cluster_uuid: str, service_name: str, namespace: str
     ) -> object:
-
         self.log.debug(
             "get service, service_name: {}, namespace: {}, cluster_uuid: {}".format(
                 service_name, namespace, cluster_uuid
             )
         )
 
         self.log.debug(
             "get service, service_name: {}, namespace: {}, cluster_uuid: {}".format(
                 service_name, namespace, cluster_uuid
             )
         )
 
-        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
-
         # sync local dir
         # sync local dir
-        self.fs.sync(from_path=cluster_id)
+        self.fs.sync(from_path=cluster_uuid)
 
 
-        service = await self._get_service(cluster_id, service_name, namespace)
+        service = await self._get_service(cluster_uuid, service_name, namespace)
 
         # sync fs
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_id)
+        self.fs.reverse_sync(from_path=cluster_uuid)
 
         return service
 
 
         return service
 
-    async def status_kdu(self, cluster_uuid: str, kdu_instance: str, **kwargs) -> str:
+    async def status_kdu(
+        self, cluster_uuid: str, kdu_instance: str, yaml_format: str = False, **kwargs
+    ) -> Union[str, dict]:
         """
         This call would retrieve tha current state of a given KDU instance. It would be
         would allow to retrieve the _composition_ (i.e. K8s objects) and _specific
         """
         This call would retrieve tha current state of a given KDU instance. It would be
         would allow to retrieve the _composition_ (i.e. K8s objects) and _specific
@@ -794,6 +1085,8 @@ class K8sHelmBaseConnector(K8sConnector):
         :param cluster_uuid: UUID of a K8s cluster known by OSM
         :param kdu_instance: unique name for the KDU instance
         :param kwargs: Additional parameters (None yet)
+        :param yaml_format: whether the result shall be returned as a YAML string or
+                                as a dictionary
         :return: If successful, it will return the following vector of arguments:
         - K8s `namespace` in the cluster where the KDU lives
         - `state` of the KDU instance. It can be:
@@ -814,13 +1107,11 @@ class K8sHelmBaseConnector(K8sConnector):
             )
         )
 
-        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
-
         # sync local dir
-        self.fs.sync(from_path=cluster_id)
+        self.fs.sync(from_path=cluster_uuid)
 
         # get instance: needed to obtain namespace
-        instances = await self._instances_list(cluster_id=cluster_id)
+        instances = await self._instances_list(cluster_id=cluster_uuid)
         for instance in instances:
             if instance.get("name") == kdu_instance:
                 break
@@ -828,24 +1119,45 @@ class K8sHelmBaseConnector(K8sConnector):
             # instance does not exist
             raise K8sException(
                 "Instance name: {} not found in cluster: {}".format(
-                    kdu_instance, cluster_id
+                    kdu_instance, cluster_uuid
                 )
             )
 
         status = await self._status_kdu(
-            cluster_id=cluster_id,
+            cluster_id=cluster_uuid,
             kdu_instance=kdu_instance,
             namespace=instance["namespace"],
+            yaml_format=yaml_format,
             show_error_log=True,
-            return_text=True,
         )
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_id)
+        self.fs.reverse_sync(from_path=cluster_uuid)
 
         return status
 
+    async def get_values_kdu(
+        self, kdu_instance: str, namespace: str, kubeconfig: str
+    ) -> str:
+        self.log.debug("get kdu_instance values {}".format(kdu_instance))
+
+        return await self._exec_get_command(
+            get_command="values",
+            kdu_instance=kdu_instance,
+            namespace=namespace,
+            kubeconfig=kubeconfig,
+        )
+
     async def values_kdu(self, kdu_model: str, repo_url: str = None) -> str:
+        """Method to obtain the Helm Chart package's values
+
+        Args:
+            kdu_model: The name or path of a Helm Chart
+            repo_url: Helm Chart repository url
+
+        Returns:
+            str: the values of the Helm Chart package
+        """
 
         self.log.debug(
             "inspect kdu_model values {} from (optional) repo: {}".format(
@@ -853,22 +1165,20 @@ class K8sHelmBaseConnector(K8sConnector):
             )
         )
 
-        return await self._exec_inspect_comand(
+        return await self._exec_inspect_command(
             inspect_command="values", kdu_model=kdu_model, repo_url=repo_url
         )
 
     async def help_kdu(self, kdu_model: str, repo_url: str = None) -> str:
             inspect_command="values", kdu_model=kdu_model, repo_url=repo_url
         )
 
     async def help_kdu(self, kdu_model: str, repo_url: str = None) -> str:
-
         self.log.debug(
             "inspect kdu_model {} readme.md from repo: {}".format(kdu_model, repo_url)
         )
 
-        return await self._exec_inspect_comand(
+        return await self._exec_inspect_command(
             inspect_command="readme", kdu_model=kdu_model, repo_url=repo_url
         )
 
     async def synchronize_repos(self, cluster_uuid: str):
             inspect_command="readme", kdu_model=kdu_model, repo_url=repo_url
         )
 
     async def synchronize_repos(self, cluster_uuid: str):
-
         self.log.debug("synchronize repos for cluster helm-id: {}".format(cluster_uuid))
         try:
             db_repo_ids = self._get_helm_chart_repos_ids(cluster_uuid)
         self.log.debug("synchronize repos for cluster helm-id: {}".format(cluster_uuid))
         try:
             db_repo_ids = self._get_helm_chart_repos_ids(cluster_uuid)
@@ -900,7 +1210,13 @@ class K8sHelmBaseConnector(K8sConnector):
                         # add repo
                         self.log.debug("add repo {}".format(db_repo["name"]))
                         await self.repo_add(
-                            cluster_uuid, db_repo["name"], db_repo["url"]
+                            cluster_uuid,
+                            db_repo["name"],
+                            db_repo["url"],
+                            cert=db_repo.get("ca_cert"),
+                            user=db_repo.get("user"),
+                            password=db_repo.get("password"),
+                            oci=db_repo.get("oci", False),
                         )
                         added_repo_dict[repo_id] = db_repo["name"]
                 except Exception as e:
@@ -970,7 +1286,7 @@ class K8sHelmBaseConnector(K8sConnector):
         """
 
     @abc.abstractmethod
-    async def _get_services(self, cluster_id, kdu_instance, namespace):
+    async def _get_services(self, cluster_id, kdu_instance, namespace, kubeconfig):
         """
         Implements the helm version dependent method to obtain services from a helm instance
         """
         """
         Implements the helm version dependent method to obtain services from a helm instance
         """
@@ -981,37 +1297,104 @@ class K8sHelmBaseConnector(K8sConnector):
         cluster_id: str,
         kdu_instance: str,
         namespace: str = None,
+        yaml_format: bool = False,
         show_error_log: bool = False,
-        return_text: bool = False,
-    ):
+    ) -> Union[str, dict]:
         """
         Implements the helm version dependent method to obtain status of a helm instance
         """
 
     @abc.abstractmethod
     def _get_install_command(
         """
         Implements the helm version dependent method to obtain status of a helm instance
         """
 
     @abc.abstractmethod
     def _get_install_command(
-        self, kdu_model, kdu_instance, namespace, params_str, version, atomic, timeout
+        self,
+        kdu_model,
+        kdu_instance,
+        namespace,
+        params_str,
+        version,
+        atomic,
+        timeout,
+        kubeconfig,
     ) -> str:
         """
         Obtain command to be executed to install the indicated instance
         """
 
     @abc.abstractmethod
-    def _get_upgrade_command(
-        self, kdu_model, kdu_instance, namespace, params_str, version, atomic, timeout
+    def _get_upgrade_scale_command(
+        self,
+        kdu_model,
+        kdu_instance,
+        namespace,
+        count,
+        version,
+        atomic,
+        replicas,
+        timeout,
+        resource_name,
+        kubeconfig,
     ) -> str:
+        """Generates the command to scale a Helm Chart release
+
+        Args:
+            kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
+            kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
+            namespace (str): Namespace where this KDU instance is deployed
+            count (int): Scale count
+            version (str): Constraint with specific version of the Chart to use
+            atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            replicas (str): The key under resource_name key where the scale count is stored
+            timeout (float): The time, in seconds, to wait
+            resource_name (str): The KDU's resource to scale
+            kubeconfig (str): Kubeconfig file path
+
+        Returns:
+            str: command to scale a Helm Chart release
         """
         """
-        Obtain command to be executed to upgrade the indicated instance
+
+    @abc.abstractmethod
+    def _get_upgrade_command(
+        self,
+        kdu_model,
+        kdu_instance,
+        namespace,
+        params_str,
+        version,
+        atomic,
+        timeout,
+        kubeconfig,
+        force,
+    ) -> str:
+        """Generates the command to upgrade a Helm Chart release
+
+        Args:
+            kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
+            kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
+            namespace (str): Namespace where this KDU instance is deployed
+            params_str (str): Params used to upgrade the Helm Chart release
+            version (str): Constraint with specific version of the Chart to use
+            atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            timeout (float): The time, in seconds, to wait
+            kubeconfig (str): Kubeconfig file path
+            force (bool): If set, helm forces resource updates through a replacement strategy. This may recreate pods.
+        Returns:
+            str: command to upgrade a Helm Chart release
         """
 
     @abc.abstractmethod
         """
 
     @abc.abstractmethod
-    def _get_rollback_command(self, kdu_instance, namespace, revision) -> str:
+    def _get_rollback_command(
+        self, kdu_instance, namespace, revision, kubeconfig
+    ) -> str:
         """
         Obtain command to be executed to rollback the indicated instance
         """
 
     @abc.abstractmethod
         """
         Obtain command to be executed to rollback the indicated instance
         """
 
     @abc.abstractmethod
-    def _get_uninstall_command(self, kdu_instance: str, namespace: str) -> str:
+    def _get_uninstall_command(
+        self, kdu_instance: str, namespace: str, kubeconfig: str
+    ) -> str:
         """
         Obtain command to be executed to delete the indicated instance
         """
         """
         Obtain command to be executed to delete the indicated instance
         """
@@ -1020,10 +1403,25 @@ class K8sHelmBaseConnector(K8sConnector):
     def _get_inspect_command(
         self, show_command: str, kdu_model: str, repo_str: str, version: str
     ):
-        """
-        Obtain command to be executed to obtain information about the kdu
+        """Generates the command to obtain the information about an Helm Chart package
+            (´helm show ...´ command)
+
+        Args:
+            show_command: the second part of the command (`helm show <show_command>`)
+            kdu_model: The name or path of a Helm Chart
+            repo_str: Helm Chart repository url
+            version: constraint with specific version of the Chart to use
+
+        Returns:
+            str: the generated Helm Chart command
         """
 
         """
 
+    @abc.abstractmethod
+    def _get_get_command(
+        self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
+    ):
+        """Obtain command to be executed to get information about the kdu instance."""
+
     @abc.abstractmethod
     async def _uninstall_sw(self, cluster_id: str, namespace: str):
         """
@@ -1148,8 +1546,7 @@ class K8sHelmBaseConnector(K8sConnector):
         show_error_log: bool = True,
         encode_utf8: bool = False,
         env: dict = None,
-    ) -> (str, int):
-
+    ) -> tuple[str, int]:
         command = K8sHelmBaseConnector._remove_multiple_spaces(command)
         self.log.debug(
             "Executing async local command: {}, env: {}".format(command, env)
@@ -1163,17 +1560,18 @@ class K8sHelmBaseConnector(K8sConnector):
             environ.update(env)
 
         try:
-            process = await asyncio.create_subprocess_exec(
-                *command,
-                stdout=asyncio.subprocess.PIPE,
-                stderr=asyncio.subprocess.PIPE,
-                env=environ,
-            )
+            async with self.cmd_lock:
+                process = await asyncio.create_subprocess_exec(
+                    *command,
+                    stdout=asyncio.subprocess.PIPE,
+                    stderr=asyncio.subprocess.PIPE,
+                    env=environ,
+                )
 
-            # wait for command terminate
-            stdout, stderr = await process.communicate()
+                # wait for command terminate
+                stdout, stderr = await process.communicate()
 
 
-            return_code = process.returncode
+                return_code = process.returncode
 
             output = ""
             if stdout:
 
             output = ""
             if stdout:
@@ -1200,6 +1598,9 @@ class K8sHelmBaseConnector(K8sConnector):
             return output, return_code
 
         except asyncio.CancelledError:
+            # first, kill the process if it is still running
+            if process.returncode is None:
+                process.kill()
             raise
         except K8sException:
             raise
@@ -1220,7 +1621,6 @@ class K8sHelmBaseConnector(K8sConnector):
         encode_utf8: bool = False,
         env: dict = None,
     ):
-
         command1 = K8sHelmBaseConnector._remove_multiple_spaces(command1)
         command2 = K8sHelmBaseConnector._remove_multiple_spaces(command2)
         command = "{} | {}".format(command1, command2)
@@ -1237,16 +1637,19 @@ class K8sHelmBaseConnector(K8sConnector):
             environ.update(env)
 
         try:
-            read, write = os.pipe()
-            await asyncio.create_subprocess_exec(*command1, stdout=write, env=environ)
-            os.close(write)
-            process_2 = await asyncio.create_subprocess_exec(
-                *command2, stdin=read, stdout=asyncio.subprocess.PIPE, env=environ
-            )
-            os.close(read)
-            stdout, stderr = await process_2.communicate()
+            async with self.cmd_lock:
+                read, write = os.pipe()
+                process_1 = await asyncio.create_subprocess_exec(
+                    *command1, stdout=write, env=environ
+                )
+                os.close(write)
+                process_2 = await asyncio.create_subprocess_exec(
+                    *command2, stdin=read, stdout=asyncio.subprocess.PIPE, env=environ
+                )
+                os.close(read)
+                stdout, stderr = await process_2.communicate()

-            return_code = process_2.returncode
+                return_code = process_2.returncode
 
             output = ""
            if stdout:
@@ -1272,6 +1675,10 @@ class K8sHelmBaseConnector(K8sConnector):
 
             return output, return_code
        except asyncio.CancelledError:
+            # first, kill the processes if they are still running
+            for process in (process_1, process_2):
+                if process.returncode is None:
+                    process.kill()
             raise
         except K8sException:
             raise
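The piped variant follows the same locking pattern; a hedged sketch (helper and arguments illustrative) of connecting two subprocesses through os.pipe, closing the parent's copies of both pipe ends so the downstream process sees EOF:

    import asyncio
    import os

    async def run_pipe(cmd1: list, cmd2: list) -> tuple[str, int]:
        read_end, write_end = os.pipe()
        p1 = await asyncio.create_subprocess_exec(*cmd1, stdout=write_end)
        os.close(write_end)  # parent must close its copy, or cmd2 never sees EOF
        p2 = await asyncio.create_subprocess_exec(
            *cmd2, stdin=read_end, stdout=asyncio.subprocess.PIPE
        )
        os.close(read_end)
        stdout, _ = await p2.communicate()
        await p1.wait()  # reap the upstream child as well
        return stdout.decode(), p2.returncode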
@@ -1305,7 +1712,10 @@ class K8sHelmBaseConnector(K8sConnector):
         )
 
         command = "{} --kubeconfig={} --namespace={} get service {} -o=yaml".format(
-            self.kubectl_command, paths["kube_config"], namespace, service_name
+            self.kubectl_command,
+            paths["kube_config"],
+            quote(namespace),
+            quote(service_name),
         )
 
         output, _rc = await self._local_async_exec(
@@ -1327,90 +1737,243 @@ class K8sHelmBaseConnector(K8sConnector):
 
        return service

-    async def _exec_inspect_comand(
+    async def _exec_get_command(
+        self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
+    ):
+        """Obtains information about the kdu instance."""
+
+        full_command = self._get_get_command(
+            get_command, kdu_instance, namespace, kubeconfig
+        )
+
+        output, _rc = await self._local_async_exec(command=full_command)
+
+        return output
+
+    async def _exec_inspect_command(
         self, inspect_command: str, kdu_model: str, repo_url: str = None
     ):
-        """
-        Obtains information about a kdu, no cluster (no env)
+        """Obtains information about a Helm Chart package (`helm show` command)
+
+        Args:
+            inspect_command: the Helm sub-command (`helm show <inspect_command> ...`)
+            kdu_model: The name or path of a Helm Chart
+            repo_url: Helm Chart repository url
+
+        Returns:
+            str: the requested info about the Helm Chart package
         """
 
         repo_str = ""
        if repo_url:
-            repo_str = " --repo {}".format(repo_url)
+            repo_str = " --repo {}".format(quote(repo_url))

-        idx = kdu_model.find("/")
-        if idx >= 0:
-            idx += 1
-            kdu_model = kdu_model[idx:]
+            # Obtain the Chart's name and store it in the var kdu_model
+            kdu_model, _ = self._split_repo(kdu_model=kdu_model)

-        version = ""
-        if ":" in kdu_model:
-            parts = kdu_model.split(sep=":")
-            if len(parts) == 2:
-                version = "--version {}".format(str(parts[1]))
-                kdu_model = parts[0]
+        kdu_model, version = self._split_version(kdu_model)
+        if version:
+            version_str = "--version {}".format(quote(version))
+        else:
+            version_str = ""
 
        full_command = self._get_inspect_command(
-            inspect_command, kdu_model, repo_str, version
-        )
-        output, _rc = await self._local_async_exec(
-            command=full_command, encode_utf8=True
+            show_command=inspect_command,
+            kdu_model=quote(kdu_model),
+            repo_str=repo_str,
+            version=version_str,
         )
 
+        output, _ = await self._local_async_exec(command=full_command)
+
         return output
 
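As an illustration (assuming the Helm 3 `show` syntax and an invented repository URL), inspecting `stable/openldap:1.2.4` against an explicit repo would assemble roughly:

    # helm show values openldap --repo https://charts.example.com --version 1.2.4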
+    async def _get_replica_count_url(
+        self,
+        kdu_model: str,
+        repo_url: str = None,
+        resource_name: str = None,
+    ) -> tuple[int, str]:
+        """Get the replica count value in the Helm Chart Values.
+
+        Args:
+            kdu_model: The name or path of a Helm Chart
+            repo_url: Helm Chart repository url
+            resource_name: Resource name
+
+        Returns:
+            A tuple with:
+            - The number of replicas of the specific instance; if not found, returns None; and
+            - The string corresponding to the replica count key in the Helm values
+        """
+
+        kdu_values = yaml.load(
+            await self.values_kdu(kdu_model=kdu_model, repo_url=repo_url),
+            Loader=yaml.SafeLoader,
+        )
+
+        self.log.debug(f"Obtained the Helm package values for the KDU: {kdu_values}")
+
+        if not kdu_values:
+            raise K8sException(
+                "kdu_values not found for kdu_model {}".format(kdu_model)
+            )
+
+        if resource_name:
+            kdu_values = kdu_values.get(resource_name, None)
+
+        if not kdu_values:
+            msg = "resource {} not found in the values in model {}".format(
+                resource_name, kdu_model
+            )
+            self.log.error(msg)
+            raise K8sException(msg)
+
+        duplicate_check = False
+
+        replica_str = ""
+        replicas = None
+
+        if kdu_values.get("replicaCount") is not None:
+            replicas = kdu_values["replicaCount"]
+            replica_str = "replicaCount"
+        elif kdu_values.get("replicas") is not None:
+            duplicate_check = True
+            replicas = kdu_values["replicas"]
+            replica_str = "replicas"
+        else:
+            if resource_name:
+                msg = (
+                    "replicaCount or replicas not found in the resource "
+                    "{} values in model {}. Cannot be scaled".format(
+                        resource_name, kdu_model
+                    )
+                )
+            else:
+                msg = (
+                    "replicaCount or replicas not found in the values "
+                    "in model {}. Cannot be scaled".format(kdu_model)
+                )
+            self.log.error(msg)
+            raise K8sException(msg)
+
+        # Check whether replicaCount and replicas exist at the same time
+        msg = "replicaCount and replicas exist at the same time"
+        if duplicate_check:
+            if "replicaCount" in kdu_values:
+                self.log.error(msg)
+                raise K8sException(msg)
+        else:
+            if "replicas" in kdu_values:
+                self.log.error(msg)
+                raise K8sException(msg)
+
+        return replicas, replica_str
+
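Illustrative outcomes of the lookup and duplicate guard above (all values made up):

    # kdu_values = {"replicaCount": 3}                           -> (3, "replicaCount")
    # kdu_values = {"app": {"replicas": 2}}, resource_name="app" -> (2, "replicas")
    # kdu_values = {"replicaCount": 3, "replicas": 2}            -> raises K8sException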
+    async def _get_replica_count_instance(
+        self,
+        kdu_instance: str,
+        namespace: str,
+        kubeconfig: str,
+        resource_name: str = None,
+    ) -> int:
+        """Get the replica count value in the instance.
+
+        Args:
+            kdu_instance: The name of the KDU instance
+            namespace: KDU instance namespace
+            kubeconfig: Kubeconfig file path
+            resource_name: Resource name
+
+        Returns:
+            The number of replicas of the specific instance; if not found, returns None
+        """
+
+        kdu_values = yaml.load(
+            await self.get_values_kdu(kdu_instance, namespace, kubeconfig),
+            Loader=yaml.SafeLoader,
+        )
+
+        self.log.debug(f"Obtained the Helm values for the KDU instance: {kdu_values}")
+
+        replicas = None
+
+        if kdu_values:
+            resource_values = (
+                kdu_values.get(resource_name, None) if resource_name else None
+            )
+
+            for replica_str in ("replicaCount", "replicas"):
+                if resource_values:
+                    replicas = resource_values.get(replica_str)
+                else:
+                    replicas = kdu_values.get(replica_str)
+
+                if replicas is not None:
+                    break
+
+        return replicas
+
     async def _store_status(
         self,
         cluster_id: str,
         operation: str,
         kdu_instance: str,
         namespace: str = None,
-        check_every: float = 10,
         db_dict: dict = None,
-        run_once: bool = False,
-    ):
-        while True:
-            try:
-                await asyncio.sleep(check_every)
-                detailed_status = await self._status_kdu(
-                    cluster_id=cluster_id,
-                    kdu_instance=kdu_instance,
-                    namespace=namespace,
-                    return_text=False,
-                )
-                status = detailed_status.get("info").get("description")
-                self.log.debug("KDU {} STATUS: {}.".format(kdu_instance, status))
-                # write status to db
-                result = await self.write_app_status_to_db(
-                    db_dict=db_dict,
-                    status=str(status),
-                    detailed_status=str(detailed_status),
-                    operation=operation,
-                )
-                if not result:
-                    self.log.info("Error writing in database. Task exiting...")
-                    return
-            except asyncio.CancelledError:
-                self.log.debug("Task cancelled")
-                return
-            except Exception as e:
-                self.log.debug(
-                    "_store_status exception: {}".format(str(e)), exc_info=True
-                )
-                pass
-            finally:
-                if run_once:
-                    return
+    ) -> None:
+        """
+        Obtains the status of the KDU instance based on Helm Charts, and stores it in the database.
+
+        :param cluster_id (str): the cluster where the KDU instance is deployed
+        :param operation (str): The operation related to the status to be updated (for instance, "install" or "upgrade")
+        :param kdu_instance (str): The KDU instance in relation to which the status is obtained
+        :param namespace (str): The Kubernetes namespace where the KDU instance was deployed. Defaults to None
+        :param db_dict (dict): A dictionary with the necessary database information. It shall contain
+        values for the keys:
+            - "collection": The MongoDB collection to write to
+            - "filter": The query filter to use in the update process
+            - "path": The dot-separated keys that target the object to be updated
+        Defaults to None.
+        """
+
+        try:
+            detailed_status = await self._status_kdu(
+                cluster_id=cluster_id,
+                kdu_instance=kdu_instance,
+                yaml_format=False,
+                namespace=namespace,
+            )
+
+            status = detailed_status.get("info").get("description")
+            self.log.debug(f"Status for KDU {kdu_instance} obtained: {status}.")
+
+            # write status to db
+            result = await self.write_app_status_to_db(
+                db_dict=db_dict,
+                status=str(status),
+                detailed_status=str(detailed_status),
+                operation=operation,
+            )
+
+            if not result:
+                self.log.info("Error writing in database. Task exiting...")
+
+        except asyncio.CancelledError as e:
+            self.log.warning(
+                f"Exception in method {self._store_status.__name__} (task cancelled): {e}"
+            )
+        except Exception as e:
+            self.log.warning(f"Exception in method {self._store_status.__name__}: {e}")
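For reference, a hedged sketch of the db_dict shape documented above; the collection, filter and path values are illustrative:

    db_dict = {
        "collection": "nsrs",
        "filter": {"_id": "<nsr-id>"},
        "path": "_admin.deployed.K8s.0",
    }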
 
     # params for use in -f file
    # returns values file option and filename (in order to delete it at the end)
-    def _params_to_file_option(self, cluster_id: str, params: dict) -> (str, str):
-
+    def _params_to_file_option(self, cluster_id: str, params: dict) -> tuple[str, str]:
         if params and len(params) > 0:
             self._init_paths_env(cluster_name=cluster_id, create_if_not_exist=True)
 
             def get_random_number():
-                r = random.randrange(start=1, stop=99999999)
+                r = random.SystemRandom().randint(1, 99999999)
                 s = str(r)
                 while len(s) < 10:
                     s = "0" + s
@@ -1420,7 +1983,7 @@ class K8sHelmBaseConnector(K8sConnector):
             for key in params:
                 value = params.get(key)
                 if "!!yaml" in str(value):
-                    value = yaml.load(value[7:])
+                    value = yaml.safe_load(value[7:])
                 params2[key] = value
 
             values_file = get_random_number() + ".yaml"
@@ -1434,19 +1997,14 @@ class K8sHelmBaseConnector(K8sConnector):
     # params for use in --set option
     @staticmethod
     def _params_to_set_option(params: dict) -> str:
-        params_str = ""
-        if params and len(params) > 0:
-            start = True
-            for key in params:
-                value = params.get(key, None)
-                if value is not None:
-                    if start:
-                        params_str += "--set "
-                        start = False
-                    else:
-                        params_str += ","
-                    params_str += "{}={}".format(key, value)
-        return params_str
+        pairs = [
+            f"{quote(str(key))}={quote(str(value))}"
+            for key, value in params.items()
+            if value is not None
+        ]
+        if not pairs:
+            return ""
+        return "--set " + ",".join(pairs)
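A quick illustration of the rewritten helper: entries are shell-quoted via shlex.quote and None values are dropped (example values made up):

    # _params_to_set_option({"replicaCount": 2, "name": "my app", "skip": None})
    #   -> "--set replicaCount=2,name='my app'"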
 
    @staticmethod
    def generate_kdu_instance_name(**kwargs):
@@ -1476,10 +2034,234 @@ class K8sHelmBaseConnector(K8sConnector):
         name += "-"
 
         def get_random_number():
-            r = random.randrange(start=1, stop=99999999)
+            r = random.SystemRandom().randint(1, 99999999)
             s = str(r)
             s = s.rjust(10, "0")
             return s
 
         name = name + get_random_number()
         return name.lower()
+
+    def _split_version(self, kdu_model: str) -> tuple[str, str]:
+        version = None
+        if (
+            not (
+                self._is_helm_chart_a_file(kdu_model)
+                or self._is_helm_chart_a_url(kdu_model)
+            )
+            and ":" in kdu_model
+        ):
+            parts = kdu_model.split(sep=":")
+            if len(parts) == 2:
+                version = str(parts[1])
+                kdu_model = parts[0]
+        return kdu_model, version
+
+    def _split_repo(self, kdu_model: str) -> tuple[str, str]:
+        """Obtain the Helm Chart's repository and Chart's names from the KDU model
+
+        Args:
+            kdu_model (str): Associated KDU model
+
+        Returns:
+            (str, str): Tuple with the Chart name in index 0, and the repo name
+                        in index 1; if there was a problem finding them, return None
+                        for both
+        """
+
+        chart_name = None
+        repo_name = None
+
+        idx = kdu_model.find("/")
+        if not self._is_helm_chart_a_url(kdu_model) and idx >= 0:
+            chart_name = kdu_model[idx + 1 :]
+            repo_name = kdu_model[:idx]
+
+        return chart_name, repo_name
+
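Taken together with _split_version, a hedged example of how a KDU model string decomposes (values illustrative):

    # _split_version("stable/openldap:1.2.3") -> ("stable/openldap", "1.2.3")
    # _split_repo("stable/openldap")          -> ("openldap", "stable")
    # _split_repo("openldap")                 -> (None, None)  # no repo prefix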
+    async def _find_repo(self, kdu_model: str, cluster_uuid: str) -> str:
+        """Obtain the Helm repository for a Helm Chart
+
+        Args:
+            kdu_model (str): the KDU model associated with the Helm Chart instantiation
+            cluster_uuid (str): The cluster UUID associated with the Helm Chart instantiation
+
+        Returns:
+            str: the repository URL; if the Helm Chart is a local one, the function returns None
+        """
+
+        _, repo_name = self._split_repo(kdu_model=kdu_model)
+
+        repo_url = None
+        if repo_name:
+            # Find repository link
+            local_repo_list = await self.repo_list(cluster_uuid)
+            for repo in local_repo_list:
+                if repo["name"] == repo_name:
+                    repo_url = repo["url"]
+                    break  # it is not necessary to continue the loop if the repo link was found...
+
+        return repo_url
+
+    def _repo_to_oci_url(self, repo):
+        db_repo = self.db.get_one("k8srepos", {"name": repo}, fail_on_empty=False)
+        if db_repo and "oci" in db_repo:
+            return db_repo.get("url")
+
+    async def _prepare_helm_chart(self, kdu_model, cluster_id):
+        # e.g.: "stable/openldap", "1.0"
+        kdu_model, version = self._split_version(kdu_model)
+        # e.g.: "openldap, stable"
+        chart_name, repo = self._split_repo(kdu_model)
+        if repo and chart_name:  # repo/chart case
+            oci_url = self._repo_to_oci_url(repo)
+            if oci_url:  # oci does not require helm repo update
+                kdu_model = f"{oci_url.rstrip('/')}/{chart_name.lstrip('/')}"  # urljoin doesn't work for the oci:// scheme
+            else:
+                await self.repo_update(cluster_id, repo)
+        return kdu_model, version
+
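A sketch of the OCI branch above with an invented repository record; the manual rstrip/lstrip join is used because urljoin does not understand the oci:// scheme:

    # oci_url    = "oci://registry.example.com/charts/"  (illustrative)
    # chart_name = "openldap"
    # kdu_model  -> "oci://registry.example.com/charts/openldap"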
+    async def create_certificate(
+        self, cluster_uuid, namespace, dns_prefix, name, secret_name, usage
+    ):
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.create_certificate(
+            namespace=namespace,
+            name=name,
+            dns_prefix=dns_prefix,
+            secret_name=secret_name,
+            usages=[usage],
+            issuer_name="ca-issuer",
+        )
+
+    async def delete_certificate(self, cluster_uuid, namespace, certificate_name):
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.delete_certificate(namespace, certificate_name)
+
+    async def create_namespace(
+        self,
+        namespace,
+        cluster_uuid,
+        labels,
+    ):
+        """
+        Create a namespace in a specific cluster
+
+        :param namespace:    Namespace to be created
+        :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig
+        :param labels:       Dictionary with labels for the new namespace
+        :returns: None
+        """
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.create_namespace(
+            name=namespace,
+            labels=labels,
+        )
+
+    async def delete_namespace(
+        self,
+        namespace,
+        cluster_uuid,
+    ):
+        """
+        Delete a namespace in a specific cluster
+
+        :param namespace: namespace to be deleted
+        :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig
+        :returns: None
+        """
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.delete_namespace(
+            name=namespace,
+        )
+
+    async def copy_secret_data(
+        self,
+        src_secret: str,
+        dst_secret: str,
+        cluster_uuid: str,
+        data_key: str,
+        src_namespace: str = "osm",
+        dst_namespace: str = "osm",
+    ):
+        """
+        Copy a single key and value from an existing secret to a new one
+
+        :param src_secret: name of the existing secret
+        :param dst_secret: name of the new secret
+        :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig
+        :param data_key: key of the existing secret to be copied
+        :param src_namespace: Namespace of the existing secret
+        :param dst_namespace: Namespace of the new secret
+        :returns: None
+        """
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        secret_data = await kubectl.get_secret_content(
+            name=src_secret,
+            namespace=src_namespace,
+        )
+        # Only the corresponding data_key value needs to be copied
+        data = {data_key: secret_data.get(data_key)}
+        await kubectl.create_secret(
+            name=dst_secret,
+            data=data,
+            namespace=dst_namespace,
+            secret_type="Opaque",
+        )
+
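A hedged usage sketch for copy_secret_data (all names illustrative):

    # await connector.copy_secret_data(
    #     src_secret="osm-ca-secret",
    #     dst_secret="vnf-ca-secret",
    #     cluster_uuid=cluster_uuid,
    #     data_key="ca.crt",
    #     src_namespace="osm",
    #     dst_namespace="vnf-namespace",
    # )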
+    async def setup_default_rbac(
+        self,
+        name,
+        namespace,
+        cluster_uuid,
+        api_groups,
+        resources,
+        verbs,
+        service_account,
+    ):
+        """
+        Create a basic RBAC for a new namespace.
+
+        :param name: name of both Role and Role Binding
+        :param namespace: K8s namespace
+        :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig
+        :param api_groups: Api groups to be allowed in Policy Rule
+        :param resources: Resources to be allowed in Policy Rule
+        :param verbs: Verbs to be allowed in Policy Rule
+        :param service_account: Service Account name used to bind the Role
+        :returns: None
+        """
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.create_role(
+            name=name,
+            labels={},
+            namespace=namespace,
+            api_groups=api_groups,
+            resources=resources,
+            verbs=verbs,
+        )
+        await kubectl.create_role_binding(
+            name=name,
+            labels={},
+            namespace=namespace,
+            role_name=name,
+            sa_name=service_account,
+        )
diff --git a/n2vc/k8s_helm_conn.py b/n2vc/k8s_helm_conn.py
deleted file mode 100644 (file)
index 6bbc0fa..0000000
+++ /dev/null
@@ -1,641 +0,0 @@
-##
-# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
-# This file is part of OSM
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
-##
-import asyncio
-import os
-import yaml
-
-from n2vc.k8s_helm_base_conn import K8sHelmBaseConnector
-from n2vc.exceptions import K8sException
-
-
-class K8sHelmConnector(K8sHelmBaseConnector):
-
-    """
-    ####################################################################################
-    ################################### P U B L I C ####################################
-    ####################################################################################
-    """
-
-    def __init__(
-        self,
-        fs: object,
-        db: object,
-        kubectl_command: str = "/usr/bin/kubectl",
-        helm_command: str = "/usr/bin/helm",
-        log: object = None,
-        on_update_db=None,
-    ):
-        """
-        Initializes helm connector for helm v2
-
-        :param fs: file system for kubernetes and helm configuration
-        :param db: database object to write current operation status
-        :param kubectl_command: path to kubectl executable
-        :param helm_command: path to helm executable
-        :param log: logger
-        :param on_update_db: callback called when k8s connector updates database
-        """
-
-        # parent class
-        K8sHelmBaseConnector.__init__(
-            self,
-            db=db,
-            log=log,
-            fs=fs,
-            kubectl_command=kubectl_command,
-            helm_command=helm_command,
-            on_update_db=on_update_db,
-        )
-
-        self.log.info("Initializing K8S Helm2 connector")
-
-        # initialize helm client-only
-        self.log.debug("Initializing helm client-only...")
-        command = "{} init --client-only {} ".format(
-            self._helm_command,
-            "--stable-repo-url {}".format(self._stable_repo_url)
-            if self._stable_repo_url
-            else "--skip-repos",
-        )
-        try:
-            asyncio.ensure_future(
-                self._local_async_exec(command=command, raise_exception_on_error=False)
-            )
-            # loop = asyncio.get_event_loop()
-            # loop.run_until_complete(self._local_async_exec(command=command,
-            # raise_exception_on_error=False))
-        except Exception as e:
-            self.warning(
-                msg="helm init failed (it was already initialized): {}".format(e)
-            )
-
-        self.log.info("K8S Helm2 connector initialized")
-
-    async def install(
-        self,
-        cluster_uuid: str,
-        kdu_model: str,
-        kdu_instance: str,
-        atomic: bool = True,
-        timeout: float = 300,
-        params: dict = None,
-        db_dict: dict = None,
-        kdu_name: str = None,
-        namespace: str = None,
-        **kwargs,
-    ):
-        """
-        Deploys of a new KDU instance. It would implicitly rely on the `install` call
-        to deploy the Chart/Bundle properly parametrized (in practice, this call would
-        happen before any _initial-config-primitive_of the VNF is called).
-
-        :param cluster_uuid: UUID of a K8s cluster known by OSM
-        :param kdu_model: chart/ reference (string), which can be either
-            of these options:
-            - a name of chart available via the repos known by OSM
-            - a path to a packaged chart
-            - a path to an unpacked chart directory or a URL
-        :param kdu_instance: Kdu instance name
-        :param atomic: If set, installation process purges chart/bundle on fail, also
-            will wait until all the K8s objects are active
-        :param timeout: Time in seconds to wait for the install of the chart/bundle
-            (defaults to Helm default timeout: 300s)
-        :param params: dictionary of key-value pairs for instantiation parameters
-            (overriding default values)
-        :param dict db_dict: where to write into database when the status changes.
-                        It contains a dict with {collection: <str>, filter: {},
-                        path: <str>},
-                            e.g. {collection: "nsrs", filter:
-                            {_id: <nsd-id>, path: "_admin.deployed.K8S.3"}
-        :param kdu_name: Name of the KDU instance to be installed
-        :param namespace: K8s namespace to use for the KDU instance
-        :param kwargs: Additional parameters (None yet)
-        :return: True if successful
-        """
-        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
-        self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_id))
-
-        # sync local dir
-        self.fs.sync(from_path=cluster_id)
-
-        # init env, paths
-        paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
-        )
-
-        await self._install_impl(
-            cluster_id,
-            kdu_model,
-            paths,
-            env,
-            kdu_instance,
-            atomic=atomic,
-            timeout=timeout,
-            params=params,
-            db_dict=db_dict,
-            kdu_name=kdu_name,
-            namespace=namespace,
-        )
-
-        # sync fs
-        self.fs.reverse_sync(from_path=cluster_id)
-
-        self.log.debug("Returning kdu_instance {}".format(kdu_instance))
-        return True
-
-    async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:
-
-        self.log.debug(
-            "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url)
-        )
-
-        return await self._exec_inspect_comand(
-            inspect_command="", kdu_model=kdu_model, repo_url=repo_url
-        )
-
-    """
-    ####################################################################################
-    ################################### P R I V A T E ##################################
-    ####################################################################################
-    """
-
-    def _init_paths_env(self, cluster_name: str, create_if_not_exist: bool = True):
-        """
-        Creates and returns base cluster and kube dirs and returns them.
-        Also created helm3 dirs according to new directory specification, paths are
-        returned and also environment variables that must be provided to execute commands
-
-        Helm 2 directory specification uses helm_home dir:
-
-        The variables assigned for this paths are:
-        - Helm hone: $HELM_HOME
-        - helm kubeconfig: $KUBECONFIG
-
-        :param cluster_name:  cluster_name
-        :return: Dictionary with config_paths and dictionary with helm environment variables
-        """
-        base = self.fs.path
-        if base.endswith("/") or base.endswith("\\"):
-            base = base[:-1]
-
-        # base dir for cluster
-        cluster_dir = base + "/" + cluster_name
-
-        # kube dir
-        kube_dir = cluster_dir + "/" + ".kube"
-        if create_if_not_exist and not os.path.exists(kube_dir):
-            self.log.debug("Creating dir {}".format(kube_dir))
-            os.makedirs(kube_dir)
-
-        # helm home dir
-        helm_dir = cluster_dir + "/" + ".helm"
-        if create_if_not_exist and not os.path.exists(helm_dir):
-            self.log.debug("Creating dir {}".format(helm_dir))
-            os.makedirs(helm_dir)
-
-        config_filename = kube_dir + "/config"
-
-        # 2 - Prepare dictionary with paths
-        paths = {
-            "kube_dir": kube_dir,
-            "kube_config": config_filename,
-            "cluster_dir": cluster_dir,
-            "helm_dir": helm_dir,
-        }
-
-        for file_name, file in paths.items():
-            if "dir" in file_name and not os.path.exists(file):
-                err_msg = "{} dir does not exist".format(file)
-                self.log.error(err_msg)
-                raise K8sException(err_msg)
-
-        # 3 - Prepare environment variables
-        env = {"HELM_HOME": helm_dir, "KUBECONFIG": config_filename}
-
-        return paths, env
-
-    async def _get_services(self, cluster_id, kdu_instance, namespace):
-
-        # init config, env
-        paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
-        )
-
-        command1 = "{} get manifest {} ".format(self._helm_command, kdu_instance)
-        command2 = "{} get --namespace={} -f -".format(self.kubectl_command, namespace)
-        output, _rc = await self._local_async_exec_pipe(
-            command1, command2, env=env, raise_exception_on_error=True
-        )
-        services = self._parse_services(output)
-
-        return services
-
-    async def _cluster_init(
-        self, cluster_id: str, namespace: str, paths: dict, env: dict
-    ):
-        """
-        Implements the helm version dependent cluster initialization:
-        For helm2 it initialized tiller environment if needed
-        """
-
-        # check if tiller pod is up in cluster
-        command = "{} --kubeconfig={} --namespace={} get deployments".format(
-            self.kubectl_command, paths["kube_config"], namespace
-        )
-        output, _rc = await self._local_async_exec(
-            command=command, raise_exception_on_error=True, env=env
-        )
-
-        output_table = self._output_to_table(output=output)
-
-        # find 'tiller' pod in all pods
-        already_initialized = False
-        try:
-            for row in output_table:
-                if row[0].startswith("tiller-deploy"):
-                    already_initialized = True
-                    break
-        except Exception:
-            pass
-
-        # helm init
-        n2vc_installed_sw = False
-        if not already_initialized:
-            self.log.info(
-                "Initializing helm in client and server: {}".format(cluster_id)
-            )
-            command = "{} --kubeconfig={} --namespace kube-system create serviceaccount {}".format(
-                self.kubectl_command, paths["kube_config"], self.service_account
-            )
-            _, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=False, env=env
-            )
-
-            command = (
-                "{} --kubeconfig={} create clusterrolebinding osm-tiller-cluster-rule "
-                "--clusterrole=cluster-admin --serviceaccount=kube-system:{}"
-            ).format(self.kubectl_command, paths["kube_config"], self.service_account)
-            _, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=False, env=env
-            )
-
-            command = (
-                "{} --kubeconfig={} --tiller-namespace={} --home={} --service-account {} "
-                " {} init"
-            ).format(
-                self._helm_command,
-                paths["kube_config"],
-                namespace,
-                paths["helm_dir"],
-                self.service_account,
-                "--stable-repo-url {}".format(self._stable_repo_url)
-                if self._stable_repo_url
-                else "--skip-repos",
-            )
-            _, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=True, env=env
-            )
-            n2vc_installed_sw = True
-        else:
-            # check client helm installation
-            check_file = paths["helm_dir"] + "/repository/repositories.yaml"
-            if not self._check_file_exists(
-                filename=check_file, exception_if_not_exists=False
-            ):
-                self.log.info("Initializing helm in client: {}".format(cluster_id))
-                command = (
-                    "{} --kubeconfig={} --tiller-namespace={} "
-                    "--home={} init --client-only {} "
-                ).format(
-                    self._helm_command,
-                    paths["kube_config"],
-                    namespace,
-                    paths["helm_dir"],
-                    "--stable-repo-url {}".format(self._stable_repo_url)
-                    if self._stable_repo_url
-                    else "--skip-repos",
-                )
-                output, _rc = await self._local_async_exec(
-                    command=command, raise_exception_on_error=True, env=env
-                )
-            else:
-                self.log.info("Helm client already initialized")
-
-        # remove old stable repo and add new one
-        cluster_uuid = "{}:{}".format(namespace, cluster_id)
-        repo_list = await self.repo_list(cluster_uuid)
-        for repo in repo_list:
-            if repo["name"] == "stable" and repo["url"] != self._stable_repo_url:
-                self.log.debug("Add new stable repo url: {}")
-                await self.repo_remove(cluster_uuid, "stable")
-                if self._stable_repo_url:
-                    await self.repo_add(cluster_uuid, "stable", self._stable_repo_url)
-                break
-
-        return n2vc_installed_sw
-
-    async def _uninstall_sw(self, cluster_id: str, namespace: str):
-        # uninstall Tiller if necessary
-
-        self.log.debug("Uninstalling tiller from cluster {}".format(cluster_id))
-
-        # init paths, env
-        paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
-        )
-
-        if not namespace:
-            # find namespace for tiller pod
-            command = "{} --kubeconfig={} get deployments --all-namespaces".format(
-                self.kubectl_command, paths["kube_config"]
-            )
-            output, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=False, env=env
-            )
-            output_table = self._output_to_table(output=output)
-            namespace = None
-            for r in output_table:
-                try:
-                    if "tiller-deploy" in r[1]:
-                        namespace = r[0]
-                        break
-                except Exception:
-                    pass
-            else:
-                msg = "Tiller deployment not found in cluster {}".format(cluster_id)
-                self.log.error(msg)
-
-            self.log.debug("namespace for tiller: {}".format(namespace))
-
-        if namespace:
-            # uninstall tiller from cluster
-            self.log.debug("Uninstalling tiller from cluster {}".format(cluster_id))
-            command = "{} --kubeconfig={} --home={} reset".format(
-                self._helm_command, paths["kube_config"], paths["helm_dir"]
-            )
-            self.log.debug("resetting: {}".format(command))
-            output, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=True, env=env
-            )
-            # Delete clusterrolebinding and serviceaccount.
-            # Ignore if errors for backward compatibility
-            command = (
-                "{} --kubeconfig={} delete clusterrolebinding.rbac.authorization.k8s."
-                "io/osm-tiller-cluster-rule"
-            ).format(self.kubectl_command, paths["kube_config"])
-            output, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=False, env=env
-            )
-            command = "{} --kubeconfig={} --namespace kube-system delete serviceaccount/{}".format(
-                self.kubectl_command, paths["kube_config"], self.service_account
-            )
-            output, _rc = await self._local_async_exec(
-                command=command, raise_exception_on_error=False, env=env
-            )
-
-        else:
-            self.log.debug("namespace not found")
-
-    async def _instances_list(self, cluster_id):
-
-        # init paths, env
-        paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
-        )
-
-        command = "{} list --output yaml".format(self._helm_command)
-
-        output, _rc = await self._local_async_exec(
-            command=command, raise_exception_on_error=True, env=env
-        )
-
-        if output and len(output) > 0:
-            # parse yaml and update keys to lower case to unify with helm3
-            instances = yaml.load(output, Loader=yaml.SafeLoader).get("Releases")
-            new_instances = []
-            for instance in instances:
-                new_instance = dict((k.lower(), v) for k, v in instance.items())
-                new_instances.append(new_instance)
-            return new_instances
-        else:
-            return []
-
-    def _get_inspect_command(
-        self, show_command: str, kdu_model: str, repo_str: str, version: str
-    ):
-        inspect_command = "{} inspect {} {}{} {}".format(
-            self._helm_command, show_command, kdu_model, repo_str, version
-        )
-        return inspect_command
-
-    async def _status_kdu(
-        self,
-        cluster_id: str,
-        kdu_instance: str,
-        namespace: str = None,
-        show_error_log: bool = False,
-        return_text: bool = False,
-    ):
-
-        self.log.debug(
-            "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace)
-        )
-
-        # init config, env
-        paths, env = self._init_paths_env(
-            cluster_name=cluster_id, create_if_not_exist=True
-        )
-        command = "{} status {} --output yaml".format(self._helm_command, kdu_instance)
-        output, rc = await self._local_async_exec(
-            command=command,
-            raise_exception_on_error=True,
-            show_error_log=show_error_log,
-            env=env,
-        )
-
-        if return_text:
-            return str(output)
-
-        if rc != 0:
-            return None
-
-        data = yaml.load(output, Loader=yaml.SafeLoader)
-
-        # remove field 'notes'
-        try:
-            del data.get("info").get("status")["notes"]
-        except KeyError:
-            pass
-
-        # parse field 'resources'
-        try:
-            resources = str(data.get("info").get("status").get("resources"))
-            resource_table = self._output_to_table(resources)
-            data.get("info").get("status")["resources"] = resource_table
-        except Exception:
-            pass
-
-        # set description to lowercase (unify with helm3)
-        try:
-            data.get("info")["description"] = data.get("info").pop("Description")
-        except KeyError:
-            pass
-
-        return data
-
-    def _get_helm_chart_repos_ids(self, cluster_uuid) -> list:
-        repo_ids = []
-        cluster_filter = {"_admin.helm-chart.id": cluster_uuid}
-        cluster = self.db.get_one("k8sclusters", cluster_filter)
-        if cluster:
-            repo_ids = cluster.get("_admin").get("helm_chart_repos") or []
-            return repo_ids
-        else:
-            raise K8sException(
-                "k8cluster with helm-id : {} not found".format(cluster_uuid)
-            )
-
-    async def _is_install_completed(self, cluster_id: str, kdu_instance: str) -> bool:
-
-        status = await self._status_kdu(
-            cluster_id=cluster_id, kdu_instance=kdu_instance, return_text=False
-        )
-
-        # extract info.status.resources-> str
-        # format:
-        #       ==> v1/Deployment
-        #       NAME                    READY   UP-TO-DATE   AVAILABLE   AGE
-        #       halting-horse-mongodb   0/1     1            0           0s
-        #       halting-petit-mongodb   1/1     1            0           0s
-        # blank line
-        resources = K8sHelmBaseConnector._get_deep(
-            status, ("info", "status", "resources")
-        )
-
-        # convert to table
-        resources = K8sHelmBaseConnector._output_to_table(resources)
-
-        num_lines = len(resources)
-        index = 0
-        ready = True
-        while index < num_lines:
-            try:
-                line1 = resources[index]
-                index += 1
-                # find '==>' in column 0
-                if line1[0] == "==>":
-                    line2 = resources[index]
-                    index += 1
-                    # find READY in column 1
-                    if line2[1] == "READY":
-                        # read next lines
-                        line3 = resources[index]
-                        index += 1
-                        while len(line3) > 1 and index < num_lines:
-                            ready_value = line3[1]
-                            parts = ready_value.split(sep="/")
-                            current = int(parts[0])
-                            total = int(parts[1])
-                            if current < total:
-                                self.log.debug("NOT READY:\n    {}".format(line3))
-                                ready = False
-                            line3 = resources[index]
-                            index += 1
-
-            except Exception:
-                pass
-
-        return ready
-
-    def _get_install_command(
-        self, kdu_model, kdu_instance, namespace, params_str, version, atomic, timeout
-    ) -> str:
-
-        timeout_str = ""
-        if timeout:
-            timeout_str = "--timeout {}".format(timeout)
-
-        # atomic
-        atomic_str = ""
-        if atomic:
-            atomic_str = "--atomic"
-        # namespace
-        namespace_str = ""
-        if namespace:
-            namespace_str = "--namespace {}".format(namespace)
-
-        # version
-        version_str = ""
-        if version:
-            version_str = version_str = "--version {}".format(version)
-
-        command = (
-            "{helm} install {atomic} --output yaml  "
-            "{params} {timeout} --name={name} {ns} {model} {ver}".format(
-                helm=self._helm_command,
-                atomic=atomic_str,
-                params=params_str,
-                timeout=timeout_str,
-                name=kdu_instance,
-                ns=namespace_str,
-                model=kdu_model,
-                ver=version_str,
-            )
-        )
-        return command
-
-    def _get_upgrade_command(
-        self, kdu_model, kdu_instance, namespace, params_str, version, atomic, timeout
-    ) -> str:
-
-        timeout_str = ""
-        if timeout:
-            timeout_str = "--timeout {}".format(timeout)
-
-        # atomic
-        atomic_str = ""
-        if atomic:
-            atomic_str = "--atomic"
-
-        # version
-        version_str = ""
-        if version:
-            version_str = "--version {}".format(version)
-
-        command = "{helm} upgrade {atomic} --output yaml {params} {timeout} {name} {model} {ver}".format(
-            helm=self._helm_command,
-            atomic=atomic_str,
-            params=params_str,
-            timeout=timeout_str,
-            name=kdu_instance,
-            model=kdu_model,
-            ver=version_str,
-        )
-        return command
-
-    def _get_rollback_command(self, kdu_instance, namespace, revision) -> str:
-        return "{} rollback {} {} --wait".format(
-            self._helm_command, kdu_instance, revision
-        )
-
-    def _get_uninstall_command(self, kdu_instance: str, namespace: str) -> str:
-        return "{} delete --purge  {}".format(self._helm_command, kdu_instance)
diff --git a/n2vc/k8s_juju_conn.py b/n2vc/k8s_juju_conn.py
index 149947d..c197221 100644 (file)
@@ -13,6 +13,7 @@
 #     limitations under the License.
 
 import asyncio
+from typing import Union
 import os
 import uuid
 import yaml
@@ -20,6 +21,7 @@ import tempfile
 import binascii
 
 from n2vc.config import EnvironConfig
+from n2vc.definitions import RelationEndpoint
 from n2vc.exceptions import K8sException
 from n2vc.k8s_conn import K8sConnector
 from n2vc.kubectl import Kubectl
@@ -49,7 +51,6 @@ class K8sJujuConnector(K8sConnector):
         kubectl_command: str = "/usr/bin/kubectl",
         juju_command: str = "/usr/bin/juju",
         log: object = None,
-        loop: object = None,
         on_update_db=None,
     ):
         """
@@ -58,24 +59,18 @@ class K8sJujuConnector(K8sConnector):
         :param kubectl_command: path to kubectl executable
         :param helm_command: path to helm executable
         :param log: logger
-        :param: loop: Asyncio loop
         """
 
        # parent class
-        K8sConnector.__init__(
-            self,
-            db,
-            log=log,
-            on_update_db=on_update_db,
-        )
+        K8sConnector.__init__(self, db, log=log, on_update_db=on_update_db)
 
        self.fs = fs
-        self.loop = loop or asyncio.get_event_loop()
         self.log.debug("Initializing K8S Juju connector")
 
         db_uri = EnvironConfig(prefixes=["OSMLCM_", "OSMMON_"]).get("database_uri")
         self._store = MotorStore(db_uri)
-        self.loading_libjuju = asyncio.Lock(loop=self.loop)
+        self.loading_libjuju = asyncio.Lock()
+        self.uninstall_locks = {}
 
        self.log.debug("K8S Juju connector initialized")
        # TODO: Remove these commented lines:
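A side note on the Lock change in this hunk: the loop keyword argument was deprecated in Python 3.8 and removed in Python 3.10, so the lock must now bind to the running event loop implicitly:

    import asyncio

    lock = asyncio.Lock()        # OK: binds to the running loop on first use
    # asyncio.Lock(loop=loop)    # raises TypeError on Python >= 3.10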
@@ -121,43 +116,31 @@ class K8sJujuConnector(K8sConnector):
         # if it fails in the middle of the process
         cleanup_data = []
         try:
-            kubectl.create_cluster_role(
-                name=metadata_name,
-                labels=labels,
-            )
+            self.log.debug("Initializing K8s cluster for juju")
+            kubectl.create_cluster_role(name=metadata_name, labels=labels)
+            self.log.debug("Cluster role created")
             cleanup_data.append(
-                {
-                    "delete": kubectl.delete_cluster_role,
-                    "args": (metadata_name),
-                }
+                {"delete": kubectl.delete_cluster_role, "args": (metadata_name,)}
             )
 
-            kubectl.create_service_account(
-                name=metadata_name,
-                labels=labels,
-            )
+            kubectl.create_service_account(name=metadata_name, labels=labels)
+            self.log.debug("Service account created")
             cleanup_data.append(
-                {
-                    "delete": kubectl.delete_service_account,
-                    "args": (metadata_name),
-                }
+                {"delete": kubectl.delete_service_account, "args": (metadata_name,)}
             )
 
-            kubectl.create_cluster_role_binding(
-                name=metadata_name,
-                labels=labels,
-            )
+            kubectl.create_cluster_role_binding(name=metadata_name, labels=labels)
+            self.log.debug("Role binding created")
             cleanup_data.append(
                 {
-                    "delete": kubectl.delete_service_account,
-                    "args": (metadata_name),
+                    "delete": kubectl.delete_cluster_role_binding,
+                    "args": (metadata_name,),
                 }
             )
-            token, client_cert_data = await kubectl.get_secret_data(
-                metadata_name,
-            )
+            token, client_cert_data = await kubectl.get_secret_data(metadata_name)
 
            default_storage_class = kubectl.get_default_storage_class()
+            self.log.debug("Default storage class: {}".format(default_storage_class))
             await libjuju.add_k8s(
                 name=cluster_uuid,
                 rbac_id=rbac_id,
@@ -167,9 +150,10 @@ class K8sJujuConnector(K8sConnector):
                 storage_class=default_storage_class,
                 credential_name=self._get_credential_name(cluster_uuid),
             )
+            self.log.debug("K8s cluster added to juju controller")
             return cluster_uuid, True
         except Exception as e:
-            self.log.error("Error initializing k8scluster: {}".format(e))
+            self.log.error("Error initializing k8scluster: {}".format(e), exc_info=True)
             if len(cleanup_data) > 0:
                 self.log.debug("Cleaning up created resources in k8s cluster...")
                 for item in cleanup_data:
@@ -186,16 +170,16 @@ class K8sJujuConnector(K8sConnector):
         name: str,
         url: str,
         _type: str = "charm",
+        cert: str = None,
+        user: str = None,
+        password: str = None,
     ):
         raise MethodNotImplemented()
 
     async def repo_list(self):
         raise MethodNotImplemented()
 
-    async def repo_remove(
-        self,
-        name: str,
-    ):
+    async def repo_remove(self, name: str):
         raise MethodNotImplemented()
 
     async def synchronize_repos(self, cluster_uuid: str, name: str):
@@ -302,6 +286,10 @@ class K8sJujuConnector(K8sConnector):
             raise K8sException("bundle must be set")
 
         if bundle.startswith("cs:"):
+            # For Juju Bundles provided by the Charm Store
+            pass
+        elif bundle.startswith("ch:"):
+            # For Juju Bundles provided by the Charm Hub (this only works for juju version >= 2.9)
             pass
         elif bundle.startswith("http"):
             # Download the file
@@ -311,12 +299,16 @@ class K8sJujuConnector(K8sConnector):
             os.chdir(new_workdir)
             bundle = "local:{}".format(kdu_model)
 
-        self.log.debug("Checking for model named {}".format(kdu_instance))
+        # default namespace to kdu_instance
+        if not namespace:
+            namespace = kdu_instance
+
+        self.log.debug("Checking for model named {}".format(namespace))
 
         # Create the new model
-        self.log.debug("Adding model: {}".format(kdu_instance))
+        self.log.debug("Adding model: {}".format(namespace))
         cloud = Cloud(cluster_uuid, self._get_credential_name(cluster_uuid))
-        await libjuju.add_model(kdu_instance, cloud)
+        await libjuju.add_model(namespace, cloud)
 
         # if model:
         # TODO: Instantiation parameters
@@ -335,10 +327,17 @@ class K8sJujuConnector(K8sConnector):
             previous_workdir = "/app/storage"
 
         self.log.debug("[install] deploying {}".format(bundle))
             previous_workdir = "/app/storage"
 
         self.log.debug("[install] deploying {}".format(bundle))
+        instantiation_params = params.get("overlay") if params else None
         await libjuju.deploy(
-            bundle, model_name=kdu_instance, wait=atomic, timeout=timeout
+            bundle,
+            model_name=namespace,
+            wait=atomic,
+            timeout=timeout,
+            instantiation_params=instantiation_params,
         )
         os.chdir(previous_workdir)
+
+        # update information in the database (first, the VCA status, and then, the namespace)
         if self.on_update_db:
             await self.on_update_db(
                 cluster_uuid,
@@ -346,6 +345,13 @@ class K8sJujuConnector(K8sConnector):
                 filter=db_dict["filter"],
                 vca_id=kwargs.get("vca_id"),
             )
                 filter=db_dict["filter"],
                 vca_id=kwargs.get("vca_id"),
             )
+
+        self.db.set_one(
+            table="nsrs",
+            q_filter={"_admin.deployed.K8s.kdu-instance": kdu_instance},
+            update_dict={"_admin.deployed.K8s.$.namespace": namespace},
+        )
+
         return True
 
     async def scale(
@@ -354,58 +360,66 @@ class K8sJujuConnector(K8sConnector):
         scale: int,
         resource_name: str,
         total_timeout: float = 1800,
+        namespace: str = None,
         **kwargs,
     ) -> bool:
         """Scale an application in a model
 
         :param: kdu_instance str:        KDU instance name
-        :param: scale int:               Scale to which to set this application
-        :param: resource_name str:       Resource name (Application name)
+        :param: scale int:               Scale to which to set the application
+        :param: resource_name str:       The application name in the Juju Bundle
         :param: timeout float:           The time, in seconds, to wait for the install
                                          to finish
+        :param namespace str: The namespace (model) where the Bundle was deployed
         :param kwargs:                   Additional parameters
                                             vca_id (str): VCA ID
 
         :return: If successful, returns True
         """
 
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
         try:
             libjuju = await self._get_libjuju(kwargs.get("vca_id"))
             await libjuju.scale_application(
-                model_name=kdu_instance,
+                model_name=model_name,
                 application_name=resource_name,
                 scale=scale,
                 total_timeout=total_timeout,
             )
         except Exception as e:
-            error_msg = "Error scaling application {} in kdu instance {}: {}".format(
-                resource_name, kdu_instance, e
+            error_msg = "Error scaling application {} of the model {} of the kdu instance {}: {}".format(
+                resource_name, model_name, kdu_instance, e
             )
             self.log.error(error_msg)
             raise K8sException(message=error_msg)
         return True
 
     async def get_scale_count(
-        self,
-        resource_name: str,
-        kdu_instance: str,
-        **kwargs,
+        self, resource_name: str, kdu_instance: str, namespace: str = None, **kwargs
     ) -> int:
         """Get an application scale count
 
-        :param: resource_name str:       Resource name (Application name)
+        :param: resource_name str:       The application name in the Juju Bundle
         :param: kdu_instance str:        KDU instance name
+        :param namespace str: The namespace (model) where the Bundle was deployed
         :param kwargs:                   Additional parameters
                                             vca_id (str): VCA ID
         :return: Return application instance count
         """
+
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
         try:
             libjuju = await self._get_libjuju(kwargs.get("vca_id"))
-            status = await libjuju.get_model_status(kdu_instance)
+            status = await libjuju.get_model_status(model_name=model_name)
             return len(status.applications[resource_name].units)
         except Exception as e:
-            error_msg = "Error getting scale count from application {} in kdu instance {}: {}".format(
-                resource_name, kdu_instance, e
+            error_msg = (
+                f"Error getting scale count from application {resource_name} of the model {model_name} of "
+                f"the kdu instance {kdu_instance}: {e}"
             )
             self.log.error(error_msg)
             raise K8sException(message=error_msg)
@@ -455,10 +469,7 @@ class K8sJujuConnector(K8sConnector):
     """Rollback"""
 
     async def rollback(
     """Rollback"""
 
     async def rollback(
-        self,
-        cluster_uuid: str,
-        kdu_instance: str,
-        revision: int = 0,
+        self, cluster_uuid: str, kdu_instance: str, revision: int = 0
     ) -> str:
         """Rollback a model
 
@@ -475,34 +486,71 @@ class K8sJujuConnector(K8sConnector):
     """Deletion"""
 
     async def uninstall(
     """Deletion"""
 
     async def uninstall(
-        self,
-        cluster_uuid: str,
-        kdu_instance: str,
-        **kwargs,
+        self, cluster_uuid: str, kdu_instance: str, namespace: str = None, **kwargs
     ) -> bool:
         """Uninstall a KDU instance
 
         :param cluster_uuid str: The UUID of the cluster
         :param kdu_instance str: The unique name of the KDU instance
+        :param namespace str: The namespace (model) where the Bundle was deployed
         :param kwargs: Additional parameters
             vca_id (str): VCA ID
 
         :return: Returns True if successful, or raises an exception
         """
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
 
-        self.log.debug("[uninstall] Destroying model")
-        libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+        self.log.debug(f"[uninstall] Destroying model: {model_name}")
+
+        will_not_delete = False
+        if model_name not in self.uninstall_locks:
+            self.uninstall_locks[model_name] = asyncio.Lock()
+        delete_lock = self.uninstall_locks[model_name]
 
-        await libjuju.destroy_model(kdu_instance, total_timeout=3600)
+        while delete_lock.locked():
+            will_not_delete = True
+            await asyncio.sleep(0.1)
+
+        if will_not_delete:
+            self.log.info("Model {} deleted by another worker.".format(model_name))
+            return True
+
+        try:
+            async with delete_lock:
+                libjuju = await self._get_libjuju(kwargs.get("vca_id"))
 
-        # self.log.debug("[uninstall] Model destroyed and disconnecting")
-        # await controller.disconnect()
+                await libjuju.destroy_model(model_name, total_timeout=3600)
+        finally:
+            self.uninstall_locks.pop(model_name)
 
+        self.log.debug(f"[uninstall] Model {model_name} destroyed")
         return True
-        # TODO: Remove these commented lines
-        # if not self.authenticated:
-        #     self.log.debug("[uninstall] Connecting to controller")
-        #     await self.login(cluster_uuid)
+
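The locking scheme introduced in uninstall() is easy to miss in diff form; a minimal standalone sketch of the same idea, assuming plain asyncio and hypothetical names:

import asyncio

locks = {}  # model name -> asyncio.Lock

async def destroy_once(model_name: str):
    # The first worker takes the lock and destroys the model; workers that
    # arrive while the lock is held wait for it and then skip the deletion.
    if model_name not in locks:
        locks[model_name] = asyncio.Lock()
    lock = locks[model_name]

    arrived_while_locked = False
    while lock.locked():
        arrived_while_locked = True
        await asyncio.sleep(0.1)
    if arrived_while_locked:
        return  # model already destroyed by another worker

    try:
        async with lock:
            pass  # the real destroy_model() call goes here
    finally:
        locks.pop(model_name)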
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrade charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if status equals to "completed"
+        """
+        raise K8sException(
+            "KDUs deployed with Juju Bundle do not support charm upgrade"
+        )
 
     async def exec_primitive(
         self,
@@ -512,6 +560,7 @@ class K8sJujuConnector(K8sConnector):
         timeout: float = 300,
         params: dict = None,
         db_dict: dict = None,
+        namespace: str = None,
         **kwargs,
     ) -> str:
         """Exec primitive (Juju action)
@@ -522,6 +571,7 @@ class K8sJujuConnector(K8sConnector):
         :param timeout: Timeout for action execution
         :param params: Dictionary of all the parameters needed for the action
         :param db_dict: Dictionary for any additional data
+        :param namespace str: The namespace (model) where the Bundle was deployed
         :param kwargs: Additional parameters
             vca_id (str): VCA ID
 
@@ -529,6 +579,10 @@ class K8sJujuConnector(K8sConnector):
         """
         libjuju = await self._get_libjuju(kwargs.get("vca_id"))
 
         """
         libjuju = await self._get_libjuju(kwargs.get("vca_id"))
 
+        namespace = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+
         if not params or "application-name" not in params:
             raise K8sException(
                 "Missing application-name argument, \
@@ -537,14 +591,19 @@ class K8sJujuConnector(K8sConnector):
         try:
             self.log.debug(
                 "[exec_primitive] Getting model "
-                "kdu_instance: {}".format(kdu_instance)
+                "{} for the kdu_instance: {}".format(namespace, kdu_instance)
             )
             application_name = params["application-name"]
-            actions = await libjuju.get_actions(application_name, kdu_instance)
+            actions = await libjuju.get_actions(
+                application_name=application_name, model_name=namespace
+            )
             if primitive_name not in actions:
                 raise K8sException("Primitive {} not found".format(primitive_name))
             output, status = await libjuju.execute_action(
-                application_name, kdu_instance, primitive_name, **params
+                application_name=application_name,
+                model_name=namespace,
+                action_name=primitive_name,
+                **params,
             )
 
             if status != "completed":
@@ -553,7 +612,9 @@ class K8sJujuConnector(K8sConnector):
                 )
             if self.on_update_db:
                 await self.on_update_db(
-                    cluster_uuid, kdu_instance, filter=db_dict["filter"]
+                    cluster_uuid=cluster_uuid,
+                    kdu_instance=kdu_instance,
+                    filter=db_dict["filter"],
                 )
 
             return output
@@ -565,10 +626,7 @@ class K8sJujuConnector(K8sConnector):
 
     """Introspection"""
 
 
     """Introspection"""
 
-    async def inspect_kdu(
-        self,
-        kdu_model: str,
-    ) -> dict:
+    async def inspect_kdu(self, kdu_model: str) -> dict:
         """Inspect a KDU
 
         Inspects a bundle and returns a dictionary of config parameters and
         """Inspect a KDU
 
         Inspects a bundle and returns a dictionary of config parameters and
@@ -610,17 +668,14 @@ class K8sJujuConnector(K8sConnector):
 
         return kdu
 
-    async def help_kdu(
-        self,
-        kdu_model: str,
-    ) -> str:
+    async def help_kdu(self, kdu_model: str) -> str:
         """View the README
 
         """View the README
 
         If available, returns the README of the bundle.
 
         :param kdu_model str: The name or path of a bundle
 
         :return: If found, returns the contents of the README.
         """
         readme = None
 
         """
         readme = None
 
@@ -640,8 +695,9 @@ class K8sJujuConnector(K8sConnector):
         kdu_instance: str,
         complete_status: bool = False,
         yaml_format: bool = False,
+        namespace: str = None,
         **kwargs,
-    ) -> dict:
+    ) -> Union[str, dict]:
         """Get the status of the KDU
 
         Get the current status of the KDU instance.
         """Get the status of the KDU
 
         Get the current status of the KDU instance.
@@ -650,6 +706,7 @@ class K8sJujuConnector(K8sConnector):
         :param kdu_instance str: The unique id of the KDU instance
         :param complete_status: To get the complete_status of the KDU
         :param yaml_format: To get the status in proper format for NSR record
+        :param namespace str: The namespace (model) where the Bundle was deployed
         :param: kwargs: Additional parameters
             vca_id (str): VCA ID
 
@@ -659,7 +716,10 @@ class K8sJujuConnector(K8sConnector):
         libjuju = await self._get_libjuju(kwargs.get("vca_id"))
         status = {}
 
-        model_status = await libjuju.get_model_status(kdu_instance)
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+        model_status = await libjuju.get_model_status(model_name=model_name)
 
         if not complete_status:
             for name in model_status.applications:
@@ -673,34 +733,87 @@ class K8sJujuConnector(K8sConnector):
 
         return status
 
-    async def update_vca_status(self, vcastatus: dict, kdu_instance: str, **kwargs):
+    async def add_relation(
+        self, provider: RelationEndpoint, requirer: RelationEndpoint
+    ):
+        """
+        Add relation between two charmed endpoints
+
+        :param: provider: Provider relation endpoint
+        :param: requirer: Requirer relation endpoint
+        """
+        self.log.debug(f"adding new relation between {provider} and {requirer}")
+        cross_model_relation = (
+            provider.model_name != requirer.model_name
+            or provider.vca_id != requirer.vca_id
+        )
+        try:
+            if cross_model_relation:
+                # Cross-model relation
+                provider_libjuju = await self._get_libjuju(provider.vca_id)
+                requirer_libjuju = await self._get_libjuju(requirer.vca_id)
+                offer = await provider_libjuju.offer(provider)
+                if offer:
+                    saas_name = await requirer_libjuju.consume(
+                        requirer.model_name, offer, provider_libjuju
+                    )
+                    await requirer_libjuju.add_relation(
+                        requirer.model_name, requirer.endpoint, saas_name
+                    )
+            else:
+                # Standard relation
+                vca_id = provider.vca_id
+                model = provider.model_name
+                libjuju = await self._get_libjuju(vca_id)
+                # add juju relations between two applications
+                await libjuju.add_relation(
+                    model_name=model,
+                    endpoint_1=provider.endpoint,
+                    endpoint_2=requirer.endpoint,
+                )
+        except Exception as e:
+            message = f"Error adding relation between {provider} and {requirer}: {e}"
+            self.log.error(message)
+            raise Exception(message)
+
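The cross-model decision above relies on only three attributes of RelationEndpoint; a minimal stand-in illustrates when each path is taken (RelationEndpoint's real constructor lives in n2vc.definitions and may differ; only the attributes used above are modelled here):

from dataclasses import dataclass

@dataclass
class _Endpoint:  # hypothetical stand-in for n2vc.definitions.RelationEndpoint
    model_name: str
    endpoint: str
    vca_id: str

provider = _Endpoint("model-a", "mysql:db", "vca-1")
requirer = _Endpoint("model-b", "wordpress:db", "vca-1")
# Different models (or different VCAs) trigger the offer/consume path;
# otherwise a plain relation is added within one model.
cross_model = (
    provider.model_name != requirer.model_name or provider.vca_id != requirer.vca_id
)  # True here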
+    async def update_vca_status(
+        self, vcastatus: dict, kdu_instance: str, namespace: str = None, **kwargs
+    ):
         """
         Add all configs, actions, executed actions of all applications in a model to vcastatus dict
 
         :param vcastatus dict: dict containing vcastatus
         :param kdu_instance str: The unique id of the KDU instance
         """
         Add all configs, actions, executed actions of all applications in a model to vcastatus dict
 
         :param vcastatus dict: dict containing vcastatus
         :param kdu_instance str: The unique id of the KDU instance
+        :param namespace str: The namespace (model) where the Bundle was deployed
         :param: kwargs: Additional parameters
             vca_id (str): VCA ID
 
         :return: None
         """
+
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+
         libjuju = await self._get_libjuju(kwargs.get("vca_id"))
         try:
-            for model_name in vcastatus:
+            for vca_model_name in vcastatus:
                 # Adding executed actions
-                vcastatus[model_name][
+                vcastatus[vca_model_name][
                     "executedActions"
                     "executedActions"
-                ] = await libjuju.get_executed_actions(kdu_instance)
+                ] = await libjuju.get_executed_actions(model_name=model_name)
 
-                for application in vcastatus[model_name]["applications"]:
+                for application in vcastatus[vca_model_name]["applications"]:
                     # Adding application actions
-                    vcastatus[model_name]["applications"][application][
+                    vcastatus[vca_model_name]["applications"][application][
                         "actions"
                         "actions"
-                    ] = await libjuju.get_actions(application, kdu_instance)
+                    ] = {}
                     # Adding application configs
-                    vcastatus[model_name]["applications"][application][
+                    vcastatus[vca_model_name]["applications"][application][
                         "configs"
                         "configs"
-                    ] = await libjuju.get_application_configs(kdu_instance, application)
+                    ] = await libjuju.get_application_configs(
+                        model_name=model_name, application_name=application
+                    )
 
         except Exception as e:
             self.log.debug("Error in updating vca status: {}".format(str(e)))
@@ -710,10 +823,14 @@ class K8sJujuConnector(K8sConnector):
     ) -> list:
         """Return a list of services of a kdu_instance"""
 
+        namespace = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+
         credentials = self.get_credentials(cluster_uuid=cluster_uuid)
         kubectl = self._get_kubectl(credentials)
         return kubectl.get_services(
-            field_selector="metadata.namespace={}".format(kdu_instance)
+            field_selector="metadata.namespace={}".format(namespace)
         )
 
     async def get_service(
@@ -762,10 +879,7 @@ class K8sJujuConnector(K8sConnector):
         """
         return "cred-{}".format(cluster_uuid)
 
         """
         return "cred-{}".format(cluster_uuid)
 
-    def get_namespace(
-        self,
-        cluster_uuid: str,
-    ) -> str:
+    def get_namespace(self, cluster_uuid: str) -> str:
         """Get the namespace UUID
         Gets the namespace's unique name
 
         """Get the namespace UUID
         Gets the namespace's unique name
 
@@ -798,16 +912,11 @@ class K8sJujuConnector(K8sConnector):
             if not self.libjuju:
                 async with self.loading_libjuju:
                     vca_connection = await get_connection(self._store)
-                    self.libjuju = Libjuju(vca_connection, loop=self.loop, log=self.log)
+                    self.libjuju = Libjuju(vca_connection, log=self.log)
             return self.libjuju
         else:
             vca_connection = await get_connection(self._store, vca_id)
-            return Libjuju(
-                vca_connection,
-                loop=self.loop,
-                log=self.log,
-                n2vc=self,
-            )
+            return Libjuju(vca_connection, log=self.log, n2vc=self)
 
     def _get_kubectl(self, credentials: str) -> Kubectl:
         """
@@ -819,3 +928,34 @@ class K8sJujuConnector(K8sConnector):
         with open(kubecfg.name, "w") as kubecfg_file:
             kubecfg_file.write(credentials)
         return Kubectl(config_file=kubecfg.name)
+
+    def _obtain_namespace(self, kdu_instance: str, namespace: str = None) -> str:
+        """
+        Obtain the namespace/model name to use in the instantiation of a Juju Bundle in K8s. The default namespace is
+        the kdu_instance name. However, if the user passes the namespace where they want to deploy the bundle,
+        that namespace will be used.
+
+        :param kdu_instance: the default KDU instance name
+        :param namespace: the namespace passed by the User
+        """
+
+        # The namespace/model name defaults to the kdu_instance name.
+        # TODO: once the namespace is passed explicitly by every caller, this
+        #  method should simply "return namespace if namespace else kdu_instance".
+
+        # TODO: as noted above, the database lookup below is a temporary
+        #  fallback, kept only to avoid compatibility issues.
+        return (
+            namespace
+            if namespace
+            else self._obtain_namespace_from_db(kdu_instance=kdu_instance)
+        )
+
+    def _obtain_namespace_from_db(self, kdu_instance: str) -> str:
+        db_nsrs = self.db.get_one(
+            table="nsrs", q_filter={"_admin.deployed.K8s.kdu-instance": kdu_instance}
+        )
+        for k8s in db_nsrs["_admin"]["deployed"]["K8s"]:
+            if k8s.get("kdu-instance") == kdu_instance:
+                return k8s.get("namespace")
+        return ""
index a56b6cd..c16c95a 100644 (file)
@@ -16,19 +16,29 @@ import base64
 import logging
 from typing import Dict
 import typing
+import uuid
+import json
 
+from distutils.version import LooseVersion
 
 from kubernetes import client, config
+from kubernetes.client.api import VersionApi
 from kubernetes.client.models import (
     V1ClusterRole,
+    V1Role,
     V1ObjectMeta,
     V1PolicyRule,
     V1ServiceAccount,
     V1ClusterRoleBinding,
+    V1RoleBinding,
     V1RoleRef,
     V1Subject,
+    V1Secret,
+    V1SecretReference,
+    V1Namespace,
 )
 from kubernetes.client.rest import ApiException
+from n2vc.libjuju import retry_callback
 from retrying_async import retry
 
 
@@ -38,6 +48,7 @@ SERVICE_ACCOUNT_ROOT_CA_KEY = "ca.crt"
 CORE_CLIENT = "core_v1"
 RBAC_CLIENT = "rbac_v1"
 STORAGE_CLIENT = "storage_v1"
+CUSTOM_OBJECT_CLIENT = "custom_object"
 
 
 class Kubectl:
@@ -47,6 +58,7 @@ class Kubectl:
             CORE_CLIENT: client.CoreV1Api(),
             RBAC_CLIENT: client.RbacAuthorizationV1Api(),
             STORAGE_CLIENT: client.StorageV1Api(),
+            CUSTOM_OBJECT_CLIENT: client.CustomObjectsApi(),
         }
         self._configuration = config.kube_config.Configuration.get_default_copy()
         self.logger = logging.getLogger("Kubectl")
@@ -154,9 +166,7 @@ class Kubectl:
         )
 
         if len(cluster_roles.items) > 0:
-            raise Exception(
-                "Cluster role with metadata.name={} already exists".format(name)
-            )
+            raise Exception("Role with metadata.name={} already exists".format(name))
 
         metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace)
         # Cluster role
@@ -170,6 +180,46 @@ class Kubectl:
 
         self.clients[RBAC_CLIENT].create_cluster_role(cluster_role)
 
+    async def create_role(
+        self,
+        name: str,
+        labels: Dict[str, str],
+        api_groups: list,
+        resources: list,
+        verbs: list,
+        namespace: str,
+    ):
+        """
+        Create a role with one PolicyRule
+
+        :param: name:       Name of the namespaced Role
+        :param: labels:     Labels for namespaced Role metadata
+        :param: api_groups: List with api-groups allowed in the policy rule
+        :param: resources:  List with resources allowed in the policy rule
+        :param: verbs:      List with verbs allowed in the policy rule
+        :param: namespace:  Kubernetes namespace for Role metadata
+
+        :return: None
+        """
+
+        roles = self.clients[RBAC_CLIENT].list_namespaced_role(
+            namespace, field_selector="metadata.name={}".format(name)
+        )
+
+        if len(roles.items) > 0:
+            raise Exception("Role with metadata.name={} already exists".format(name))
+
+        metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace)
+
+        role = V1Role(
+            metadata=metadata,
+            rules=[
+                V1PolicyRule(api_groups=api_groups, resources=resources, verbs=verbs),
+            ],
+        )
+
+        self.clients[RBAC_CLIENT].create_namespaced_role(namespace, role)
+
     def delete_cluster_role(self, name: str):
         """
         Delete a cluster role
@@ -178,6 +228,58 @@ class Kubectl:
         """
         self.clients[RBAC_CLIENT].delete_cluster_role(name)
 
         """
         self.clients[RBAC_CLIENT].delete_cluster_role(name)
 
+    def _get_kubectl_version(self):
+        version = VersionApi().get_code()
+        return "{}.{}".format(version.major, version.minor)
+
+    def _need_to_create_new_secret(self):
+        min_k8s_version = "1.24"
+        current_k8s_version = self._get_kubectl_version()
+        return LooseVersion(min_k8s_version) <= LooseVersion(current_k8s_version)
+
+    def _get_secret_name(self, service_account_name: str):
+        random_alphanum = str(uuid.uuid4())[:5]
+        return "{}-token-{}".format(service_account_name, random_alphanum)
+
+    def _create_service_account_secret(
+        self, service_account_name: str, namespace: str, secret_name: str
+    ):
+        """
+        Create a secret for the service account. K8s version >= 1.24
+
+        :param: service_account_name: Name of the service account
+        :param: namespace:  Kubernetes namespace for service account metadata
+        :param: secret_name: Name of the secret
+        """
+        v1_core = self.clients[CORE_CLIENT]
+        secrets = v1_core.list_namespaced_secret(
+            namespace, field_selector="metadata.name={}".format(secret_name)
+        ).items
+
+        if len(secrets) > 0:
+            raise Exception(
+                "Secret with metadata.name={} already exists".format(secret_name)
+            )
+
+        annotations = {"kubernetes.io/service-account.name": service_account_name}
+        metadata = V1ObjectMeta(
+            name=secret_name, namespace=namespace, annotations=annotations
+        )
+        type = "kubernetes.io/service-account-token"
+        secret = V1Secret(metadata=metadata, type=type)
+        v1_core.create_namespaced_secret(namespace, secret)
+
+    def _get_secret_reference_list(self, namespace: str, secret_name: str):
+        """
+        Return a secret reference list with one secret.
+        K8s version >= 1.24
+
+        :param: namespace:  Kubernetes namespace for service account metadata
+        :param: secret_name: Name of the secret
+        :rtype: list[V1SecretReference]
+        """
+        return [V1SecretReference(name=secret_name, namespace=namespace)]
+
     def create_service_account(
         self,
         name: str,
@@ -192,7 +294,8 @@ class Kubectl:
         :param: namespace:  Kubernetes namespace for service account metadata
                             Default: kube-system
         """
-        service_accounts = self.clients[CORE_CLIENT].list_namespaced_service_account(
+        v1_core = self.clients[CORE_CLIENT]
+        service_accounts = v1_core.list_namespaced_service_account(
             namespace, field_selector="metadata.name={}".format(name)
         )
         if len(service_accounts.items) > 0:
@@ -201,11 +304,16 @@ class Kubectl:
             )
 
         metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace)
-        service_account = V1ServiceAccount(metadata=metadata)
 
-        self.clients[CORE_CLIENT].create_namespaced_service_account(
-            namespace, service_account
-        )
+        if self._need_to_create_new_secret():
+            secret_name = self._get_secret_name(name)
+            secrets = self._get_secret_reference_list(namespace, secret_name)
+            service_account = V1ServiceAccount(metadata=metadata, secrets=secrets)
+            v1_core.create_namespaced_service_account(namespace, service_account)
+            self._create_service_account_secret(name, namespace, secret_name)
+        else:
+            service_account = V1ServiceAccount(metadata=metadata)
+            v1_core.create_namespaced_service_account(namespace, service_account)
 
     def delete_service_account(self, name: str, namespace: str = "kube-system"):
         """
@@ -241,6 +349,44 @@ class Kubectl:
         )
         self.clients[RBAC_CLIENT].create_cluster_role_binding(role_binding)
 
+    async def create_role_binding(
+        self,
+        name: str,
+        role_name: str,
+        sa_name: str,
+        labels: Dict[str, str],
+        namespace: str,
+    ):
+        """
+        Create a namespaced role binding
+
+        :param: name:       Name of the namespaced Role Binding
+        :param: role_name:  Name of the namespaced Role to be bound
+        :param: sa_name:    Name of the Service Account to be bound
+        :param: labels:     Labels for Role Binding metadata
+        :param: namespace:  Kubernetes namespace for Role Binding metadata
+
+        :return: None
+        """
+        role_bindings = self.clients[RBAC_CLIENT].list_namespaced_role_binding(
+            namespace, field_selector="metadata.name={}".format(name)
+        )
+        if len(role_bindings.items) > 0:
+            raise Exception(
+                "Role Binding with metadata.name={} already exists".format(name)
+            )
+
+        role_binding = V1RoleBinding(
+            metadata=V1ObjectMeta(name=name, labels=labels),
+            role_ref=V1RoleRef(kind="Role", name=role_name, api_group=""),
+            subjects=[
+                V1Subject(kind="ServiceAccount", name=sa_name, namespace=namespace)
+            ],
+        )
+        self.clients[RBAC_CLIENT].create_namespaced_role_binding(
+            namespace, role_binding
+        )
+
     def delete_cluster_role_binding(self, name: str):
         """
         Delete a cluster role binding
@@ -253,6 +399,7 @@ class Kubectl:
         attempts=10,
         delay=1,
         fallback=Exception("Failed getting the secret from service account"),
+        callback=retry_callback,
     )
     async def get_secret_data(
         self, name: str, namespace: str = "kube-system"
@@ -283,6 +430,7 @@ class Kubectl:
             raise Exception(
                 "Failed getting the secret from service account {}".format(name)
             )
+        # TODO: refactor to use get_secret_content
         secret = v1_core.list_namespaced_secret(
             namespace, field_selector="metadata.name={}".format(secret_name)
         ).items[0]
@@ -294,3 +442,176 @@ class Kubectl:
             base64.b64decode(token).decode("utf-8"),
             base64.b64decode(client_certificate_data).decode("utf-8"),
         )
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed getting data from the secret"),
+    )
+    async def get_secret_content(
+        self,
+        name: str,
+        namespace: str,
+    ) -> dict:
+        """
+        Get secret data
+
+        :param: name:       Name of the secret
+        :param: namespace:  Name of the namespace where the secret is stored
+
+        :return: Dictionary with secret's data
+        """
+        v1_core = self.clients[CORE_CLIENT]
+
+        secret = v1_core.read_namespaced_secret(name, namespace)
+
+        return secret.data
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed creating the secret"),
+    )
+    async def create_secret(
+        self, name: str, data: dict, namespace: str, secret_type: str
+    ):
+        """
+        Create a secret
+
+        :param: name:        Name of the secret
+        :param: data:        Dict with data content. Values must be already base64 encoded
+        :param: namespace:   Name of the namespace where the secret will be stored
+        :param: secret_type: Type of the secret, e.g., Opaque, kubernetes.io/service-account-token, kubernetes.io/tls
+
+        :return: None
+        """
+        v1_core = self.clients[CORE_CLIENT]
+        metadata = V1ObjectMeta(name=name, namespace=namespace)
+        secret = V1Secret(metadata=metadata, data=data, type=secret_type)
+        v1_core.create_namespaced_secret(namespace, secret)
+
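A hedged usage sketch (names illustrative); note that the values must be base64-encoded by the caller, as the docstring states:

import base64

await kubectl.create_secret(
    name="my-token",
    data={"token": base64.b64encode(b"s3cr3t").decode("utf-8")},
    namespace="osm",
    secret_type="Opaque",
)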
+    async def create_certificate(
+        self,
+        namespace: str,
+        name: str,
+        dns_prefix: str,
+        secret_name: str,
+        usages: list,
+        issuer_name: str,
+    ):
+        """
+        Creates cert-manager certificate object
+
+        :param: namespace:       Name of the namespace where the certificate and secret is stored
+        :param: name:            Name of the certificate object
+        :param: dns_prefix:      Prefix for the dnsNames; it is combined with the common k8s svc suffixes
+        :param: secret_name:     Name of the secret created by cert-manager
+        :param: usages:          List of X.509 key usages
+        :param: issuer_name:     Name of the cert-manager's Issuer or ClusterIssuer object
+
+        """
+        certificate_body = {
+            "apiVersion": "cert-manager.io/v1",
+            "kind": "Certificate",
+            "metadata": {"name": name, "namespace": namespace},
+            "spec": {
+                "secretName": secret_name,
+                "privateKey": {
+                    "rotationPolicy": "Always",
+                    "algorithm": "ECDSA",
+                    "size": 256,
+                },
+                "duration": "8760h",  # 1 Year
+                "renewBefore": "2208h",  # 9 months
+                "subject": {"organizations": ["osm"]},
+                "commonName": "osm",
+                "isCA": False,
+                "usages": usages,
+                "dnsNames": [
+                    "{}.{}".format(dns_prefix, namespace),
+                    "{}.{}.svc".format(dns_prefix, namespace),
+                    "{}.{}.svc.cluster".format(dns_prefix, namespace),
+                    "{}.{}.svc.cluster.local".format(dns_prefix, namespace),
+                ],
+                "issuerRef": {"name": issuer_name, "kind": "ClusterIssuer"},
+            },
+        }
+        client = self.clients[CUSTOM_OBJECT_CLIENT]
+        try:
+            client.create_namespaced_custom_object(
+                group="cert-manager.io",
+                plural="certificates",
+                version="v1",
+                body=certificate_body,
+                namespace=namespace,
+            )
+        except ApiException as e:
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "alreadyexists":
+                self.logger.warning("Certificate already exists: {}".format(e))
+            else:
+                raise e
+
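A hedged call sketch for the helper above (all names illustrative; the referenced ClusterIssuer must already exist in the cluster):

await kubectl.create_certificate(
    namespace="osm",
    name="vca-cert",
    dns_prefix="vca",
    secret_name="vca-cert-secret",
    usages=["server auth"],
    issuer_name="osm-ca-issuer",
)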
+    async def delete_certificate(self, namespace, object_name):
+        client = self.clients[CUSTOM_OBJECT_CLIENT]
+        try:
+            client.delete_namespaced_custom_object(
+                group="cert-manager.io",
+                plural="certificates",
+                version="v1",
+                name=object_name,
+                namespace=namespace,
+            )
+        except ApiException as e:
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "notfound":
+                self.logger.warning("Certificate already deleted: {}".format(e))
+            else:
+                raise e
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed creating the namespace"),
+    )
+    async def create_namespace(self, name: str, labels: dict = None):
+        """
+        Create a namespace
+
+        :param: name:       Name of the namespace to be created
+        :param: labels:     Dictionary with labels for the new namespace
+
+        """
+        v1_core = self.clients[CORE_CLIENT]
+        metadata = V1ObjectMeta(name=name, labels=labels)
+        namespace = V1Namespace(
+            metadata=metadata,
+        )
+
+        try:
+            v1_core.create_namespace(namespace)
+            self.logger.debug("Namespace created: {}".format(name))
+        except ApiException as e:
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "alreadyexists":
+                self.logger.warning("Namespace already exists: {}".format(e))
+            else:
+                raise e
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed deleting the namespace"),
+    )
+    async def delete_namespace(self, name: str):
+        """
+        Delete a namespace
+
+        :param: name:       Name of the namespace to be deleted
+
+        """
+        try:
+            self.clients[CORE_CLIENT].delete_namespace(name)
+        except ApiException as e:
+            if e.reason == "Not Found":
+                self.logger.warning("Namespace already deleted: {}".format(e))
index b897bc6..f36ff39 100644 (file)
 
 import asyncio
 import logging
+import os
 import typing
 import typing
 
 import time
 
 import juju.errors
 
 import time
 
 import juju.errors
+from juju.bundle import BundleHandler
 from juju.model import Model
 from juju.machine import Machine
 from juju.application import Application
 from juju.unit import Unit
 from juju.model import Model
 from juju.machine import Machine
 from juju.application import Application
 from juju.unit import Unit
+from juju.url import URL
+from juju.version import DEFAULT_ARCHITECTURE
 from juju.client._definitions import (
     FullStatus,
     QueryApplicationOffersResults,
@@ -33,6 +38,7 @@ from juju.controller import Controller
 from juju.client import client
 from juju import tag
 
+from n2vc.definitions import Offer, RelationEndpoint
 from n2vc.juju_watcher import JujuModelWatcher
 from n2vc.provisioner import AsyncSSHProvisioner
 from n2vc.n2vc_conn import N2VCConnector
@@ -55,11 +61,18 @@ from retrying_async import retry
 RBAC_LABEL_KEY_NAME = "rbac-id"
 
 
 RBAC_LABEL_KEY_NAME = "rbac-id"
 
 
+@asyncio.coroutine
+def retry_callback(attempt, exc, args, kwargs, delay=0.5, *, loop):
+    # Specifically overridden from upstream implementation so it can
+    # continue to work with Python 3.10
+    yield from asyncio.sleep(attempt * delay)
+    return retry
+
+
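The callback plugs into retrying_async exactly as the decorators changed later in this commit show; a minimal sketch (coroutine name hypothetical):

from retrying_async import retry

@retry(attempts=3, delay=5, callback=retry_callback)
async def fetch_status():
    ...  # any coroutine whose failures should be retried with backoff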
 class Libjuju:
     def __init__(
         self,
         vca_connection: Connection,
-        loop: asyncio.AbstractEventLoop = None,
         log: logging.Logger = None,
         n2vc: N2VCConnector = None,
     ):
@@ -67,7 +80,6 @@ class Libjuju:
         Constructor
 
         :param: vca_connection:         n2vc.vca.connection object
-        :param: loop:                   Asyncio loop
         :param: log:                    Logger
         :param: n2vc:                   N2VC object
         """
@@ -76,15 +88,13 @@ class Libjuju:
         self.n2vc = n2vc
         self.vca_connection = vca_connection
 
-        self.loop = loop or asyncio.get_event_loop()
-        self.loop.set_exception_handler(self.handle_exception)
-        self.creating_model = asyncio.Lock(loop=self.loop)
+        self.creating_model = asyncio.Lock()
 
         if self.vca_connection.is_default:
             self.health_check_task = self._create_health_check_task()
 
     def _create_health_check_task(self):
 
         if self.vca_connection.is_default:
             self.health_check_task = self._create_health_check_task()
 
     def _create_health_check_task(self):
-        return self.loop.create_task(self.health_check())
+        return asyncio.get_event_loop().create_task(self.health_check())
 
     async def get_controller(self, timeout: float = 60.0) -> Controller:
         """
@@ -94,7 +104,7 @@ class Libjuju:
         """
         controller = None
         try:
         """
         controller = None
         try:
-            controller = Controller(loop=self.loop)
+            controller = Controller()
             await asyncio.wait_for(
                 controller.connect(
                     endpoint=self.vca_connection.data.endpoints,
@@ -121,7 +131,10 @@ class Libjuju:
             )
             if controller:
                 await self.disconnect_controller(controller)
-            raise JujuControllerFailedConnecting(e)
+
+            raise JujuControllerFailedConnecting(
+                f"Error connecting to Juju controller: {e}"
+            )
 
     async def disconnect(self):
         """Disconnect"""
@@ -146,7 +159,7 @@ class Libjuju:
         if controller:
             await controller.disconnect()
 
-    @retry(attempts=3, delay=5, timeout=None)
+    @retry(attempts=3, delay=5, timeout=None, callback=retry_callback)
     async def add_model(self, model_name: str, cloud: VcaCloud):
         """
         Create model
@@ -261,7 +274,7 @@ class Libjuju:
             await self.disconnect_controller(controller)
         return application_configs
 
-    @retry(attempts=3, delay=5)
+    @retry(attempts=3, delay=5, callback=retry_callback)
     async def get_model(self, controller: Controller, model_name: str) -> Model:
         """
         Get model from controller
@@ -545,27 +558,122 @@ class Libjuju:
         return machine_id
 
     async def deploy(
-        self, uri: str, model_name: str, wait: bool = True, timeout: float = 3600
+        self,
+        uri: str,
+        model_name: str,
+        wait: bool = True,
+        timeout: float = 3600,
+        instantiation_params: dict = None,
     ):
         """
         Deploy bundle or charm: Similar to the juju CLI command `juju deploy`
 
-        :param: uri:            Path or Charm Store uri in which the charm or bundle can be found
-        :param: model_name:     Model name
-        :param: wait:           Indicates whether to wait or not until all applications are active
-        :param: timeout:        Time in seconds to wait until all applications are active
+        :param uri:            Path or Charm Store uri in which the charm or bundle can be found
+        :param model_name:     Model name
+        :param wait:           Indicates whether to wait or not until all applications are active
+        :param timeout:        Time in seconds to wait until all applications are active
+        :param instantiation_params: Instantiation parameters, applied as an overlay bundle on top of the primary bundle.
         """
         controller = await self.get_controller()
         model = await self.get_model(controller, model_name)
         """
         controller = await self.get_controller()
         model = await self.get_model(controller, model_name)
+        overlays = []
         try:
-            await model.deploy(uri, trust=True)
+            await self._validate_instantiation_params(uri, model, instantiation_params)
+            overlays = self._get_overlays(model_name, instantiation_params)
+            await model.deploy(uri, trust=True, overlays=overlays)
             if wait:
                 await JujuModelWatcher.wait_for_model(model, timeout=timeout)
                 self.log.debug("All units active in model {}".format(model_name))
         finally:
+            self._remove_overlay_file(overlays)
             await self.disconnect_model(model)
             await self.disconnect_controller(controller)
 
+    async def _validate_instantiation_params(
+        self, uri: str, model, instantiation_params: dict
+    ) -> None:
+        """Checks if all the applications in instantiation_params
+        exist in the original bundle.
+
+        Raises:
+            JujuApplicationNotFound if there is an invalid app in
+            the instantiation params.
+        """
+        overlay_apps = self._get_apps_in_instantiation_params(instantiation_params)
+        if not overlay_apps:
+            return
+        original_apps = await self._get_apps_in_original_bundle(uri, model)
+        if not all(app in original_apps for app in overlay_apps):
+            raise JujuApplicationNotFound(
+                "Cannot find application {} in original bundle {}".format(
+                    overlay_apps, original_apps
+                )
+            )
+
+    async def _get_apps_in_original_bundle(self, uri: str, model) -> set:
+        """Bundle is downloaded in BundleHandler.fetch_plan.
+        That method takes care of opening and exception handling.
+
+        Resolve method gets all the information regarding the channel,
+        track, revision, type, source.
+
+        Returns:
+            Set with the names of the applications in original bundle.
+        """
+        url = URL.parse(uri)
+        architecture = DEFAULT_ARCHITECTURE  # only AMD64 is allowed
+        res = await model.deploy_types[str(url.schema)].resolve(
+            url, architecture, entity_url=uri
+        )
+        handler = BundleHandler(model, trusted=True, forced=False)
+        await handler.fetch_plan(url, res.origin)
+        return handler.applications
+
+    def _get_apps_in_instantiation_params(self, instantiation_params: dict) -> list:
+        """Extract applications key in instantiation params.
+
+        Returns:
+            List with the names of the applications in instantiation params.
+
+        Raises:
+            JujuError if applications key is not found.
+        """
+        if not instantiation_params:
+            return []
+        try:
+            return [key for key in instantiation_params.get("applications")]
+        except Exception as e:
+            raise JujuError("Invalid overlay format. {}".format(str(e)))
+
+    def _get_overlays(self, model_name: str, instantiation_params: dict) -> list:
+        """Creates a temporary overlay file which includes the instantiation params.
+        Only one overlay file is created.
+
+        Returns:
+            List with one overlay filename. Empty list if there are no instantiation params.
+        """
+        if not instantiation_params:
+            return []
+        file_name = model_name + "-overlay.yaml"
+        self._write_overlay_file(file_name, instantiation_params)
+        return [file_name]
+
+    def _write_overlay_file(self, file_name: str, instantiation_params: dict) -> None:
+        with open(file_name, "w") as file:
+            yaml.dump(instantiation_params, file)
+
+    def _remove_overlay_file(self, overlay: list) -> None:
+        """Overlay contains either one or zero file names."""
+        if not overlay:
+            return
+        try:
+            filename = overlay[0]
+            os.remove(filename)
+        except OSError as e:
+            self.log.warning(
+                "Overlay file {} could not be removed: {}".format(filename, e)
+            )
+
     async def add_unit(
         self,
         application_name: str,
@@ -594,7 +702,6 @@ class Libjuju:
             application = self._get_application(model, application_name)
 
             if application is not None:
-
                # Checks if the given machine id is in the model,
                # otherwise the function raises an error
                 _machine, _series = self._get_machine_info(model, machine_id)
@@ -749,7 +856,6 @@ class Libjuju:
 
         try:
             if application_name not in model.applications:
-
                 if machine_id is not None:
                     machine, series = self._get_machine_info(model, machine_id)
 
@@ -791,12 +897,164 @@ class Libjuju:
                 raise JujuApplicationExists(
                     "Application {} exists".format(application_name)
                 )
+        except juju.errors.JujuError as e:
+            if "already exists" in e.message:
+                raise JujuApplicationExists(
+                    "Application {} exists".format(application_name)
+                )
+            else:
+                raise e
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+        return application
+
+    async def upgrade_charm(
+        self,
+        application_name: str,
+        path: str,
+        model_name: str,
+        total_timeout: float = None,
+        **kwargs,
+    ):
+        """Upgrade Charm
+
+        :param: application_name:   Application name
+        :param: model_name:         Model name
+        :param: path:               Local path to the charm
+        :param: total_timeout:      Timeout for the entity to be active
+
+        :return: (str, str): (output and status)
+        """
+
+        self.log.debug(
+            "Upgrading charm {} in model {} from path {}".format(
+                application_name, model_name, path
+            )
+        )
+
+        await self.resolve_application(
+            model_name=model_name, application_name=application_name
+        )
+
+        # Get controller
+        controller = await self.get_controller()
+
+        # Get model
+        model = await self.get_model(controller, model_name)
+
+        try:
+            # Get application
+            application = self._get_application(
+                model,
+                application_name=application_name,
+            )
+            if application is None:
+                raise JujuApplicationNotFound(
+                    "Cannot find application {} to upgrade".format(application_name)
+                )
+
+            await application.refresh(path=path)
+
+            self.log.debug(
+                "Wait until charm upgrade is completed for application {} (model={})".format(
+                    application_name, model_name
+                )
+            )
+
+            await JujuModelWatcher.ensure_units_idle(
+                model=model, application=application
+            )
+
+            if application.status == "error":
+                error_message = "Unknown"
+                for unit in application.units:
+                    if (
+                        unit.workload_status == "error"
+                        and unit.workload_status_message != ""
+                    ):
+                        error_message = unit.workload_status_message
+
+                message = "Application {} failed update in {}: {}".format(
+                    application_name, model_name, error_message
+                )
+                self.log.error(message)
+                raise JujuError(message=message)
+
+            self.log.debug(
+                "Application {} is ready in model {}".format(
+                    application_name, model_name
+                )
+            )
+
         finally:
             await self.disconnect_model(model)
             await self.disconnect_controller(controller)
 
         return application
 
+    async def resolve_application(self, model_name: str, application_name: str):
+        controller = await self.get_controller()
+        model = await self.get_model(controller, model_name)
+
+        try:
+            application = self._get_application(
+                model,
+                application_name=application_name,
+            )
+            if application is None:
+                raise JujuApplicationNotFound(
+                    "Cannot find application {} to resolve".format(application_name)
+                )
+
+            while application.status == "error":
+                for unit in application.units:
+                    if unit.workload_status == "error":
+                        self.log.debug(
+                            "Model {}, Application {}, Unit {} in error state, resolving".format(
+                                model_name, application_name, unit.entity_id
+                            )
+                        )
+                        try:
+                            await unit.resolved(retry=False)
+                        except Exception:
+                            pass
+
+                await asyncio.sleep(1)
+
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+    async def resolve(self, model_name: str):
+        controller = await self.get_controller()
+        model = await self.get_model(controller, model_name)
+        all_units_active = False
+        try:
+            while not all_units_active:
+                all_units_active = True
+                for application_name, application in model.applications.items():
+                    if application.status == "error":
+                        for unit in application.units:
+                            if unit.workload_status == "error":
+                                self.log.debug(
+                                    "Model {}, Application {}, Unit {} in error state, resolving".format(
+                                        model_name, application_name, unit.entity_id
+                                    )
+                                )
+                                try:
+                                    await unit.resolved(retry=False)
+                                    all_units_active = False
+                                except Exception:
+                                    pass
+
+                if not all_units_active:
+                    await asyncio.sleep(5)
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
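
A hedged usage sketch for the new upgrade path (application, path, model and timeout values are hypothetical); resolve_application runs first so units stuck in "error" cannot block the refresh:

    # assuming "libjuju" is an initialized Libjuju instance
    async def upgrade_my_charm(libjuju):
        await libjuju.upgrade_charm(
            application_name="my-app",
            path="/tmp/charms/my-app.charm",
            model_name="my-model",
            total_timeout=300.0,
        )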
+
     async def scale_application(
         self,
         model_name: str,
@@ -1109,10 +1367,10 @@ class Libjuju:
         try:
             await model.add_relation(endpoint_1, endpoint_2)
         except juju.errors.JujuAPIError as e:
-            if "not found" in e.message:
+            if self._relation_is_not_found(e):
                 self.log.warning("Relation not found: {}".format(e.message))
                 return
-            if "already exists" in e.message:
+            if self._relation_already_exist(e):
                 self.log.warning("Relation already exists: {}".format(e.message))
                 return
             # another exception, raise it
@@ -1121,28 +1379,83 @@ class Libjuju:
             await self.disconnect_model(model)
             await self.disconnect_controller(controller)
 
+    def _relation_is_not_found(self, juju_error):
+        text = "not found"
+        return (text in juju_error.message) or (
+            juju_error.error_code and text in juju_error.error_code
+        )
+
+    def _relation_already_exist(self, juju_error):
+        text = "already exists"
+        return (text in juju_error.message) or (
+            juju_error.error_code and text in juju_error.error_code
+        )
+
+    async def offer(self, endpoint: RelationEndpoint) -> Offer:
+        """
+        Create an offer from a RelationEndpoint
+
+        :param: endpoint: Relation endpoint
+
+        :return: Offer object
+        """
+        model_name = endpoint.model_name
+        offer_name = f"{endpoint.application_name}-{endpoint.endpoint_name}"
+        controller = await self.get_controller()
+        model = None
+        try:
+            model = await self.get_model(controller, model_name)
+            await model.create_offer(endpoint.endpoint, offer_name=offer_name)
+            offer_list = await self._list_offers(model_name, offer_name=offer_name)
+            if offer_list:
+                return Offer(offer_list[0].offer_url)
+            else:
+                raise Exception("offer was not created")
+        except juju.errors.JujuError as e:
+            if "application offer already exists" not in e.message:
+                raise e
+        finally:
+            if model:
+                await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
     async def consume(
         self,
-        offer_url: str,
         model_name: str,
-    ):
+        offer: Offer,
+        provider_libjuju: "Libjuju",
+    ) -> str:
        """
-        Adds a remote offer to the model. Relations can be created later using "juju relate".
+        Consumes a remote offer in the model. Relations can be created later using "juju relate".

-        :param: offer_url:      Offer Url
-        :param: model_name:     Model name
+        :param: model_name:             Model name
+        :param: offer:                  Offer object to consume
+        :param: provider_libjuju:       Libjuju object of the provider endpoint
 
         :raises ParseError if there's a problem parsing the offer_url
        :raises JujuError if remote offer includes an endpoint
        :raises JujuAPIError if the operation is not successful
+
+        :returns: Saas name. It is the application name in the model that references the remote application.
        """
+        saas_name = f'{offer.name}-{offer.model_name.replace("-", "")}'
+        if offer.vca_id:
+            saas_name = f"{saas_name}-{offer.vca_id}"
         controller = await self.get_controller()
-        model = await controller.get_model(model_name)
-
+        model = None
+        provider_controller = None
         try:
-            await model.consume(offer_url)
+            model = await controller.get_model(model_name)
+            provider_controller = await provider_libjuju.get_controller()
+            await model.consume(
+                offer.url, application_alias=saas_name, controller=provider_controller
+            )
+            return saas_name
         finally:
         finally:
-            await self.disconnect_model(model)
+            if model:
+                await self.disconnect_model(model)
+            if provider_controller:
+                await provider_libjuju.disconnect_controller(provider_controller)
             await self.disconnect_controller(controller)
 
     async def destroy_model(self, model_name: str, total_timeout: float = 1800):
             await self.disconnect_controller(controller)
 
     async def destroy_model(self, model_name: str, total_timeout: float = 1800):
@@ -1157,28 +1470,38 @@ class Libjuju:
         model = None
         try:
             if not await self.model_exists(model_name, controller=controller):
         model = None
         try:
             if not await self.model_exists(model_name, controller=controller):
+                self.log.warn(f"Model {model_name} doesn't exist")
                 return
 
                 return
 
-            self.log.debug("Destroying model {}".format(model_name))
-
+            self.log.debug(f"Getting model {model_name} to be destroyed")
             model = await self.get_model(controller, model_name)
             model = await self.get_model(controller, model_name)
+            self.log.debug(f"Destroying manual machines in model {model_name}")
             # Destroy machines that are manually provisioned
             # and still are in pending state
             await self._destroy_pending_machines(model, only_manual=True)
             await self.disconnect_model(model)
 
             # Destroy machines that are manually provisioned
             # and still are in pending state
             await self._destroy_pending_machines(model, only_manual=True)
             await self.disconnect_model(model)
 
-            await self._destroy_model(
-                model_name,
-                controller,
+            await asyncio.wait_for(
+                self._destroy_model(model_name, controller),
                 timeout=total_timeout,
             )
+        except Exception as e:
+            if not await self.model_exists(model_name, controller=controller):
+                self.log.warn(
+                    f"Failed deleting model {model_name}: model doesn't exist"
+                )
+                return
+            self.log.warn(f"Failed deleting model {model_name}: {e}")
+            raise e
         finally:
             if model:
                 await self.disconnect_model(model)
             await self.disconnect_controller(controller)
 
     async def _destroy_model(
-        self, model_name: str, controller: Controller, timeout: float = 1800
+        self,
+        model_name: str,
+        controller: Controller,
     ):
         """
         Destroy model from controller
@@ -1187,22 +1510,42 @@ class Libjuju:
         :param: controller: Controller object
         :param: timeout: Timeout in seconds
         """
         :param: controller: Controller object
         :param: timeout: Timeout in seconds
         """
+        self.log.debug(f"Destroying model {model_name}")
 
-        async def _destroy_model_loop(model_name: str, controller: Controller):
-            while await self.model_exists(model_name, controller=controller):
+        async def _destroy_model_gracefully(model_name: str, controller: Controller):
+            self.log.info(f"Gracefully deleting model {model_name}")
+            resolved = False
+            while model_name in await controller.list_models():
+                if not resolved:
+                    await self.resolve(model_name)
+                    resolved = True
+                await controller.destroy_model(model_name, destroy_storage=True)
+
+                await asyncio.sleep(5)
+            self.log.info(f"Model {model_name} deleted gracefully")
+
+        async def _destroy_model_forcefully(model_name: str, controller: Controller):
+            self.log.info(f"Forcefully deleting model {model_name}")
+            while model_name in await controller.list_models():
                 await controller.destroy_model(
-                    model_name, destroy_storage=True, force=True, max_wait=0
+                    model_name, destroy_storage=True, force=True, max_wait=60
                 )
                 await asyncio.sleep(5)
+            self.log.info(f"Model {model_name} deleted forcefully")
 
         try:
-            await asyncio.wait_for(
-                _destroy_model_loop(model_name, controller), timeout=timeout
-            )
-        except asyncio.TimeoutError:
-            raise Exception(
-                "Timeout waiting for model {} to be destroyed".format(model_name)
-            )
+            try:
+                await asyncio.wait_for(
+                    _destroy_model_gracefully(model_name, controller), timeout=120
+                )
+            except asyncio.TimeoutError:
+                await _destroy_model_forcefully(model_name, controller)
+        except juju.errors.JujuError as e:
+            if any("has been removed" in error for error in e.errors):
+                return
+            if any("model not found" in error for error in e.errors):
+                return
+            raise e
 
     async def destroy_application(
         self, model_name: str, application_name: str, total_timeout: float
@@ -1301,10 +1644,6 @@ class Libjuju:
                     await self.disconnect_model(model)
                 await self.disconnect_controller(controller)
 
-    def handle_exception(self, loop, context):
-        # All unhandled exceptions by libjuju are handled here.
-        pass
-
     async def health_check(self, interval: float = 300.0):
         """
         Health check to make sure controller and controller_model connections are OK
@@ -1339,17 +1678,29 @@ class Libjuju:
         finally:
             await self.disconnect_controller(controller)
 
-    async def list_offers(self, model_name: str) -> QueryApplicationOffersResults:
-        """List models with certain names
+    async def _list_offers(
+        self, model_name: str, offer_name: str = None
+    ) -> QueryApplicationOffersResults:
+        """
+        List offers within a model
 
         :param: model_name: Model name
+        :param: offer_name: Offer name to filter.
 
-        :return:            Returns list of offers
+        :return: Returns application offers results in the model
        """

        controller = await self.get_controller()
        try:
-            return await controller.list_offers(model_name)
+            offers = (await controller.list_offers(model_name)).results
+            if offer_name:
+                matching_offer = []
+                for offer in offers:
+                    if offer.offer_name == offer_name:
+                        matching_offer.append(offer)
+                        break
+                offers = matching_offer
+            return offers
         finally:
             await self.disconnect_controller(controller)
 
@@ -1497,7 +1848,9 @@ class Libjuju:
         finally:
             await self.disconnect_controller(controller)
 
-    @retry(attempts=20, delay=5, fallback=JujuLeaderUnitNotFound())
+    @retry(
+        attempts=20, delay=5, fallback=JujuLeaderUnitNotFound(), callback=retry_callback
+    )
     async def _get_leader_unit(self, application: Application) -> Unit:
         unit = None
         for u in application.units:
diff --git a/n2vc/loggable.py b/n2vc/loggable.py
index d588a1d..d129b4b 100644 (file)
@@ -31,7 +31,6 @@ import time
 
 class Loggable:
     def __init__(self, log, log_to_console: bool = False, prefix: str = ""):
-
         self._last_log_time = None  # used for time increment in logging
         self._log_to_console = log_to_console
         self._prefix = prefix
@@ -93,7 +92,6 @@ class Loggable:
         include_thread: bool = False,
         include_coroutine: bool = True,
     ) -> str:
-
         # time increment from last log
         now = time.perf_counter()
         if self._last_log_time is None:
@@ -133,7 +131,7 @@ class Loggable:
         coroutine_id = ""
         if include_coroutine:
             try:
-                if asyncio.Task.current_task() is not None:
+                if asyncio.current_task() is not None:
 
                     def print_cor_name(c):
                         import inspect
@@ -145,7 +143,7 @@ class Loggable:
                         except Exception:
                             pass
 
-                    coro = asyncio.Task.current_task()._coro
+                    coro = asyncio.current_task()._coro
                     coroutine_id = "coro-{} {}()".format(
                         hex(id(coro))[2:], print_cor_name(coro)
                     )
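
For context: asyncio.Task.current_task() was removed in Python 3.9; the module-level asyncio.current_task() used above is its replacement. A minimal sketch:

    import asyncio

    async def main():
        task = asyncio.current_task()  # returns None when no task is running
        print(task.get_name())

    asyncio.run(main())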
diff --git a/n2vc/n2vc_conn.py b/n2vc/n2vc_conn.py
index 6b0df89..01d7df8 100644 (file)
@@ -24,6 +24,7 @@
 import abc
 import asyncio
 from http import HTTPStatus
+from shlex import quote
 import os
 import shlex
 import subprocess
@@ -54,7 +55,6 @@ class N2VCConnector(abc.ABC, Loggable):
         db: object,
         fs: object,
         log: object,
-        loop: object,
         on_update_db=None,
         **kwargs,
     ):
@@ -64,7 +64,6 @@ class N2VCConnector(abc.ABC, Loggable):
         :param object fs: FileSystem object managing the package artifacts (repo common
             FsBase)
         :param object log: the logging object to log to
-        :param object loop: the loop to use for asyncio (default current thread loop)
         :param on_update_db: callback called when n2vc connector updates database.
             Received arguments:
             table: e.g. "nsrs"
@@ -85,7 +84,6 @@ class N2VCConnector(abc.ABC, Loggable):
         # store arguments into self
         self.db = db
         self.fs = fs
-        self.loop = loop or asyncio.get_event_loop()
         self.on_update_db = on_update_db
 
         # generate private/public key-pair
@@ -118,19 +116,27 @@ class N2VCConnector(abc.ABC, Loggable):
             self.log.warning("No HOME environment variable, using /tmp")
             homedir = "/tmp"
         sshdir = "{}/.ssh".format(homedir)
+        sshdir = os.path.realpath(os.path.normpath(os.path.abspath(sshdir)))
         if not os.path.exists(sshdir):
             os.mkdir(sshdir)
 
         self.private_key_path = "{}/id_n2vc_rsa".format(sshdir)
+        self.private_key_path = os.path.realpath(
+            os.path.normpath(os.path.abspath(self.private_key_path))
+        )
         self.public_key_path = "{}.pub".format(self.private_key_path)
+        self.public_key_path = os.path.realpath(
+            os.path.normpath(os.path.abspath(self.public_key_path))
+        )
 
         # If we don't have a key generated, then we have to generate it using ssh-keygen
         if not os.path.exists(self.private_key_path):
-            cmd = "ssh-keygen -t {} -b {} -N '' -f {}".format(
-                "rsa", "4096", self.private_key_path
+            command = "ssh-keygen -t {} -b {} -N '' -f {}".format(
+                "rsa", "4096", quote(self.private_key_path)
             )
             # run command with arguments
             )
             # run command with arguments
-            subprocess.check_output(shlex.split(cmd))
+            args = shlex.split(command)
+            subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
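
A small sketch of why the quoting above matters: without shlex.quote, a key path containing spaces or shell metacharacters would be split into several argv entries (the path below is hypothetical):

    import shlex
    from shlex import quote

    path = "/home/my user/.ssh/id_n2vc_rsa"
    command = "ssh-keygen -t rsa -b 4096 -N '' -f {}".format(quote(path))
    args = shlex.split(command)
    assert args[-1] == path  # quote() kept the path a single token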
 
         # Read the public key. Only one public key (one line) in the file
         with open(self.public_key_path, "r") as file:
@@ -146,7 +152,7 @@ class N2VCConnector(abc.ABC, Loggable):
         reuse_ee_id: str = None,
         progress_timeout: float = None,
         total_timeout: float = None,
-    ) -> (str, dict):
+    ) -> tuple[str, dict]:
         """Create an Execution Environment. Returns when it is created or raises an
        exception on failing

@@ -331,6 +337,28 @@ class N2VCConnector(abc.ABC, Loggable):
         :param float total_timeout:
         """
 
+    @abc.abstractmethod
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrades charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if status equals to "completed"
+        """
+
     @abc.abstractmethod
     async def exec_primitive(
         self,
@@ -371,7 +399,9 @@ class N2VCConnector(abc.ABC, Loggable):
     ####################################################################################
     """
 
-    def _get_namespace_components(self, namespace: str) -> (str, str, str, str, str):
+    def _get_namespace_components(
+        self, namespace: str
+    ) -> tuple[str, str, str, str, str]:
         """
        Split namespace components

@@ -434,7 +464,6 @@ class N2VCConnector(abc.ABC, Loggable):
         #          .format(str(status.value), detailed_status, vca_status, entity_type))
 
         try:
-
             the_table = db_dict["collection"]
             the_filter = db_dict["filter"]
             the_path = db_dict["path"]
@@ -502,4 +531,4 @@ def obj_to_dict(obj: object) -> dict:
     # convert obj to yaml
     yaml_text = obj_to_yaml(obj)
     # parse to dict
-    return yaml.load(yaml_text, Loader=yaml.Loader)
+    return yaml.load(yaml_text, Loader=yaml.SafeLoader)
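
For context, switching to yaml.SafeLoader closes the classic unsafe-load hole: SafeLoader only constructs plain Python types and rejects arbitrary object tags. A minimal sketch:

    import yaml

    assert yaml.load("a: 1", Loader=yaml.SafeLoader) == {"a": 1}
    # a payload such as "!!python/object/apply:os.system ['id']" now raises
    # yaml.constructor.ConstructorError instead of executing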
diff --git a/n2vc/n2vc_juju_conn.py b/n2vc/n2vc_juju_conn.py
index af40aee..f28a9bd 100644 (file)
@@ -24,6 +24,7 @@ import asyncio
 import logging
 
 from n2vc.config import EnvironConfig
+from n2vc.definitions import RelationEndpoint
 from n2vc.exceptions import (
     N2VCBadArgumentsException,
     N2VCException,
@@ -36,10 +37,12 @@ from n2vc.exceptions import (
 )
 from n2vc.n2vc_conn import N2VCConnector
 from n2vc.n2vc_conn import obj_to_dict, obj_to_yaml
-from n2vc.libjuju import Libjuju
+from n2vc.libjuju import Libjuju, retry_callback
 from n2vc.store import MotorStore
+from n2vc.utils import get_ee_id_components, generate_random_alfanum_string
 from n2vc.vca.connection import get_connection
 from retrying_async import retry
+from typing import Tuple
 
 
 class N2VCJujuConnector(N2VCConnector):
@@ -58,7 +61,6 @@ class N2VCJujuConnector(N2VCConnector):
         db: object,
         fs: object,
         log: object = None,
-        loop: object = None,
         on_update_db=None,
     ):
         """
@@ -67,19 +69,11 @@ class N2VCJujuConnector(N2VCConnector):
         :param: db: Database object from osm_common
         :param: fs: Filesystem object from osm_common
         :param: log: Logger
-        :param: loop: Asyncio loop
         :param: on_update_db: Callback function to be called for updating the database.
         """
 
         # parent class constructor
-        N2VCConnector.__init__(
-            self,
-            db=db,
-            fs=fs,
-            log=log,
-            loop=loop,
-            on_update_db=on_update_db,
-        )
+        N2VCConnector.__init__(self, db=db, fs=fs, log=log, on_update_db=on_update_db)
 
         # silence websocket traffic log
         logging.getLogger("websockets.protocol").setLevel(logging.INFO)
@@ -90,8 +84,8 @@ class N2VCJujuConnector(N2VCConnector):
 
         db_uri = EnvironConfig(prefixes=["OSMLCM_", "OSMMON_"]).get("database_uri")
         self._store = MotorStore(db_uri)
-        self.loading_libjuju = asyncio.Lock(loop=self.loop)
-
+        self.loading_libjuju = asyncio.Lock()
+        self.delete_namespace_locks = {}
         self.log.info("N2VC juju connector initialized")
 
     async def get_status(
@@ -224,10 +218,7 @@ class N2VCJujuConnector(N2VCConnector):
         # create or reuse a new juju machine
         try:
             if not await libjuju.model_exists(model_name):
-                await libjuju.add_model(
-                    model_name,
-                    libjuju.vca_connection.lxd_cloud,
-                )
+                await libjuju.add_model(model_name, libjuju.vca_connection.lxd_cloud)
             machine, new = await libjuju.create_machine(
                 model_name=model_name,
                 machine_id=machine_id,
@@ -253,9 +244,7 @@ class N2VCJujuConnector(N2VCConnector):
             raise N2VCException(message=message)
 
         # new machine credentials
-        credentials = {
-            "hostname": machine.dns_name,
-        }
+        credentials = {"hostname": machine.dns_name}
 
         self.log.info(
             "Execution environment created. ee_id: {}, credentials: {}".format(
@@ -335,10 +324,7 @@ class N2VCJujuConnector(N2VCConnector):
         # register machine on juju
         try:
             if not await libjuju.model_exists(model_name):
-                await libjuju.add_model(
-                    model_name,
-                    libjuju.vca_connection.lxd_cloud,
-                )
+                await libjuju.add_model(model_name, libjuju.vca_connection.lxd_cloud)
             machine_id = await libjuju.provision_machine(
                 model_name=model_name,
                 hostname=hostname,
@@ -369,7 +355,13 @@ class N2VCJujuConnector(N2VCConnector):
 
     # In case a native_charm is being deployed, if a JujuApplicationExists error happens
     # it will try to add_unit
-    @retry(attempts=3, delay=5, retry_exceptions=(N2VCApplicationExists,), timeout=None)
+    @retry(
+        attempts=3,
+        delay=5,
+        retry_exceptions=(N2VCApplicationExists,),
+        timeout=None,
+        callback=retry_callback,
+    )
     async def install_configuration_sw(
         self,
         ee_id: str,
@@ -453,7 +445,7 @@ class N2VCJujuConnector(N2VCConnector):
             artifact_path = artifact_path.replace("//", "/")
 
         # check charm path
-        if not self.fs.file_exists(artifact_path, mode="dir"):
+        if not self.fs.file_exists(artifact_path):
             msg = "artifact path does not exist: {}".format(artifact_path)
             raise N2VCBadArgumentsException(message=msg, bad_args=["artifact_path"])
 
@@ -550,7 +542,7 @@ class N2VCJujuConnector(N2VCConnector):
             artifact_path = artifact_path.replace("//", "/")
 
         # check charm path
-        if not self.fs.file_exists(artifact_path, mode="dir"):
+        if not self.fs.file_exists(artifact_path):
             msg = "artifact path does not exist: {}".format(artifact_path)
             raise N2VCBadArgumentsException(message=msg, bad_args=["artifact_path"])
 
@@ -562,10 +554,7 @@ class N2VCJujuConnector(N2VCConnector):
         _, ns_id, _, _, _ = self._get_namespace_components(namespace=namespace)
         model_name = "{}-k8s".format(ns_id)
         if not await libjuju.model_exists(model_name):
-            await libjuju.add_model(
-                model_name,
-                libjuju.vca_connection.k8s_cloud,
-            )
+            await libjuju.add_model(model_name, libjuju.vca_connection.k8s_cloud)
         application_name = self._get_application_name(namespace)
 
         try:
@@ -584,9 +573,7 @@ class N2VCJujuConnector(N2VCConnector):
 
         self.log.info("K8s proxy charm installed")
         ee_id = N2VCJujuConnector._build_ee_id(
-            model_name=model_name,
-            application_name=application_name,
-            machine_id="k8s",
+            model_name=model_name, application_name=application_name, machine_id="k8s"
         )
 
         self._write_ee_id_db(db_dict=db_dict, ee_id=ee_id)
@@ -715,70 +702,45 @@ class N2VCJujuConnector(N2VCConnector):
         return await libjuju.get_metrics(model_name, application_name)
 
     async def add_relation(
-        self,
-        ee_id_1: str,
-        ee_id_2: str,
-        endpoint_1: str,
-        endpoint_2: str,
-        vca_id: str = None,
+        self, provider: RelationEndpoint, requirer: RelationEndpoint
     ):
         """
         Add relation between two charmed endpoints
 
-        :param: ee_id_1: The id of the first execution environment
-        :param: ee_id_2: The id of the second execution environment
-        :param: endpoint_1: The endpoint in the first execution environment
-        :param: endpoint_2: The endpoint in the second execution environment
-        :param: vca_id: VCA ID
+        :param: provider: Provider relation endpoint
+        :param: requirer:               Requirer relation endpoint
        """
-        self.log.debug(
-            "adding new relation between {} and {}, endpoints: {}, {}".format(
-                ee_id_1, ee_id_2, endpoint_1, endpoint_2
-            )
+        self.log.debug(f"adding new relation between {provider} and {requirer}")
+        cross_model_relation = (
+            provider.model_name != requirer.model_name
+            or provider.vca_id != requirer.vca_id
         )
-        libjuju = await self._get_libjuju(vca_id)
-
-        # check arguments
-        if not ee_id_1:
-            message = "EE 1 is mandatory"
-            self.log.error(message)
-            raise N2VCBadArgumentsException(message=message, bad_args=["ee_id_1"])
-        if not ee_id_2:
-            message = "EE 2 is mandatory"
-            self.log.error(message)
-            raise N2VCBadArgumentsException(message=message, bad_args=["ee_id_2"])
-        if not endpoint_1:
-            message = "endpoint 1 is mandatory"
-            self.log.error(message)
-            raise N2VCBadArgumentsException(message=message, bad_args=["endpoint_1"])
-        if not endpoint_2:
-            message = "endpoint 2 is mandatory"
-            self.log.error(message)
-            raise N2VCBadArgumentsException(message=message, bad_args=["endpoint_2"])
-
-        # get the model, the applications and the machines from the ee_id's
-        model_1, app_1, _machine_1 = self._get_ee_id_components(ee_id_1)
-        model_2, app_2, _machine_2 = self._get_ee_id_components(ee_id_2)
-
-        # model must be the same
-        if model_1 != model_2:
-            message = "EE models are not the same: {} vs {}".format(ee_id_1, ee_id_2)
-            self.log.error(message)
-            raise N2VCBadArgumentsException(
-                message=message, bad_args=["ee_id_1", "ee_id_2"]
-            )
-
-        # add juju relations between two applications
         try:
         try:
-            await libjuju.add_relation(
-                model_name=model_1,
-                endpoint_1="{}:{}".format(app_1, endpoint_1),
-                endpoint_2="{}:{}".format(app_2, endpoint_2),
-            )
+            if cross_model_relation:
+                # Cross-model relation
+                provider_libjuju = await self._get_libjuju(provider.vca_id)
+                requirer_libjuju = await self._get_libjuju(requirer.vca_id)
+                offer = await provider_libjuju.offer(provider)
+                if offer:
+                    saas_name = await requirer_libjuju.consume(
+                        requirer.model_name, offer, provider_libjuju
+                    )
+                    await requirer_libjuju.add_relation(
+                        requirer.model_name, requirer.endpoint, saas_name
+                    )
+            else:
+                # Standard relation
+                vca_id = provider.vca_id
+                model = provider.model_name
+                libjuju = await self._get_libjuju(vca_id)
+                # add juju relations between two applications
+                await libjuju.add_relation(
+                    model_name=model,
+                    endpoint_1=provider.endpoint,
+                    endpoint_2=requirer.endpoint,
+                )
         except Exception as e:
-            message = "Error adding relation between {} and {}: {}".format(
-                ee_id_1, ee_id_2, e
-            )
+            message = f"Error adding relation between {provider} and {requirer}: {e}"
             self.log.error(message)
             raise N2VCException(message=message)
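
A hedged sketch of the new call shape (RelationEndpoint lives in the new n2vc/definitions.py, which is not shown here; the "<model>.<application>.<endpoint>" constructor format and all values below are assumptions):

    from n2vc.definitions import RelationEndpoint

    async def relate(n2vc):
        provider = RelationEndpoint("model-a.app-a.db", "vca-id-1")
        requirer = RelationEndpoint("model-b.app-b.db", "vca-id-2")
        # differing model names or vca_ids select the offer/consume path
        await n2vc.add_relation(provider, requirer)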
 
@@ -810,33 +772,60 @@ class N2VCJujuConnector(N2VCConnector):
         :param: vca_id: VCA ID
         """
         self.log.info("Deleting namespace={}".format(namespace))
-        libjuju = await self._get_libjuju(vca_id)
+        will_not_delete = False
+        if namespace not in self.delete_namespace_locks:
+            self.delete_namespace_locks[namespace] = asyncio.Lock()
+        delete_lock = self.delete_namespace_locks[namespace]
 
-        # check arguments
-        if namespace is None:
-            raise N2VCBadArgumentsException(
-                message="namespace is mandatory", bad_args=["namespace"]
-            )
+        while delete_lock.locked():
+            will_not_delete = True
+            await asyncio.sleep(0.1)
 
 
-        _nsi_id, ns_id, _vnf_id, _vdu_id, _vdu_count = self._get_namespace_components(
-            namespace=namespace
-        )
-        if ns_id is not None:
-            try:
-                models = await libjuju.list_models(contains=ns_id)
-                for model in models:
-                    await libjuju.destroy_model(
-                        model_name=model, total_timeout=total_timeout
+        if will_not_delete:
+            self.log.info("Namespace {} deleted by another worker.".format(namespace))
+            return
+
+        try:
+            async with delete_lock:
+                libjuju = await self._get_libjuju(vca_id)
+
+                # check arguments
+                if namespace is None:
+                    raise N2VCBadArgumentsException(
+                        message="namespace is mandatory", bad_args=["namespace"]
                     )
-            except Exception as e:
-                raise N2VCException(
-                    message="Error deleting namespace {} : {}".format(namespace, e)
-                )
-        else:
-            raise N2VCBadArgumentsException(
-                message="only ns_id is permitted to delete yet", bad_args=["namespace"]
-            )
 
 
+                    _nsi_id,
+                    ns_id,
+                    _vnf_id,
+                    _vdu_id,
+                    _vdu_count,
+                ) = self._get_namespace_components(namespace=namespace)
+                if ns_id is not None:
+                    try:
+                        models = await libjuju.list_models(contains=ns_id)
+                        for model in models:
+                            await libjuju.destroy_model(
+                                model_name=model, total_timeout=total_timeout
+                            )
+                    except Exception as e:
+                        self.log.error(f"Error deleting namespace {namespace} : {e}")
+                        raise N2VCException(
+                            message="Error deleting namespace {} : {}".format(
+                                namespace, e
+                            )
+                        )
+                else:
+                    raise N2VCBadArgumentsException(
+                        message="only ns_id is permitted to delete yet",
+                        bad_args=["namespace"],
+                    )
+        except Exception as e:
+            self.log.error(f"Error deleting namespace {namespace} : {e}")
+            raise e
+        finally:
+            self.delete_namespace_locks.pop(namespace)
         self.log.info("Namespace {} deleted".format(namespace))
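
The locking scheme above in miniature (a sketch, not the committed code): one worker per namespace performs the deletion, while late arrivals wait for the lock to clear and then return:

    import asyncio

    locks = {}

    async def guarded_delete(namespace):
        if namespace not in locks:
            locks[namespace] = asyncio.Lock()
        lock = locks[namespace]
        if lock.locked():
            while lock.locked():  # another worker is already deleting
                await asyncio.sleep(0.1)
            return
        async with lock:
            ...  # destroy the namespace's models here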
 
     async def delete_execution_environment(
@@ -847,6 +836,7 @@ class N2VCJujuConnector(N2VCConnector):
         scaling_in: bool = False,
         vca_type: str = None,
         vca_id: str = None,
+        application_to_delete: str = None,
     ):
         """
         Delete an execution environment
@@ -856,10 +846,11 @@ class N2VCJujuConnector(N2VCConnector):
                             {collection: <str>, filter: {},  path: <str>},
                             e.g. {collection: "nsrs", filter:
                                 {_id: <nsd-id>, path: "_admin.deployed.VCA.3"}
-        :param: total_timeout: Total timeout
-        :param: scaling_in: Boolean to indicate if it is a scaling in operation
-        :param: vca_type: VCA type
-        :param: vca_id: VCA ID
+        :param total_timeout: Total timeout
+        :param scaling_in: Boolean to indicate if it is a scaling in operation
+        :param vca_type: VCA type
+        :param vca_id: VCA ID
+        :param application_to_delete: name of the single application to be deleted
         """
        self.log.info("Deleting execution environment ee_id={}".format(ee_id))
        libjuju = await self._get_libjuju(vca_id)
@@ -874,12 +865,30 @@ class N2VCJujuConnector(N2VCConnector):
             ee_id=ee_id
         )
         try:
-            if not scaling_in:
-                # destroy the model
-                await libjuju.destroy_model(
+            if application_to_delete == application_name:
+                # destroy the application
+                await libjuju.destroy_application(
                     model_name=model_name,
+                    application_name=application_name,
                     total_timeout=total_timeout,
                 )
+                # if model is empty delete it
+                controller = await libjuju.get_controller()
+                model = await libjuju.get_model(
+                    controller=controller,
+                    model_name=model_name,
+                )
+                if not model.applications:
+                    self.log.info("Model {} is empty, deleting it".format(model_name))
+                    await libjuju.destroy_model(
+                        model_name=model_name,
+                        total_timeout=total_timeout,
+                    )
+            elif not scaling_in:
+                # destroy the model
+                await libjuju.destroy_model(
+                    model_name=model_name, total_timeout=total_timeout
+                )
             elif vca_type == "native_charm" and scaling_in:
                 # destroy the unit in the application
                 await libjuju.destroy_unit(
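
A hedged usage sketch for the new application_to_delete parameter (the ee_id follows the "<model>.<application>.<machine>" format; all values are hypothetical): one application is removed, and the model is destroyed only if it ends up empty:

    async def delete_one_app(n2vc):
        await n2vc.delete_execution_environment(
            ee_id="my-model.my-app.k8s",
            application_to_delete="my-app",
            total_timeout=600.0,
        )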
@@ -982,8 +991,7 @@ class N2VCJujuConnector(N2VCConnector):
                     config=params_dict,
                 )
                 actions = await libjuju.get_actions(
-                    application_name=application_name,
-                    model_name=model_name,
+                    application_name=application_name, model_name=model_name
                 )
                 self.log.debug(
                     "Application {} has these actions: {}".format(
@@ -1047,23 +1055,86 @@ class N2VCJujuConnector(N2VCConnector):
                     machine_id=machine_id,
                     progress_timeout=progress_timeout,
                     total_timeout=total_timeout,
-                    **params_dict
+                    **params_dict,
                 )
                 if status == "completed":
                     return output
                 else:
-                    raise Exception("status is not completed: {}".format(status))
+                    if "output" in output:
+                        raise Exception(f'{status}: {output["output"]}')
+                    else:
+                        raise Exception(
+                            f"{status}: No further information received from action"
+                        )
+
             except Exception as e:
-                self.log.error(
-                    "Error executing primitive {}: {}".format(primitive_name, e)
-                )
+                self.log.error(f"Error executing primitive {primitive_name}: {e}")
                 raise N2VCExecutionException(
-                    message="Error executing primitive {} into ee={} : {}".format(
-                        primitive_name, ee_id, e
-                    ),
+                    message=f"Error executing primitive {primitive_name} in ee={ee_id}: {e}",
                     primitive_name=primitive_name,
                 )
 
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrades charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if status equals to "completed"
+
+        """
+        self.log.info("Upgrading charm: {} on ee: {}".format(path, ee_id))
+        libjuju = await self._get_libjuju(charm_id)
+
+        # check arguments
+        if ee_id is None or len(ee_id) == 0:
+            raise N2VCBadArgumentsException(
+                message="ee_id is mandatory", bad_args=["ee_id"]
+            )
+        try:
+            (
+                model_name,
+                application_name,
+                machine_id,
+            ) = N2VCJujuConnector._get_ee_id_components(ee_id=ee_id)
+
+        except Exception:
+            raise N2VCBadArgumentsException(
+                message="ee_id={} is not a valid execution environment id".format(
+                    ee_id
+                ),
+                bad_args=["ee_id"],
+            )
+
+        try:
+            await libjuju.upgrade_charm(
+                application_name=application_name,
+                path=path,
+                model_name=model_name,
+                total_timeout=timeout,
+            )
+
+            return f"Charm upgraded with application name {application_name}"
+
+        except Exception as e:
+            self.log.error("Error upgrading charm {}: {}".format(path, e))
+
+            raise N2VCException(
+                message="Error upgrading charm {} in ee={} : {}".format(path, ee_id, e)
+            )
+
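
A hedged sketch of the connector-level call (values hypothetical): the ee_id is decomposed into model, application and machine before delegating to Libjuju.upgrade_charm:

    async def upgrade_via_connector(n2vc):
        return await n2vc.upgrade_charm(
            ee_id="my-model.my-app.1",
            path="/tmp/charms/my-app.charm",
            charm_type="native_charm",
            timeout=300.0,
        )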
     async def disconnect(self, vca_id: str = None):
         """
         Disconnect from VCA
@@ -1100,19 +1171,13 @@ class N2VCJujuConnector(N2VCConnector):
             if not self.libjuju:
                 async with self.loading_libjuju:
                     vca_connection = await get_connection(self._store)
-                    self.libjuju = Libjuju(vca_connection, loop=self.loop, log=self.log)
+                    self.libjuju = Libjuju(vca_connection, log=self.log)
             return self.libjuju
         else:
             vca_connection = await get_connection(self._store, vca_id)
-            return Libjuju(
-                vca_connection,
-                loop=self.loop,
-                log=self.log,
-                n2vc=self,
-            )
+            return Libjuju(vca_connection, log=self.log, n2vc=self)
 
     def _write_ee_id_db(self, db_dict: dict, ee_id: str):
-
         # write ee_id to database: _admin.deployed.VCA.x
         try:
             the_table = db_dict["collection"]
@@ -1153,30 +1218,43 @@ class N2VCJujuConnector(N2VCConnector):
         :return: model_name, application_name, machine_id
         """
 
-        if ee_id is None:
-            return None, None, None
-
-        # split components of id
-        parts = ee_id.split(".")
-        model_name = parts[0]
-        application_name = parts[1]
-        machine_id = parts[2]
-        return model_name, application_name, machine_id
+        return get_ee_id_components(ee_id)
 
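
The split logic removed above now lives in the shared get_ee_id_components helper. Judging from the deleted lines, it is equivalent to this minimal sketch (validation in the real helper is assumed, not shown):

    def get_ee_id_components(ee_id: str):
        # "<model>.<application>.<machine_id>" -> its three components
        parts = ee_id.split(".")
        return parts[0], parts[1], parts[2]
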
-    def _get_application_name(self, namespace: str) -> str:
-        """
-        Build application name from namespace
-        :param namespace:
-        :return: app-vnf-<vnf id>-vdu-<vdu-id>-cnt-<vdu-count>
+    @staticmethod
+    def _find_charm_level(vnf_id: str, vdu_id: str) -> str:
+        """Decides the charm level.
+        Args:
+            vnf_id  (str):  VNF id
+            vdu_id  (str):  VDU id
+
+        Returns:
+            charm_level (str):  ns-level or vnf-level or vdu-level
         """
         """
+        if vdu_id and not vnf_id:
+            raise N2VCException(message="If vdu-id exists, vnf-id should be provided.")
+        if vnf_id and vdu_id:
+            return "vdu-level"
+        if vnf_id and not vdu_id:
+            return "vnf-level"
+        if not vnf_id and not vdu_id:
+            return "ns-level"
 
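
The branches above give this decision table:

    N2VCJujuConnector._find_charm_level(vnf_id=None, vdu_id=None)      # "ns-level"
    N2VCJujuConnector._find_charm_level(vnf_id="vnf1", vdu_id=None)    # "vnf-level"
    N2VCJujuConnector._find_charm_level(vnf_id="vnf1", vdu_id="mgmt")  # "vdu-level"
    N2VCJujuConnector._find_charm_level(vnf_id=None, vdu_id="mgmt")    # raises N2VCException
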
-        # TODO: Enforce the Juju 50-character application limit
+    @staticmethod
+    def _generate_backward_compatible_application_name(
+        vnf_id: str, vdu_id: str, vdu_count: str
+    ) -> str:
+        """Generate backward compatible application name
+         by limiting the app name to 50 characters.
 
-        # split namespace components
-        _, _, vnf_id, vdu_id, vdu_count = self._get_namespace_components(
-            namespace=namespace
-        )
+        Args:
+            vnf_id  (str):  VNF ID
+            vdu_id  (str):  VDU ID
+            vdu_count   (str):  vdu-count-index
+
+        Returns:
+            application_name (str): generated application name
 
+        """
         if vnf_id is None or len(vnf_id) == 0:
             vnf_id = ""
         else:
@@ -1194,7 +1272,233 @@ class N2VCJujuConnector(N2VCConnector):
         else:
             vdu_count = "-cnt-" + vdu_count
 
-        application_name = "app-{}{}{}".format(vnf_id, vdu_id, vdu_count)
+        # Generate a random suffix with 5 characters (the default size used by K8s)
+        random_suffix = generate_random_alfanum_string(size=5)
+
+        application_name = "app-{}{}{}-{}".format(
+            vnf_id, vdu_id, vdu_count, random_suffix
+        )
+        return application_name
+
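
A hypothetical call (ids invented; the exact truncation of vnf_id and vdu_id happens in lines elided from this hunk):

    name = N2VCJujuConnector._generate_backward_compatible_application_name(
        "1234", "mgmtVM", "0"
    )
    # e.g. "app-vnf-1234-vdu-mgmtVM-cnt-0-ab1cd": the old
    # "app-vnf-<vnf id>-vdu-<vdu-id>-cnt-<vdu-count>" shape plus a random
    # 5-character suffix
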
+    @staticmethod
+    def _get_vca_record(search_key: str, vca_records: list, vdu_id: str) -> dict:
+        """Get the correct VCA record dict depending on the search key
+
+        Args:
+            search_key  (str):      keyword to find the correct VCA record
+            vca_records (list):     All VCA records as list
+            vdu_id  (str):          VDU ID
+
+        Returns:
+            vca_record  (dict):     Dictionary which includes the correct VCA record
+
+        """
+        return next(
+            filter(lambda record: record[search_key] == vdu_id, vca_records), {}
+        )
+
+    @staticmethod
+    def _generate_application_name(
+        charm_level: str,
+        vnfrs: dict,
+        vca_records: list,
+        vnf_count: str = None,
+        vdu_id: str = None,
+        vdu_count: str = None,
+    ) -> str:
+        """Generate application name to make the relevant charm of VDU/KDU
+        in the VNFD descriptor become clearly visible.
+        Limiting the app name to 50 characters.
+
+        Args:
+            charm_level  (str):  level of charm
+            vnfrs  (dict):  vnf record dict
+            vca_records   (list):   db_nsr["_admin"]["deployed"]["VCA"] as list
+            vnf_count   (str): vnf count index
+            vdu_id   (str):  VDU ID
+            vdu_count   (str):  vdu count index
+
+        Returns:
+            application_name (str): generated application name
+
+        """
+        application_name = ""
+        if charm_level == "ns-level":
+            if len(vca_records) != 1:
+                raise N2VCException(message="One VCA record is expected.")
+            # Only one VCA record is expected if it's ns-level charm.
+            # Shorten the charm name to its first 40 characters.
+            charm_name = vca_records[0]["charm_name"][:40]
+            if not charm_name:
+                raise N2VCException(message="Charm name should be provided.")
+            application_name = charm_name + "-ns"
+
+        elif charm_level == "vnf-level":
+            if len(vca_records) < 1:
+                raise N2VCException(message="One or more VCA record is expected.")
+            # If VNF is scaled, more than one VCA record may be included in vca_records
+            # but ee_descriptor_id is the same.
+            # Shorten the ee_descriptor_id and member-vnf-index-ref
+            # to first 12 characters.
+            application_name = (
+                vca_records[0]["ee_descriptor_id"][:12]
+                + "-"
+                + vnf_count
+                + "-"
+                + vnfrs["member-vnf-index-ref"][:12]
+                + "-vnf"
+            )
+        elif charm_level == "vdu-level":
+            if len(vca_records) < 1:
+                raise N2VCException(message="One or more VCA record is expected.")
+
+            # Charms are also used for deployments with Helm charts.
+            # If deployment unit is a Helm chart/KDU,
+            # vdu_profile_id and vdu_count will be empty string.
+            if vdu_count is None:
+                vdu_count = ""
+
+            # If vnf/vdu is scaled, more than one VCA record may be included in vca_records
+            # but ee_descriptor_id is the same.
+            # Shorten the ee_descriptor_id, member-vnf-index-ref and vdu_profile_id
+            # to first 12 characters.
+            if not vdu_id:
+                raise N2VCException(message="vdu-id should be provided.")
+
+            vca_record = N2VCJujuConnector._get_vca_record(
+                "vdu_id", vca_records, vdu_id
+            )
+
+            if not vca_record:
+                vca_record = N2VCJujuConnector._get_vca_record(
+                    "kdu_name", vca_records, vdu_id
+                )
+
+            application_name = (
+                vca_record["ee_descriptor_id"][:12]
+                + "-"
+                + vnf_count
+                + "-"
+                + vnfrs["member-vnf-index-ref"][:12]
+                + "-"
+                + vdu_id[:12]
+                + "-"
+                + vdu_count
+                + "-vdu"
+            )
+
+        return application_name
+
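
A worked vdu-level example under assumed record contents (both dicts are invented; each identifier is cut to its first 12 characters, as in the code above):

    vca_records = [{"ee_descriptor_id": "simple-ee-abcd", "vdu_id": "mgmtVM"}]
    vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
    name = N2VCJujuConnector._generate_application_name(
        "vdu-level", vnfrs, vca_records, vnf_count="2", vdu_id="mgmtVM", vdu_count="0"
    )
    # -> "simple-ee-ab-2-vnf111-xxx-y-mgmtVM-0-vdu"
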
+    def _get_vnf_count_and_record(
+        self, charm_level: str, vnf_id_and_count: str
+    ) -> Tuple[str, dict]:
+        """Get the vnf count and VNF record depend on charm level
+
+        Args:
+            charm_level  (str)
+            vnf_id_and_count (str)
+
+        Returns:
+            (vnf_count  (str), db_vnfr(dict)) as Tuple
+
+        """
+        vnf_count = ""
+        db_vnfr = {}
+
+        if charm_level in ("vnf-level", "vdu-level"):
+            vnf_id = "-".join(vnf_id_and_count.split("-")[:-1])
+            vnf_count = vnf_id_and_count.split("-")[-1]
+            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
+
+        # If the charm is ns level, it returns empty vnf_count and db_vnfr
+        return vnf_count, db_vnfr
+
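
The vnf_id_and_count token packs the VNF record id and its count index into one dash-separated string, so the split above behaves like:

    vnf_id_and_count = "7aad44e3-3c3e-41c2-a1f9-bb4a53bed98c-4"  # invented id
    vnf_id = "-".join(vnf_id_and_count.split("-")[:-1])  # the UUID part
    vnf_count = vnf_id_and_count.split("-")[-1]          # "4"
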
+    @staticmethod
+    def _get_vca_records(charm_level: str, db_nsr: dict, db_vnfr: dict) -> list:
+        """Get the VCA records from db_nsr dict
+
+        Args:
+            charm_level (str):  level of charm
+            db_nsr  (dict):     NS record from database
+            db_vnfr (dict):     VNF record from database
+
+        Returns:
+            vca_records (list):  List of VCA record dictionaries
+
+        """
+        vca_records = []
+        if charm_level == "ns-level":
+            vca_records = list(
+                filter(
+                    lambda vca_record: vca_record["target_element"] == "ns",
+                    db_nsr["_admin"]["deployed"]["VCA"],
+                )
+            )
+        elif charm_level in ["vnf-level", "vdu-level"]:
+            vca_records = list(
+                filter(
+                    lambda vca_record: vca_record["member-vnf-index"]
+                    == db_vnfr["member-vnf-index-ref"],
+                    db_nsr["_admin"]["deployed"]["VCA"],
+                )
+            )
+
+        return vca_records
+
+    def _get_application_name(self, namespace: str) -> str:
+        """Build application name from namespace
+
+        Application name structure:
+            NS level: <charm-name>-ns
+            VNF level: <ee-name>-z<vnf-ordinal-scale-number>-<vnf-profile-id>-vnf
+            VDU level: <ee-name>-z<vnf-ordinal-scale-number>-<vnf-profile-id>-
+            <vdu-profile-id>-z<vdu-ordinal-scale-number>-vdu
+
+        Application naming for backward compatibility (old structure):
+            NS level: app-<random_value>
+            VNF level: app-vnf-<vnf-id>-z<ordinal-scale-number>-<random_value>
+            VDU level: app-vnf-<vnf-id>-z<vnf-ordinal-scale-number>-vdu-
+            <vdu-id>-cnt-<vdu-count>-z<vdu-ordinal-scale-number>-<random_value>
+
+        Args:
+            namespace   (str)
+
+        Returns:
+            application_name    (str)
+
+        """
+        # split namespace components
+        (
+            nsi_id,
+            ns_id,
+            vnf_id_and_count,
+            vdu_id,
+            vdu_count,
+        ) = self._get_namespace_components(namespace=namespace)
+
+        if not ns_id:
+            raise N2VCException(message="ns-id should be provided.")
+
+        charm_level = self._find_charm_level(vnf_id_and_count, vdu_id)
+        db_nsr = self.db.get_one("nsrs", {"_id": ns_id})
+        vnf_count, db_vnfr = self._get_vnf_count_and_record(
+            charm_level, vnf_id_and_count
+        )
+        vca_records = self._get_vca_records(charm_level, db_nsr, db_vnfr)
+
+        if all("charm_name" in vca_record.keys() for vca_record in vca_records):
+            application_name = self._generate_application_name(
+                charm_level,
+                db_vnfr,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+        else:
+            application_name = self._generate_backward_compatible_application_name(
+                vnf_id_and_count, vdu_id, vdu_count
+            )
 
         return N2VCJujuConnector._format_app_name(application_name)
 
@@ -1255,6 +1559,6 @@ class N2VCJujuConnector(N2VCConnector):
         :param: vca_id: VCA ID
         """
         vca_connection = await get_connection(self._store, vca_id=vca_id)
-        libjuju = Libjuju(vca_connection, loop=self.loop, log=self.log, n2vc=self)
+        libjuju = Libjuju(vca_connection, log=self.log, n2vc=self)
         controller = await libjuju.get_controller()
         await libjuju.disconnect_controller(controller)
index cd6c6fb..c8e5910 100644 (file)
 #     limitations under the License.
 
 import abc
-import asyncio
-from base64 import b64decode
-import re
 import typing
 
-from Crypto.Cipher import AES
 from motor.motor_asyncio import AsyncIOMotorClient
 from n2vc.config import EnvironConfig
 from n2vc.vca.connection_data import ConnectionData
 from osm_common.dbmongo import DbMongo, DbException
+from osm_common.dbbase import Encryption
+
 
 DB_NAME = "osm"
 
@@ -184,17 +182,21 @@ class DbMongoStore(Store):
 
 
 class MotorStore(Store):
-    def __init__(self, uri: str, loop=None):
+    def __init__(self, uri: str):
         """
         Constructor
 
         :param: uri: Connection string to connect to the database.
         """
         Constructor
 
         :param: uri: Connection string to connect to the database.
-        :param: loop: Asyncio Loop
         """
         self._client = AsyncIOMotorClient(uri)
         """
         self._client = AsyncIOMotorClient(uri)
-        self.loop = loop or asyncio.get_event_loop()
         self._secret_key = None
         self._config = EnvironConfig(prefixes=["OSMLCM_", "OSMMON_"])
+        self.encryption = Encryption(
+            uri=uri,
+            config=self._config,
+            encoding_type="utf-8",
+            logger_name="db",
+        )
 
     @property
     def _database(self):
@@ -223,7 +225,7 @@ class MotorStore(Store):
         data = await self._vca_collection.find_one({"_id": vca_id})
         if not data:
             raise Exception("vca with id {} not found".format(vca_id))
-        await self.decrypt_fields(
+        await self.encryption.decrypt_fields(
             data,
             ["secret", "cacert"],
             schema_version=data["schema_version"],
@@ -294,114 +296,3 @@ class MotorStore(Store):
     async def _get_juju_info(self):
         """Get Juju information (the default VCA) from the admin collection"""
         return await self._admin_collection.find_one({"_id": "juju"})
-
-    # DECRYPT METHODS
-    async def decrypt_fields(
-        self,
-        item: dict,
-        fields: typing.List[str],
-        schema_version: str = None,
-        salt: str = None,
-    ):
-        """
-        Decrypt fields
-
-        Decrypt fields from a dictionary. Follows the same logic as in osm_common.
-
-        :param: item: Dictionary with the keys to be decrypted
-        :param: fields: List of keys to decrypt
-        :param: schema version: Schema version. (i.e. 1.11)
-        :param: salt: Salt for the decryption
-        """
-        flags = re.I
-
-        async def process(_item):
-            if isinstance(_item, list):
-                for elem in _item:
-                    await process(elem)
-            elif isinstance(_item, dict):
-                for key, val in _item.items():
-                    if isinstance(val, str):
-                        if any(re.search(f, key, flags) for f in fields):
-                            _item[key] = await self.decrypt(val, schema_version, salt)
-                    else:
-                        await process(val)
-
-        await process(item)
-
-    async def decrypt(self, value, schema_version=None, salt=None):
-        """
-        Decrypt an encrypted value
-        :param value: value to be decrypted. It is a base64 string
-        :param schema_version: used for known encryption method used. If None or '1.0' no encryption has been done.
-               If '1.1' symmetric AES encryption has been done
-        :param salt: optional salt to be used
-        :return: Plain content of value
-        """
-        await self.get_secret_key()
-        if not self.secret_key or not schema_version or schema_version == "1.0":
-            return value
-        else:
-            secret_key = self._join_secret_key(salt)
-            encrypted_msg = b64decode(value)
-            cipher = AES.new(secret_key)
-            decrypted_msg = cipher.decrypt(encrypted_msg)
-            try:
-                unpadded_private_msg = decrypted_msg.decode().rstrip("\0")
-            except UnicodeDecodeError:
-                raise DbException(
-                    "Cannot decrypt information. Are you using same COMMONKEY in all OSM components?",
-                    http_code=500,
-                )
-            return unpadded_private_msg
-
-    def _join_secret_key(self, update_key: typing.Any) -> bytes:
-        """
-        Join key with secret key
-
-        :param: update_key: str or bytes with the to update
-
-        :return: Joined key
-        """
-        return self._join_keys(update_key, self.secret_key)
-
-    def _join_keys(self, key: typing.Any, secret_key: bytes) -> bytes:
-        """
-        Join key with secret_key
-
-        :param: key: str or bytesof the key to update
-        :param: secret_key: bytes of the secret key
-
-        :return: Joined key
-        """
-        if isinstance(key, str):
-            update_key_bytes = key.encode()
-        else:
-            update_key_bytes = key
-        new_secret_key = bytearray(secret_key) if secret_key else bytearray(32)
-        for i, b in enumerate(update_key_bytes):
-            new_secret_key[i % 32] ^= b
-        return bytes(new_secret_key)
-
-    @property
-    def secret_key(self):
-        return self._secret_key
-
-    async def get_secret_key(self):
-        """
-        Get secret key using the database key and the serial key in the DB
-        The key is populated in the property self.secret_key
-        """
-        if self.secret_key:
-            return
-        secret_key = None
-        if self.database_key:
-            secret_key = self._join_keys(self.database_key, None)
-        version_data = await self._admin_collection.find_one({"_id": "version"})
-        if version_data and version_data.get("serial"):
-            secret_key = self._join_keys(b64decode(version_data["serial"]), secret_key)
-        self._secret_key = secret_key
-
-    @property
-    def database_key(self):
-        return self._config["database_commonkey"]
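
The decryption code deleted above is delegated to osm_common's Encryption helper, constructed in MotorStore.__init__ earlier in this diff. For reference, the removed XOR key joining reduces to this standalone sketch:

    def join_keys(key: bytes, secret_key: bytes) -> bytes:
        # XOR the key into a 32-byte rolling buffer (from the removed _join_keys)
        new_secret_key = bytearray(secret_key) if secret_key else bytearray(32)
        for i, b in enumerate(key):
            new_secret_key[i % 32] ^= b
        return bytes(new_secret_key)
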
diff --git a/n2vc/tests/unit/test_definitions.py b/n2vc/tests/unit/test_definitions.py
new file mode 100644 (file)
index 0000000..5d58a76
--- /dev/null
@@ -0,0 +1,48 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+from typing import NoReturn
+from unittest import TestCase
+from unittest.mock import patch
+
+from n2vc.definitions import Offer, RelationEndpoint
+
+
+@patch("n2vc.definitions.get_ee_id_components")
+class RelationEndpointTest(TestCase):
+    def test_success(self, mock_get_ee_id_components) -> NoReturn:
+        mock_get_ee_id_components.return_value = ("model", "application", "machine_id")
+        relation_endpoint = RelationEndpoint(
+            "model.application.machine_id",
+            "vca",
+            "endpoint",
+        )
+        self.assertEqual(relation_endpoint.model_name, "model")
+        self.assertEqual(relation_endpoint.application_name, "application")
+        self.assertEqual(relation_endpoint.vca_id, "vca")
+        self.assertEqual(relation_endpoint.endpoint, "application:endpoint")
+        self.assertEqual(relation_endpoint.endpoint_name, "endpoint")
+        self.assertEqual(
+            str(relation_endpoint), "application:endpoint (model: model, vca: vca)"
+        )
+
+
+class OfferTest(TestCase):
+    def test_success(self) -> NoReturn:
+        url = "admin/test-model.my-offer"
+        offer = Offer(url)
+        self.assertEqual(offer.model_name, "test-model")
+        self.assertEqual(offer.name, "my-offer")
+        self.assertEqual(offer.username, "admin")
+        self.assertEqual(offer.url, url)
index 5f81274..b9e9e36 100644 (file)
 #     See the License for the specific language governing permissions and
 #     limitations under the License.
 
+import json
+import os
+from time import sleep
 import asynctest
 import asyncio
 
-from unittest import mock, TestCase
-from unittest.mock import Mock
 from n2vc.juju_watcher import JujuModelWatcher, entity_ready, status
 from n2vc.exceptions import EntityInvalidException
 from .utils import FakeN2VC, AsyncMock, Deltas, FakeWatcher
 from juju.application import Application
-from juju.model import Model
+from juju.action import Action
 from juju.annotation import Annotation
+from juju.client._definitions import AllWatcherNextResults
 from juju.machine import Machine
-from juju.action import Action
+from juju.model import Model
+from juju.unit import Unit
+from unittest import mock, TestCase
+from unittest.mock import Mock
 
 
 class JujuWatcherTest(asynctest.TestCase):
@@ -122,6 +127,167 @@ class EntityReadyTest(TestCase):
         self.assertTrue(isinstance(entity_ready(entity), bool))
 
 
+@mock.patch("n2vc.juju_watcher.client.AllWatcherFacade.from_connection")
+class EntityStateTest(TestCase):
+    def setUp(self):
+        self.model = Model()
+        self.model._connector = mock.MagicMock()
+        self.loop = asyncio.new_event_loop()
+        self.application = Mock(Application)
+        self.upgrade_file = None
+        self.line_number = 1
+
+    def _fetch_next_delta(self):
+        delta = None
+        while delta is None:
+            raw_data = self.upgrade_file.readline()
+            if not raw_data:
+                raise EOFError("Log file is out of events")
+            try:
+                delta = json.loads(raw_data)
+            except ValueError:
+                continue
+
+        if delta[0] == "unit":
+            if delta[2]["life"] == "dead":
+                # Remove the unit from the application
+                for unit in self.application.units:
+                    if unit.entity_id == delta[2]["name"]:
+                        self.application.units.remove(unit)
+            else:
+                unit_present = False
+                for unit in self.application.units:
+                    if unit.entity_id == delta[2]["name"]:
+                        unit_present = True
+
+                if not unit_present:
+                    print("Application gets a new unit: {}".format(delta[2]["name"]))
+                    unit = Mock(Unit)
+                    unit.entity_id = delta[2]["name"]
+                    unit.entity_type = "unit"
+                    self.application.units.append(unit)
+
+        print("{}  {}".format(self.line_number, delta))
+        self.line_number = self.line_number + 1
+
+        return AllWatcherNextResults(
+            deltas=[
+                delta,
+            ]
+        )
+
+    def _ensure_state(self, filename, mock_all_watcher):
+        with open(
+            os.path.join(os.path.dirname(__file__), "testdata", filename),
+            "r",
+        ) as self.upgrade_file:
+            all_changes = AsyncMock()
+            all_changes.Next.side_effect = self._fetch_next_delta
+            mock_all_watcher.return_value = all_changes
+
+            self.loop.run_until_complete(
+                JujuModelWatcher.ensure_units_idle(
+                    model=self.model, application=self.application
+                )
+            )
+
+            with self.assertRaises(EOFError, msg="Not all events consumed"):
+                change = self._fetch_next_delta()
+                print(change.deltas[0].deltas)
+
+    def _slow_changes(self):
+        sleep(0.1)
+        return AllWatcherNextResults(
+            deltas=[
+                json.loads(
+                    """["unit","change",
+                {
+                    "name": "app-vnf-7a49ace2b6-z0/2",
+                    "application": "app-vnf-7a49ace2b6-z0",
+                    "workload-status": {
+                        "current": "active",
+                        "message": "",
+                        "since": "2022-04-26T18:50:27.579802723Z"},
+                    "agent-status": {
+                        "current": "idle",
+                        "message": "",
+                        "since": "2022-04-26T18:50:28.592142816Z"}
+                }]"""
+                ),
+            ]
+        )
+
+    def test_timeout(self, mock_all_watcher):
+        unit1 = Mock(Unit)
+        unit1.entity_id = "app-vnf-7a49ace2b6-z0/0"
+        unit1.entity_type = "unit"
+        self.application.units = [
+            unit1,
+        ]
+
+        all_changes = AsyncMock()
+        all_changes.Next.side_effect = self._slow_changes
+        mock_all_watcher.return_value = all_changes
+
+        with self.assertRaises(TimeoutError):
+            self.loop.run_until_complete(
+                JujuModelWatcher.wait_for_units_idle(
+                    model=self.model, application=self.application, timeout=0.01
+                )
+            )
+
+    def test_machine_unit_upgrade(self, mock_all_watcher):
+        unit1 = Mock(Unit)
+        unit1.entity_id = "app-vnf-7a49ace2b6-z0/0"
+        unit1.entity_type = "unit"
+        unit2 = Mock(Unit)
+        unit2.entity_id = "app-vnf-7a49ace2b6-z0/1"
+        unit2.entity_type = "unit"
+        unit3 = Mock(Unit)
+        unit3.entity_id = "app-vnf-7a49ace2b6-z0/2"
+        unit3.entity_type = "unit"
+
+        self.application.units = [unit1, unit2, unit3]
+
+        self._ensure_state("upgrade-machine.log", mock_all_watcher)
+
+    def test_operator_upgrade(self, mock_all_watcher):
+        unit1 = Mock(Unit)
+        unit1.entity_id = "sshproxy/0"
+        unit1.entity_type = "unit"
+        self.application.units = [
+            unit1,
+        ]
+        self._ensure_state("upgrade-operator.log", mock_all_watcher)
+
+    def test_podspec_stateful_upgrade(self, mock_all_watcher):
+        unit1 = Mock(Unit)
+        unit1.entity_id = "mongodb/0"
+        unit1.entity_type = "unit"
+        self.application.units = [
+            unit1,
+        ]
+        self._ensure_state("upgrade-podspec-stateful.log", mock_all_watcher)
+
+    def test_podspec_stateless_upgrade(self, mock_all_watcher):
+        unit1 = Mock(Unit)
+        unit1.entity_id = "lcm/9"
+        unit1.entity_type = "unit"
+        self.application.units = [
+            unit1,
+        ]
+        self._ensure_state("upgrade-podspec-stateless.log", mock_all_watcher)
+
+    def test_sidecar_upgrade(self, mock_all_watcher):
+        unit1 = Mock(Unit)
+        unit1.entity_id = "kafka/0"
+        unit1.entity_type = "unit"
+        self.application.units = [
+            unit1,
+        ]
+        self._ensure_state("upgrade-sidecar.log", mock_all_watcher)
+
+
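
Each testdata upgrade-*.log replayed by _fetch_next_delta holds one JSON delta per line; abridged from the _slow_changes payload above, a line looks roughly like:

    ["unit", "change", {"name": "kafka/0", "application": "kafka",
     "workload-status": {"current": "active", "message": ""},
     "agent-status": {"current": "idle", "message": ""}}]
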
 class StatusTest(TestCase):
     def setUp(self):
         self.model = Model()
index 93d0c4c..bddfddd 100644 (file)
@@ -39,7 +39,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         self.fs.path = "./tmp/"
         self.namespace = "testk8s"
         self.cluster_id = "helm3_cluster_id"
-        self.cluster_uuid = "{}:{}".format(self.namespace, self.cluster_id)
+        self.cluster_uuid = self.cluster_id
         # pass fake kubectl and helm commands to make sure it does not call actual commands
         K8sHelm3Connector._check_file_exists = asynctest.Mock(return_value=True)
         cluster_dir = self.fs.path + self.cluster_id
@@ -66,8 +66,8 @@ class TestK8sHelm3Conn(asynctest.TestCase):
 
         self.assertEqual(
             k8scluster_uuid,
-            "{}:{}".format(self.namespace, self.cluster_id),
-            "Check cluster_uuid format: <namespace>.<cluster_id>",
+            self.cluster_id,
+            "Check cluster_uuid",
         )
         self.helm_conn._get_namespaces.assert_called_once_with(self.cluster_id)
         self.helm_conn._create_namespace.assert_called_once_with(
@@ -86,7 +86,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
     async def test_repo_add(self):
         repo_name = "bitnami"
         repo_url = "https://charts.bitnami.com/bitnami"
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=(0, ""))
 
         await self.helm_conn.repo_add(self.cluster_uuid, repo_name, repo_url)
 
@@ -102,35 +102,38 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             ),
         )
 
-        repo_update_command = "/usr/bin/helm3 repo update"
-        repo_add_command = "/usr/bin/helm3 repo add {} {}".format(repo_name, repo_url)
+        repo_update_command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 repo update {}"
+        ).format(repo_name)
+        repo_add_command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 repo add {} {}"
+        ).format(repo_name, repo_url)
         calls = self.helm_conn._local_async_exec.call_args_list
         call0_kargs = calls[0][1]
         self.assertEqual(
             call0_kargs.get("command"),
-            repo_update_command,
-            "Invalid repo update command: {}".format(call0_kargs.get("command")),
+            repo_add_command,
+            "Invalid repo add command: {}".format(call0_kargs.get("command")),
         )
         self.assertEqual(
             call0_kargs.get("env"),
             self.env,
-            "Invalid env for update command: {}".format(call0_kargs.get("env")),
+            "Invalid env for add command: {}".format(call0_kargs.get("env")),
         )
         call1_kargs = calls[1][1]
         self.assertEqual(
             call1_kargs.get("command"),
-            repo_add_command,
-            "Invalid repo add command: {}".format(call1_kargs.get("command")),
+            repo_update_command,
+            "Invalid repo update command: {}".format(call1_kargs.get("command")),
         )
         self.assertEqual(
             call1_kargs.get("env"),
             self.env,
-            "Invalid env for add command: {}".format(call1_kargs.get("env")),
+            "Invalid env for update command: {}".format(call1_kargs.get("env")),
         )
 
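
The reworked assertions reflect that every helm3 call now pins the cluster kubeconfig explicitly instead of relying on the ambient environment; schematically (paths as in this test fixture):

    kubeconfig = "./tmp/helm3_cluster_id/.kube/config"

    def helm_command(args: str) -> str:
        # sketch of the prefix the tests expect on every helm3 invocation
        return "env KUBECONFIG={} /usr/bin/helm3 {}".format(kubeconfig, args)

    helm_command("repo update bitnami")
    # -> "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 repo update bitnami"
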
     @asynctest.fail_on(active_handles=True)
     async def test_repo_list(self):
-
         self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
 
         await self.helm_conn.repo_list(self.cluster_uuid)
@@ -139,14 +142,13 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         self.helm_conn.fs.reverse_sync.assert_called_once_with(
             from_path=self.cluster_id
         )
-        command = "/usr/bin/helm3 repo list --output yaml"
+        command = "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 repo list --output yaml"
         self.helm_conn._local_async_exec.assert_called_with(
             command=command, env=self.env, raise_exception_on_error=False
         )
 
     @asynctest.fail_on(active_handles=True)
     async def test_repo_remove(self):
-
         self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
         repo_name = "bitnami"
         await self.helm_conn.repo_remove(self.cluster_uuid, repo_name)
@@ -155,7 +157,9 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         self.helm_conn.fs.reverse_sync.assert_called_once_with(
             from_path=self.cluster_id
         )
-        command = "/usr/bin/helm3 repo remove {}".format(repo_name)
+        command = "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 repo remove {}".format(
+            repo_name
+        )
         self.helm_conn._local_async_exec.assert_called_with(
             command=command, env=self.env, raise_exception_on_error=True
         )
@@ -168,6 +172,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
         self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=None)
         self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn._repo_to_oci_url = Mock(return_value=None)
         self.kdu_instance = "stable-openldap-0005399828"
         self.helm_conn.generate_kdu_instance_name = Mock(return_value=self.kdu_instance)
         self.helm_conn._get_namespaces = asynctest.CoroutineMock(return_value=[])
@@ -190,9 +195,17 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         self.helm_conn._create_namespace.assert_called_once_with(
             self.cluster_id, self.namespace
         )
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
+        self.helm_conn.fs.sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
+        )
+        self.helm_conn.fs.reverse_sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
         )
         self.helm_conn._store_status.assert_called_with(
             cluster_id=self.cluster_id,
@@ -200,14 +213,13 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             namespace=self.namespace,
             db_dict=db_dict,
             operation="install",
-            run_once=True,
-            check_every=0,
         )
         command = (
-            "/usr/bin/helm3 install stable-openldap-0005399828 --atomic --output yaml   "
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 "
+            "install stable-openldap-0005399828 --atomic --output yaml   "
             "--timeout 300s --namespace testk8s stable/openldap --version 1.2.2"
         )
             "--timeout 300s --namespace testk8s stable/openldap --version 1.2.2"
         )
-        self.helm_conn._local_async_exec.assert_called_once_with(
+        self.helm_conn._local_async_exec.assert_called_with(
             command=command, env=self.env, raise_exception_on_error=False
         )
 
@@ -255,16 +267,57 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         }
         self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
         self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn._repo_to_oci_url = Mock(return_value=None)
         self.helm_conn.get_instance_info = asynctest.CoroutineMock(
             return_value=instance_info
         )
+        # TEST-1 (--force true)
+        await self.helm_conn.upgrade(
+            self.cluster_uuid,
+            kdu_instance,
+            kdu_model,
+            atomic=True,
+            db_dict=db_dict,
+            force=True,
+        )
+        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
+        )
+        self.helm_conn._store_status.assert_called_with(
+            cluster_id=self.cluster_id,
+            kdu_instance=kdu_instance,
+            namespace=self.namespace,
+            db_dict=db_dict,
+            operation="upgrade",
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
+            "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
+            "--namespace testk8s --atomic --force --output yaml  --timeout 300s "
+            "--reuse-values --version 1.2.3"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
 
 
         await self.helm_conn.upgrade(
         await self.helm_conn.upgrade(
-            self.cluster_uuid, kdu_instance, kdu_model, atomic=True, db_dict=db_dict
+            self.cluster_uuid,
+            kdu_instance,
+            kdu_model,
+            atomic=True,
+            db_dict=db_dict,
         )
         )
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
+        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
         )
         self.helm_conn._store_status.assert_called_with(
             cluster_id=self.cluster_id,
@@ -272,18 +325,152 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             namespace=self.namespace,
             db_dict=db_dict,
             operation="upgrade",
-            run_once=True,
-            check_every=0,
         )
         command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
             "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
             "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
-            "--namespace testk8s --atomic --output yaml  --timeout 300s  "
-            "--version 1.2.3"
+            "--namespace testk8s --atomic --output yaml  --timeout 300s "
+            "--reuse-values --version 1.2.3"
         )
-        self.helm_conn._local_async_exec.assert_called_once_with(
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_upgrade_namespace(self):
+        kdu_model = "stable/openldap:1.2.3"
+        kdu_instance = "stable-openldap-0005399828"
+        db_dict = {}
+        instance_info = {
+            "chart": "openldap-1.2.2",
+            "name": kdu_instance,
+            "namespace": self.namespace,
+            "revision": 1,
+            "status": "DEPLOYED",
+        }
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+        self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn._repo_to_oci_url = Mock(return_value=None)
+        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
+            return_value=instance_info
+        )
+
+        await self.helm_conn.upgrade(
+            self.cluster_uuid,
+            kdu_instance,
+            kdu_model,
+            atomic=True,
+            db_dict=db_dict,
+            namespace="default",
+        )
+        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
+        )
+        self.helm_conn._store_status.assert_called_with(
+            cluster_id=self.cluster_id,
+            kdu_instance=kdu_instance,
+            namespace="default",
+            db_dict=db_dict,
+            operation="upgrade",
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
+            "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
+            "--namespace default --atomic --output yaml  --timeout 300s "
+            "--reuse-values --version 1.2.3"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(
             command=command, env=self.env, raise_exception_on_error=False
         )
 
             command=command, env=self.env, raise_exception_on_error=False
         )
 
+    @asynctest.fail_on(active_handles=True)
+    async def test_scale(self):
+        kdu_model = "stable/openldap:1.2.3"
+        kdu_instance = "stable-openldap-0005399828"
+        db_dict = {}
+        instance_info = {
+            "chart": "openldap-1.2.3",
+            "name": kdu_instance,
+            "namespace": self.namespace,
+            "revision": 1,
+            "status": "DEPLOYED",
+        }
+        repo_list = [
+            {
+                "name": "stable",
+                "url": "https://kubernetes-charts.storage.googleapis.com/",
+            }
+        ]
+        kdu_values = """
+            # Default values for openldap.
+            # This is a YAML-formatted file.
+            # Declare variables to be passed into your templates.
+
+            replicaCount: 1
+            dummy-app:
+              replicas: 2
+        """
+
+        self.helm_conn.repo_list = asynctest.CoroutineMock(return_value=repo_list)
+        self.helm_conn.values_kdu = asynctest.CoroutineMock(return_value=kdu_values)
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+        self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn._repo_to_oci_url = Mock(return_value=None)
+        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
+            return_value=instance_info
+        )
+
+        # TEST-1
+        await self.helm_conn.scale(
+            kdu_instance,
+            2,
+            "",
+            kdu_model=kdu_model,
+            cluster_uuid=self.cluster_uuid,
+            atomic=True,
+            db_dict=db_dict,
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
+            "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
+            "--namespace testk8s --atomic --output yaml --set replicaCount=2 --timeout 1800s "
+            "--reuse-values --version 1.2.3"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
+        # TEST-2
+        await self.helm_conn.scale(
+            kdu_instance,
+            3,
+            "dummy-app",
+            kdu_model=kdu_model,
+            cluster_uuid=self.cluster_uuid,
+            atomic=True,
+            db_dict=db_dict,
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
+            "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
+            "--namespace testk8s --atomic --output yaml --set dummy-app.replicas=3 --timeout 1800s "
+            "--reuse-values --version 1.2.3"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
+        self.helm_conn.fs.reverse_sync.assert_called_with(from_path=self.cluster_id)
+        self.helm_conn._store_status.assert_called_with(
+            cluster_id=self.cluster_id,
+            kdu_instance=kdu_instance,
+            namespace=self.namespace,
+            db_dict=db_dict,
+            operation="scale",
+        )
+
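
The scale test pins down how a scale request becomes a helm upgrade with a --set override; schematically (resource names from this test):

    def scale_override(resource_name: str, replicas: int) -> str:
        # top-level charts scale "replicaCount"; sub-charts get "<name>.replicas"
        if not resource_name:
            return "replicaCount={}".format(replicas)
        return "{}.replicas={}".format(resource_name, replicas)

    scale_override("", 2)           # "replicaCount=2"
    scale_override("dummy-app", 3)  # "dummy-app.replicas=3"
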
     @asynctest.fail_on(active_handles=True)
     async def test_rollback(self):
         kdu_instance = "stable-openldap-0005399828"
@@ -304,7 +491,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         await self.helm_conn.rollback(
             self.cluster_uuid, kdu_instance=kdu_instance, revision=1, db_dict=db_dict
         )
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
+        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
         self.helm_conn.fs.reverse_sync.assert_called_once_with(
             from_path=self.cluster_id
         )
@@ -314,10 +501,11 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             namespace=self.namespace,
             db_dict=db_dict,
             operation="rollback",
-            run_once=True,
-            check_every=0,
         )
-        command = "/usr/bin/helm3 rollback stable-openldap-0005399828 1 --namespace=testk8s --wait"
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 "
+            "rollback stable-openldap-0005399828 1 --namespace=testk8s --wait"
+        )
         self.helm_conn._local_async_exec.assert_called_once_with(
             command=command, env=self.env, raise_exception_on_error=False
         )
@@ -339,13 +527,13 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         )
 
         await self.helm_conn.uninstall(self.cluster_uuid, kdu_instance)
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
+        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
         self.helm_conn.fs.reverse_sync.assert_called_once_with(
             from_path=self.cluster_id
         )
-        command = "/usr/bin/helm3 uninstall {} --namespace={}".format(
-            kdu_instance, self.namespace
-        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 uninstall {} --namespace={}"
+        ).format(kdu_instance, self.namespace)
         self.helm_conn._local_async_exec.assert_called_once_with(
             command=command, env=self.env, raise_exception_on_error=True
         )
@@ -368,9 +556,9 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             from_path=self.cluster_id
         )
         self.helm_conn._parse_services.assert_called_once()
-        command1 = "/usr/bin/helm3 get manifest {} --namespace=testk8s".format(
-            kdu_instance
-        )
+        command1 = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 get manifest {} --namespace=testk8s"
+        ).format(kdu_instance)
         command2 = "/usr/bin/kubectl get --namespace={} -f -".format(self.namespace)
         self.helm_conn._local_async_exec_pipe.assert_called_once_with(
             command1, command2, env=self.env, raise_exception_on_error=True
@@ -413,9 +601,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             "https://kubernetes-charts.storage.googleapis.com/ "
             "--version 1.2.4"
         )
             "https://kubernetes-charts.storage.googleapis.com/ "
             "--version 1.2.4"
         )
-        self.helm_conn._local_async_exec.assert_called_with(
-            command=command, encode_utf8=True
-        )
+        self.helm_conn._local_async_exec.assert_called_with(command=command)
 
     @asynctest.fail_on(active_handles=True)
     async def test_help_kdu(self):
@@ -430,9 +616,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             "https://kubernetes-charts.storage.googleapis.com/ "
             "--version 1.2.4"
         )
             "https://kubernetes-charts.storage.googleapis.com/ "
             "--version 1.2.4"
         )
-        self.helm_conn._local_async_exec.assert_called_with(
-            command=command, encode_utf8=True
-        )
+        self.helm_conn._local_async_exec.assert_called_with(command=command)
 
     @asynctest.fail_on(active_handles=True)
     async def test_values_kdu(self):
@@ -447,10 +631,23 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             "https://kubernetes-charts.storage.googleapis.com/ "
             "--version 1.2.4"
         )
             "https://kubernetes-charts.storage.googleapis.com/ "
             "--version 1.2.4"
         )
-        self.helm_conn._local_async_exec.assert_called_with(
-            command=command, encode_utf8=True
+        self.helm_conn._local_async_exec.assert_called_with(command=command)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_get_values_kdu(self):
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+
+        kdu_instance = "stable-openldap-0005399828"
+        await self.helm_conn.get_values_kdu(
+            kdu_instance, self.namespace, self.env["KUBECONFIG"]
         )
 
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 get values "
+            "stable-openldap-0005399828 --namespace=testk8s --output yaml"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(command=command)
+
     @asynctest.fail_on(active_handles=True)
     async def test_instances_list(self):
         self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
@@ -471,11 +668,11 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
 
         await self.helm_conn._status_kdu(
-            self.cluster_id, kdu_instance, self.namespace, return_text=True
-        )
-        command = "/usr/bin/helm3 status {} --namespace={} --output yaml".format(
-            kdu_instance, self.namespace
+            self.cluster_id, kdu_instance, self.namespace, yaml_format=True
         )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 status {} --namespace={} --output yaml"
+        ).format(kdu_instance, self.namespace)
         self.helm_conn._local_async_exec.assert_called_once_with(
             command=command,
             env=self.env,
@@ -507,14 +704,12 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             namespace=self.namespace,
             db_dict=db_dict,
             operation="install",
-            run_once=True,
-            check_every=0,
         )
         self.helm_conn._status_kdu.assert_called_once_with(
             cluster_id=self.cluster_id,
             kdu_instance=kdu_instance,
             namespace=self.namespace,
-            return_text=False,
+            yaml_format=False,
         )
         self.helm_conn.write_app_status_to_db.assert_called_once_with(
             db_dict=db_dict,
@@ -548,6 +743,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
                 "updated": "2020-10-30 11:11:20.376744191 +0000 UTC",
             }
         ]
+        self.helm_conn._get_namespace = Mock(return_value=self.namespace)
         self.helm_conn._uninstall_sw = asynctest.CoroutineMock()
         self.helm_conn.instances_list = asynctest.CoroutineMock(return_value=instances)
         self.helm_conn.uninstall = asynctest.CoroutineMock()
@@ -557,6 +753,9 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         self.helm_conn.fs.file_delete.assert_called_once_with(
             self.cluster_id, ignore_non_exist=True
         )
+        self.helm_conn._get_namespace.assert_called_once_with(
+            cluster_uuid=self.cluster_uuid
+        )
         self.helm_conn.instances_list.assert_called_once_with(
             cluster_uuid=self.cluster_uuid
         )
@@ -564,7 +763,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             cluster_uuid=self.cluster_uuid, kdu_instance=kdu_instance
         )
         self.helm_conn._uninstall_sw.assert_called_once_with(
-            self.cluster_id, self.namespace
+            cluster_id=self.cluster_id, namespace=self.namespace
         )
 
     @asynctest.fail_on(active_handles=True)
@@ -603,7 +802,13 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         )
         self.helm_conn.repo_remove.assert_not_called()
         self.helm_conn.repo_add.assert_called_once_with(
-            self.cluster_uuid, "bitnami", "https://charts.bitnami.com/bitnami"
+            self.cluster_uuid,
+            "bitnami",
+            "https://charts.bitnami.com/bitnami",
+            cert=None,
+            user=None,
+            password=None,
+            oci=False,
         )
         self.assertEqual(deleted_repo_list, [], "Deleted repo list should be empty")
         self.assertEqual(
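
Note on the hunks above: _status_kdu's return_text flag is renamed to the more accurate yaml_format (the underlying command already passes --output yaml), _local_async_exec is no longer called with encode_utf8, and repo_add is now asserted with its full keyword signature, including OCI-registry support. A sketch of the call shape the repo synchronization test pins down, shown for orientation under the setUp conventions of these tests rather than as the connector's documented API:

    async def sync_bitnami_repo(helm_conn, cluster_uuid):
        # cluster_uuid is the "<namespace>:<cluster-id>" composite id built in setUp.
        await helm_conn.repo_add(
            cluster_uuid,
            "bitnami",
            "https://charts.bitnami.com/bitnami",
            cert=None,      # CA certificate for TLS-guarded repositories
            user=None,      # basic-auth user, if any
            password=None,  # basic-auth password, if any
            oci=False,      # True when the URL points at an OCI registry
        )
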
diff --git a/n2vc/tests/unit/test_k8s_helm_conn.py b/n2vc/tests/unit/test_k8s_helm_conn.py
deleted file mode 100644 (file)
index c57c8a4..0000000
+++ /dev/null
@@ -1,537 +0,0 @@
-##
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: alfonso.tiernosepulveda@telefonica.com
-##
-
-import asynctest
-import logging
-
-from asynctest.mock import Mock
-from osm_common.dbmemory import DbMemory
-from osm_common.fslocal import FsLocal
-from n2vc.k8s_helm_conn import K8sHelmConnector
-
-__author__ = "Isabel Lloret <illoret@indra.es>"
-
-
-class TestK8sHelmConn(asynctest.TestCase):
-    logging.basicConfig(level=logging.DEBUG)
-    logger = logging.getLogger(__name__)
-    logger.setLevel(logging.DEBUG)
-
-    async def setUp(self):
-        self.db = Mock(DbMemory())
-        self.fs = asynctest.Mock(FsLocal())
-        self.fs.path = "./tmp/"
-        self.namespace = "testk8s"
-        self.service_account = "osm"
-        self.cluster_id = "helm_cluster_id"
-        self.cluster_uuid = "{}:{}".format(self.namespace, self.cluster_id)
-        # pass fake kubectl and helm commands to make sure it does not call actual commands
-        K8sHelmConnector._check_file_exists = asynctest.Mock(return_value=True)
-        K8sHelmConnector._local_async_exec = asynctest.CoroutineMock(
-            return_value=("", 0)
-        )
-        cluster_dir = self.fs.path + self.cluster_id
-        self.kube_config = self.fs.path + self.cluster_id + "/.kube/config"
-        self.helm_home = self.fs.path + self.cluster_id + "/.helm"
-        self.env = {
-            "HELM_HOME": "{}/.helm".format(cluster_dir),
-            "KUBECONFIG": "{}/.kube/config".format(cluster_dir),
-        }
-        self.helm_conn = K8sHelmConnector(self.fs, self.db, log=self.logger)
-        self.logger.debug("Set up executed")
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_init_env(self):
-        # TODO
-        pass
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_repo_add(self):
-        repo_name = "bitnami"
-        repo_url = "https://charts.bitnami.com/bitnami"
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        await self.helm_conn.repo_add(self.cluster_uuid, repo_name, repo_url)
-
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        self.assertEqual(
-            self.helm_conn._local_async_exec.call_count,
-            2,
-            "local_async_exec expected 2 calls, called {}".format(
-                self.helm_conn._local_async_exec.call_count
-            ),
-        )
-
-        repo_update_command = "/usr/bin/helm repo update"
-        repo_add_command = "/usr/bin/helm repo add {} {}".format(repo_name, repo_url)
-        calls = self.helm_conn._local_async_exec.call_args_list
-        call0_kargs = calls[0][1]
-        self.assertEqual(
-            call0_kargs.get("command"),
-            repo_update_command,
-            "Invalid repo update command: {}".format(call0_kargs.get("command")),
-        )
-        self.assertEqual(
-            call0_kargs.get("env"),
-            self.env,
-            "Invalid env for update command: {}".format(call0_kargs.get("env")),
-        )
-        call1_kargs = calls[1][1]
-        self.assertEqual(
-            call1_kargs.get("command"),
-            repo_add_command,
-            "Invalid repo add command: {}".format(call1_kargs.get("command")),
-        )
-        self.assertEqual(
-            call1_kargs.get("env"),
-            self.env,
-            "Invalid env for add command: {}".format(call1_kargs.get("env")),
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_repo_list(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        await self.helm_conn.repo_list(self.cluster_uuid)
-
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        command = "/usr/bin/helm repo list --output yaml"
-        self.helm_conn._local_async_exec.assert_called_with(
-            command=command, env=self.env, raise_exception_on_error=False
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_repo_remove(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-        repo_name = "bitnami"
-        await self.helm_conn.repo_remove(self.cluster_uuid, repo_name)
-
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        command = "/usr/bin/helm repo remove {}".format(repo_name)
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=True
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_install(self):
-        kdu_model = "stable/openldap:1.2.2"
-        kdu_instance = "stable-openldap-0005399828"
-        db_dict = {}
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-        self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=None)
-        self.helm_conn._store_status = asynctest.CoroutineMock()
-        self.helm_conn.generate_kdu_instance_name = Mock(return_value=kdu_instance)
-
-        await self.helm_conn.install(
-            self.cluster_uuid,
-            kdu_model,
-            kdu_instance,
-            atomic=True,
-            namespace=self.namespace,
-            db_dict=db_dict,
-        )
-
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        self.helm_conn._store_status.assert_called_with(
-            cluster_id=self.cluster_id,
-            kdu_instance=kdu_instance,
-            namespace=self.namespace,
-            db_dict=db_dict,
-            operation="install",
-            run_once=True,
-            check_every=0,
-        )
-        command = (
-            "/usr/bin/helm install --atomic --output yaml   --timeout 300 "
-            "--name=stable-openldap-0005399828 --namespace testk8s stable/openldap "
-            "--version 1.2.2"
-        )
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=False
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_upgrade(self):
-        kdu_model = "stable/openldap:1.2.3"
-        kdu_instance = "stable-openldap-0005399828"
-        db_dict = {}
-        instance_info = {
-            "chart": "openldap-1.2.2",
-            "name": kdu_instance,
-            "namespace": self.namespace,
-            "revision": 1,
-            "status": "DEPLOYED",
-        }
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-        self.helm_conn._store_status = asynctest.CoroutineMock()
-        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
-            return_value=instance_info
-        )
-
-        await self.helm_conn.upgrade(
-            self.cluster_uuid, kdu_instance, kdu_model, atomic=True, db_dict=db_dict
-        )
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        self.helm_conn._store_status.assert_called_with(
-            cluster_id=self.cluster_id,
-            kdu_instance=kdu_instance,
-            namespace=self.namespace,
-            db_dict=db_dict,
-            operation="upgrade",
-            run_once=True,
-            check_every=0,
-        )
-        command = (
-            "/usr/bin/helm upgrade --atomic --output yaml  --timeout 300 "
-            "stable-openldap-0005399828 stable/openldap --version 1.2.3"
-        )
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=False
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_rollback(self):
-        kdu_instance = "stable-openldap-0005399828"
-        db_dict = {}
-        instance_info = {
-            "chart": "openldap-1.2.3",
-            "name": kdu_instance,
-            "namespace": self.namespace,
-            "revision": 2,
-            "status": "DEPLOYED",
-        }
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-        self.helm_conn._store_status = asynctest.CoroutineMock()
-        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
-            return_value=instance_info
-        )
-
-        await self.helm_conn.rollback(
-            self.cluster_uuid, kdu_instance=kdu_instance, revision=1, db_dict=db_dict
-        )
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        self.helm_conn._store_status.assert_called_with(
-            cluster_id=self.cluster_id,
-            kdu_instance=kdu_instance,
-            namespace=self.namespace,
-            db_dict=db_dict,
-            operation="rollback",
-            run_once=True,
-            check_every=0,
-        )
-        command = "/usr/bin/helm rollback stable-openldap-0005399828 1 --wait"
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=False
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_uninstall(self):
-        kdu_instance = "stable-openldap-0005399828"
-        instance_info = {
-            "chart": "openldap-1.2.2",
-            "name": kdu_instance,
-            "namespace": self.namespace,
-            "revision": 3,
-            "status": "DEPLOYED",
-        }
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-        self.helm_conn._store_status = asynctest.CoroutineMock()
-        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
-            return_value=instance_info
-        )
-
-        await self.helm_conn.uninstall(self.cluster_uuid, kdu_instance)
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        command = "/usr/bin/helm delete --purge  {}".format(kdu_instance)
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=True
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_get_services(self):
-        kdu_instance = "test_services_1"
-        service = {"name": "testservice", "type": "LoadBalancer"}
-        self.helm_conn._local_async_exec_pipe = asynctest.CoroutineMock(
-            return_value=("", 0)
-        )
-        self.helm_conn._parse_services = Mock(return_value=["testservice"])
-        self.helm_conn._get_service = asynctest.CoroutineMock(return_value=service)
-
-        services = await self.helm_conn.get_services(
-            self.cluster_uuid, kdu_instance, self.namespace
-        )
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        self.helm_conn._parse_services.assert_called_once()
-        command1 = "/usr/bin/helm get manifest {} ".format(kdu_instance)
-        command2 = "/usr/bin/kubectl get --namespace={} -f -".format(self.namespace)
-        self.helm_conn._local_async_exec_pipe.assert_called_once_with(
-            command1, command2, env=self.env, raise_exception_on_error=True
-        )
-        self.assertEqual(
-            services, [service], "Invalid service returned from get_service"
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_get_service(self):
-        service_name = "service1"
-
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-        await self.helm_conn.get_service(
-            self.cluster_uuid, service_name, self.namespace
-        )
-
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        command = (
-            "/usr/bin/kubectl --kubeconfig=./tmp/helm_cluster_id/.kube/config "
-            "--namespace=testk8s get service service1 -o=yaml"
-        )
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=True
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_inspect_kdu(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        kdu_model = "stable/openldap:1.2.4"
-        repo_url = "https://kubernetes-charts.storage.googleapis.com/"
-        await self.helm_conn.inspect_kdu(kdu_model, repo_url)
-
-        command = (
-            "/usr/bin/helm inspect  openldap --repo "
-            "https://kubernetes-charts.storage.googleapis.com/ "
-            "--version 1.2.4"
-        )
-        self.helm_conn._local_async_exec.assert_called_with(
-            command=command, encode_utf8=True
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_help_kdu(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        kdu_model = "stable/openldap:1.2.4"
-        repo_url = "https://kubernetes-charts.storage.googleapis.com/"
-        await self.helm_conn.help_kdu(kdu_model, repo_url)
-
-        command = (
-            "/usr/bin/helm inspect readme openldap --repo "
-            "https://kubernetes-charts.storage.googleapis.com/ "
-            "--version 1.2.4"
-        )
-        self.helm_conn._local_async_exec.assert_called_with(
-            command=command, encode_utf8=True
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_values_kdu(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        kdu_model = "stable/openldap:1.2.4"
-        repo_url = "https://kubernetes-charts.storage.googleapis.com/"
-        await self.helm_conn.values_kdu(kdu_model, repo_url)
-
-        command = (
-            "/usr/bin/helm inspect values openldap --repo "
-            "https://kubernetes-charts.storage.googleapis.com/ "
-            "--version 1.2.4"
-        )
-        self.helm_conn._local_async_exec.assert_called_with(
-            command=command, encode_utf8=True
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_instances_list(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        await self.helm_conn.instances_list(self.cluster_uuid)
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
-        )
-        command = "/usr/bin/helm list --output yaml"
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command, env=self.env, raise_exception_on_error=True
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_status_kdu(self):
-        kdu_instance = "stable-openldap-0005399828"
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        await self.helm_conn._status_kdu(
-            self.cluster_id, kdu_instance, self.namespace, return_text=True
-        )
-        command = "/usr/bin/helm status {} --output yaml".format(kdu_instance)
-        self.helm_conn._local_async_exec.assert_called_once_with(
-            command=command,
-            env=self.env,
-            raise_exception_on_error=True,
-            show_error_log=False,
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_store_status(self):
-        kdu_instance = "stable-openldap-0005399828"
-        db_dict = {}
-        status = {
-            "info": {
-                "description": "Install complete",
-                "status": {
-                    "code": "1",
-                    "notes": "The openldap helm chart has been installed",
-                },
-            }
-        }
-        self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=status)
-        self.helm_conn.write_app_status_to_db = asynctest.CoroutineMock(
-            return_value=status
-        )
-
-        await self.helm_conn._store_status(
-            cluster_id=self.cluster_id,
-            kdu_instance=kdu_instance,
-            namespace=self.namespace,
-            db_dict=db_dict,
-            operation="install",
-            run_once=True,
-            check_every=0,
-        )
-        self.helm_conn._status_kdu.assert_called_once_with(
-            cluster_id=self.cluster_id,
-            kdu_instance=kdu_instance,
-            namespace=self.namespace,
-            return_text=False,
-        )
-        self.helm_conn.write_app_status_to_db.assert_called_once_with(
-            db_dict=db_dict,
-            status="Install complete",
-            detailed_status=str(status),
-            operation="install",
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_reset_uninstall_false(self):
-        self.helm_conn._uninstall_sw = asynctest.CoroutineMock()
-
-        await self.helm_conn.reset(self.cluster_uuid, force=False, uninstall_sw=False)
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.file_delete.assert_called_once_with(
-            self.cluster_id, ignore_non_exist=True
-        )
-        self.helm_conn._uninstall_sw.assert_not_called()
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_reset_uninstall(self):
-        kdu_instance = "stable-openldap-0021099429"
-        instances = [
-            {
-                "app_version": "2.4.48",
-                "chart": "openldap-1.2.3",
-                "name": kdu_instance,
-                "namespace": self.namespace,
-                "revision": "1",
-                "status": "deployed",
-                "updated": "2020-10-30 11:11:20.376744191 +0000 UTC",
-            }
-        ]
-        self.helm_conn._uninstall_sw = asynctest.CoroutineMock()
-        self.helm_conn.instances_list = asynctest.CoroutineMock(return_value=instances)
-        self.helm_conn.uninstall = asynctest.CoroutineMock()
-
-        await self.helm_conn.reset(self.cluster_uuid, force=True, uninstall_sw=True)
-        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
-        self.helm_conn.fs.file_delete.assert_called_once_with(
-            self.cluster_id, ignore_non_exist=True
-        )
-        self.helm_conn.instances_list.assert_called_once_with(
-            cluster_uuid=self.cluster_uuid
-        )
-        self.helm_conn.uninstall.assert_called_once_with(
-            cluster_uuid=self.cluster_uuid, kdu_instance=kdu_instance
-        )
-        self.helm_conn._uninstall_sw.assert_called_once_with(
-            self.cluster_id, self.namespace
-        )
-
-    @asynctest.fail_on(active_handles=True)
-    async def test_uninstall_sw_namespace(self):
-        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
-
-        await self.helm_conn._uninstall_sw(self.cluster_id, self.namespace)
-        calls = self.helm_conn._local_async_exec.call_args_list
-        self.assertEqual(
-            len(calls), 3, "To uninstall should have executed three commands"
-        )
-        call0_kargs = calls[0][1]
-        command_0 = "/usr/bin/helm --kubeconfig={} --home={} reset".format(
-            self.kube_config, self.helm_home
-        )
-        self.assertEqual(
-            call0_kargs,
-            {"command": command_0, "raise_exception_on_error": True, "env": self.env},
-            "Invalid args for first call to local_exec",
-        )
-        call1_kargs = calls[1][1]
-        command_1 = (
-            "/usr/bin/kubectl --kubeconfig={} delete "
-            "clusterrolebinding.rbac.authorization.k8s.io/osm-tiller-cluster-rule".format(
-                self.kube_config
-            )
-        )
-        self.assertEqual(
-            call1_kargs,
-            {"command": command_1, "raise_exception_on_error": False, "env": self.env},
-            "Invalid args for second call to local_exec",
-        )
-        call2_kargs = calls[2][1]
-        command_2 = (
-            "/usr/bin/kubectl --kubeconfig={} --namespace kube-system delete "
-            "serviceaccount/{}".format(self.kube_config, self.service_account)
-        )
-        self.assertEqual(
-            call2_kargs,
-            {"command": command_2, "raise_exception_on_error": False, "env": self.env},
-            "Invalid args for third call to local_exec",
-        )
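
Note: the deletion above removes the Helm v2 connector tests wholesale; the Tiller-era commands they asserted (helm reset, helm delete --purge, the osm-tiller-cluster-rule cluster role binding) have no counterpart in the Helm 3 flow the remaining tests cover. A rough, illustrative mapping of the deleted invocations to their closest Helm 3 equivalents; exact flags depend on the Helm 3 version in use:

    HELM2_TO_HELM3 = {
        "/usr/bin/helm install --name=NAME CHART": "/usr/bin/helm3 install NAME CHART",
        "/usr/bin/helm delete --purge NAME": "/usr/bin/helm3 uninstall NAME",
        # No equivalent: Helm 3 has no Tiller to reset or tear down.
        "/usr/bin/helm reset": None,
    }
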
diff --git a/n2vc/tests/unit/test_k8s_juju_conn.py b/n2vc/tests/unit/test_k8s_juju_conn.py
index 1423c61..1de1288 100644 (file)
@@ -17,13 +17,11 @@ import asyncio
 import logging
 import asynctest
 from unittest.mock import Mock
+from n2vc.definitions import Offer, RelationEndpoint
 from n2vc.k8s_juju_conn import K8sJujuConnector, RBAC_LABEL_KEY_NAME
 from osm_common import fslocal
 from .utils import kubeconfig, FakeModel, FakeFileWrapper, AsyncMock, FakeApplication
-from n2vc.exceptions import (
-    MethodNotImplemented,
-    K8sException,
-)
+from n2vc.exceptions import MethodNotImplemented, K8sException
 from n2vc.vca.connection_data import ConnectionData
 
 
@@ -66,11 +64,14 @@ class K8sJujuConnTestCase(asynctest.TestCase):
         )
         logging.disable(logging.CRITICAL)
 
+        self.kdu_name = "kdu_name"
+        self.kdu_instance = "{}-{}".format(self.kdu_name, "id")
+        self.default_namespace = self.kdu_instance
+
         self.k8s_juju_conn = K8sJujuConnector(
             fs=fslocal.FsLocal(),
             db=self.db,
             log=None,
-            loop=self.loop,
             on_update_db=None,
         )
         self.k8s_juju_conn._store.get_vca_id.return_value = None
@@ -82,6 +83,9 @@ class K8sJujuConnTestCase(asynctest.TestCase):
         self.kubectl.get_services.return_value = [{}]
         self.k8s_juju_conn._get_kubectl = Mock()
         self.k8s_juju_conn._get_kubectl.return_value = self.kubectl
+        self.k8s_juju_conn._obtain_namespace_from_db = Mock(
+            return_value=self.default_namespace
+        )
 
 
 class InitEnvTest(K8sJujuConnTestCase):
@@ -126,9 +130,15 @@ class InitEnvTest(K8sJujuConnTestCase):
             uuid, created = self.loop.run_until_complete(
                 self.k8s_juju_conn.init_env(k8s_creds=kubeconfig)
             )
-
         self.assertIsNone(created)
         self.assertIsNone(uuid)
+        self.kubectl.create_cluster_role.assert_called_once()
+        self.kubectl.create_service_account.assert_called_once()
+        self.kubectl.create_cluster_role_binding.assert_called_once()
+        self.kubectl.get_default_storage_class.assert_called_once()
+        self.kubectl.delete_cluster_role.assert_called_once()
+        self.kubectl.delete_service_account.assert_called_once()
+        self.kubectl.delete_cluster_role_binding.assert_called_once()
         self.k8s_juju_conn.libjuju.add_k8s.assert_called_once()
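
Note on the InitEnvTest hunk above: init_env is now expected to provision the Juju access artifacts through kubectl and to remove them again on the failure path this test exercises. The assertions pin call counts only, not ordering; a plausible sequence, for orientation only:

    EXPECTED_KUBECTL_CALLS = [
        "create_cluster_role",
        "create_service_account",
        "create_cluster_role_binding",
        "get_default_storage_class",  # result feeds libjuju.add_k8s
        # cleanup once init_env fails:
        "delete_cluster_role",
        "delete_service_account",
        "delete_cluster_role_binding",
    ]
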
 
 
@@ -202,9 +212,7 @@ class InstallTest(K8sJujuConnTestCase):
         self.local_bundle = "bundle"
         self.cs_bundle = "cs:bundle"
         self.http_bundle = "https://example.com/bundle.yaml"
-        self.kdu_name = "kdu_name"
         self.cluster_uuid = "cluster"
-        self.kdu_instance = "{}-{}".format(self.kdu_name, "id")
         self.k8s_juju_conn.libjuju.add_model = AsyncMock()
         self.k8s_juju_conn.libjuju.deploy = AsyncMock()
 
@@ -218,15 +226,17 @@ class InstallTest(K8sJujuConnTestCase):
                 kdu_name=self.kdu_name,
                 db_dict=self.db_dict,
                 timeout=1800,
+                params=None,
             )
         )
         self.assertEqual(mock_chdir.call_count, 2)
         self.k8s_juju_conn.libjuju.add_model.assert_called_once()
         self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
             "local:{}".format(self.local_bundle),
-            model_name=self.kdu_instance,
+            model_name=self.default_namespace,
             wait=True,
             timeout=1800,
+            instantiation_params=None,
         )
 
     def test_success_cs(self, mock_chdir):
@@ -239,17 +249,20 @@ class InstallTest(K8sJujuConnTestCase):
                 kdu_name=self.kdu_name,
                 db_dict=self.db_dict,
                 timeout=1800,
+                params={},
             )
         )
         self.k8s_juju_conn.libjuju.add_model.assert_called_once()
         self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
             self.cs_bundle,
-            model_name=self.kdu_instance,
+            model_name=self.default_namespace,
             wait=True,
             timeout=1800,
+            instantiation_params=None,
         )
 
     def test_success_http(self, mock_chdir):
+        params = {"overlay": {"applications": {"squid": {"scale": 2}}}}
         self.loop.run_until_complete(
             self.k8s_juju_conn.install(
                 self.cluster_uuid,
@@ -259,17 +272,20 @@ class InstallTest(K8sJujuConnTestCase):
                 kdu_name=self.kdu_name,
                 db_dict=self.db_dict,
                 timeout=1800,
+                params=params,
             )
         )
         self.k8s_juju_conn.libjuju.add_model.assert_called_once()
         self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
             self.http_bundle,
-            model_name=self.kdu_instance,
+            model_name=self.default_namespace,
             wait=True,
             timeout=1800,
+            instantiation_params=params.get("overlay"),
         )
 
     def test_success_not_kdu_name(self, mock_chdir):
+        params = {"some_key": {"applications": {"squid": {"scale": 2}}}}
         self.loop.run_until_complete(
             self.k8s_juju_conn.install(
                 self.cluster_uuid,
@@ -278,14 +294,16 @@ class InstallTest(K8sJujuConnTestCase):
                 atomic=True,
                 db_dict=self.db_dict,
                 timeout=1800,
+                params=params,
             )
         )
         self.k8s_juju_conn.libjuju.add_model.assert_called_once()
         self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
             self.cs_bundle,
-            model_name=self.kdu_instance,
+            model_name=self.default_namespace,
             wait=True,
             timeout=1800,
+            instantiation_params=None,
         )
 
     def test_missing_db_dict(self, mock_chdir):
@@ -322,9 +340,10 @@ class InstallTest(K8sJujuConnTestCase):
         self.k8s_juju_conn.libjuju.add_model.assert_called_once()
         self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
             self.cs_bundle,
-            model_name=self.kdu_instance,
+            model_name=self.default_namespace,
             wait=True,
             timeout=1800,
+            instantiation_params=None,
         )
 
     def test_missing_bundle(self, mock_chdir):
@@ -360,9 +379,10 @@ class InstallTest(K8sJujuConnTestCase):
         self.k8s_juju_conn.libjuju.add_model.assert_called_once()
         self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
             "local:{}".format(self.local_bundle),
-            model_name=self.kdu_instance,
+            model_name=self.default_namespace,
             wait=True,
             timeout=1800,
+            instantiation_params=None,
         )
 
 
@@ -394,7 +414,6 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
         super(ExecPrimitivesTest, self).setUp()
         self.action_name = "touch"
         self.application_name = "myapp"
-        self.model_name = "model"
         self.k8s_juju_conn.libjuju.get_actions = AsyncMock()
         self.k8s_juju_conn.libjuju.execute_action = AsyncMock()
 
@@ -408,16 +427,22 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
 
         output = self.loop.run_until_complete(
             self.k8s_juju_conn.exec_primitive(
-                "cluster", self.model_name, self.action_name, params=params
+                "cluster", self.kdu_instance, self.action_name, params=params
             )
         )
 
         self.assertEqual(output, "success")
+        self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with(
+            kdu_instance=self.kdu_instance
+        )
         self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with(
-            self.application_name, self.model_name
+            application_name=self.application_name, model_name=self.default_namespace
         )
         self.k8s_juju_conn.libjuju.execute_action.assert_called_once_with(
-            self.application_name, self.model_name, self.action_name, **params
+            application_name=self.application_name,
+            model_name=self.default_namespace,
+            action_name=self.action_name,
+            **params
         )
 
     def test_exception(self):
@@ -429,16 +454,22 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
         with self.assertRaises(Exception):
             output = self.loop.run_until_complete(
                 self.k8s_juju_conn.exec_primitive(
-                    "cluster", self.model_name, self.action_name, params=params
+                    "cluster", self.kdu_instance, self.action_name, params=params
                 )
             )
 
         self.assertIsNone(output)
+        self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with(
+            kdu_instance=self.kdu_instance
+        )
         self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with(
-            self.application_name, self.model_name
+            application_name=self.application_name, model_name=self.default_namespace
         )
         self.k8s_juju_conn.libjuju.execute_action.assert_called_once_with(
-            self.application_name, self.model_name, self.action_name, **params
+            application_name=self.application_name,
+            model_name=self.default_namespace,
+            action_name=self.action_name,
+            **params
         )
 
     def test_missing_application_name_in_params(self):
@@ -448,7 +479,7 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
         with self.assertRaises(K8sException):
             output = self.loop.run_until_complete(
                 self.k8s_juju_conn.exec_primitive(
-                    "cluster", self.model_name, self.action_name, params=params
+                    "cluster", self.kdu_instance, self.action_name, params=params
                 )
             )
 
@@ -461,7 +492,7 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
         with self.assertRaises(K8sException):
             output = self.loop.run_until_complete(
                 self.k8s_juju_conn.exec_primitive(
-                    "cluster", self.model_name, self.action_name
+                    "cluster", self.kdu_instance, self.action_name
                 )
             )
 
@@ -480,13 +511,16 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
         with self.assertRaises(K8sException):
             output = self.loop.run_until_complete(
                 self.k8s_juju_conn.exec_primitive(
-                    "cluster", self.model_name, "non-existing-action", params=params
+                    "cluster", self.kdu_instance, "non-existing-action", params=params
                 )
             )
 
         self.assertIsNone(output)
+        self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with(
+            kdu_instance=self.kdu_instance
+        )
         self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with(
-            self.application_name, self.model_name
+            application_name=self.application_name, model_name=self.default_namespace
         )
         self.k8s_juju_conn.libjuju.execute_action.assert_not_called()
 
@@ -498,16 +532,22 @@ class ExecPrimitivesTest(K8sJujuConnTestCase):
         with self.assertRaises(K8sException):
             output = self.loop.run_until_complete(
                 self.k8s_juju_conn.exec_primitive(
-                    "cluster", self.model_name, self.action_name, params=params
+                    "cluster", self.kdu_instance, self.action_name, params=params
                 )
             )
 
         self.assertIsNone(output)
+        self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with(
+            kdu_instance=self.kdu_instance
+        )
         self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with(
-            self.application_name, self.model_name
+            application_name=self.application_name, model_name=self.default_namespace
         )
         self.k8s_juju_conn.libjuju.execute_action.assert_called_once_with(
-            self.application_name, self.model_name, self.action_name, **params
+            application_name=self.application_name,
+            model_name=self.default_namespace,
+            action_name=self.action_name,
+            **params
         )
 
 
@@ -646,8 +686,6 @@ class UpdateVcaStatusTest(K8sJujuConnTestCase):
     def setUp(self):
         super(UpdateVcaStatusTest, self).setUp()
         self.vcaStatus = {"model": {"applications": {"app": {"actions": {}}}}}
-        self.kdu_name = "kdu_name"
-        self.kdu_instance = "{}-{}".format(self.kdu_name, "id")
         self.k8s_juju_conn.libjuju.get_executed_actions = AsyncMock()
         self.k8s_juju_conn.libjuju.get_actions = AsyncMock()
         self.k8s_juju_conn.libjuju.get_application_configs = AsyncMock()
@@ -657,7 +695,6 @@ class UpdateVcaStatusTest(K8sJujuConnTestCase):
             self.k8s_juju_conn.update_vca_status(self.vcaStatus, self.kdu_instance)
         )
         self.k8s_juju_conn.libjuju.get_executed_actions.assert_called_once()
-        self.k8s_juju_conn.libjuju.get_actions.assert_called_once()
         self.k8s_juju_conn.libjuju.get_application_configs.assert_called_once()
 
     def test_exception(self):
@@ -668,7 +705,6 @@ class UpdateVcaStatusTest(K8sJujuConnTestCase):
                 self.k8s_juju_conn.update_vca_status(self.vcaStatus, self.kdu_instance)
             )
             self.k8s_juju_conn.libjuju.get_executed_actions.assert_not_called()
-            self.k8s_juju_conn.libjuju.get_actions.assert_not_called_once()
             self.k8s_juju_conn.libjuju.get_application_configs.assert_not_called_once()
 
 
@@ -721,3 +757,70 @@ class GetScaleCount(K8sJujuConnTestCase):
             )
         self.assertIsNone(status)
         self.k8s_juju_conn.libjuju.get_model_status.assert_called_once()
+
+
+class AddRelationTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(AddRelationTest, self).setUp()
+        self.k8s_juju_conn.libjuju.add_relation = AsyncMock()
+        self.k8s_juju_conn.libjuju.offer = AsyncMock()
+        self.k8s_juju_conn.libjuju.get_controller = AsyncMock()
+        self.k8s_juju_conn.libjuju.consume = AsyncMock()
+
+    def test_standard_relation_same_model_and_controller(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint1")
+        relation_endpoint_2 = RelationEndpoint("model-1.app2.1", None, "endpoint2")
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.k8s_juju_conn.libjuju.add_relation.assert_called_once_with(
+            model_name="model-1",
+            endpoint_1="app1:endpoint1",
+            endpoint_2="app2:endpoint2",
+        )
+        self.k8s_juju_conn.libjuju.offer.assert_not_called()
+        self.k8s_juju_conn.libjuju.consume.assert_not_called()
+
+    def test_cmr_relation_same_controller(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint")
+        relation_endpoint_2 = RelationEndpoint("model-2.app2.1", None, "endpoint")
+        offer = Offer("admin/model-1.app1")
+        self.k8s_juju_conn.libjuju.offer.return_value = offer
+        self.k8s_juju_conn.libjuju.consume.return_value = "saas"
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.k8s_juju_conn.libjuju.offer.assert_called_once_with(relation_endpoint_1)
+        self.k8s_juju_conn.libjuju.consume.assert_called_once()
+        self.k8s_juju_conn.libjuju.add_relation.assert_called_once_with(
+            "model-2", "app2:endpoint", "saas"
+        )
+
+    def test_cmr_relation_different_controller(self):
+        self.k8s_juju_conn._get_libjuju = AsyncMock(
+            return_value=self.k8s_juju_conn.libjuju
+        )
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", "vca-id-1", "endpoint")
+        relation_endpoint_2 = RelationEndpoint("model-1.app2.1", "vca-id-2", "endpoint")
+        offer = Offer("admin/model-1.app1")
+        self.k8s_juju_conn.libjuju.offer.return_value = offer
+        self.k8s_juju_conn.libjuju.consume.return_value = "saas"
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.k8s_juju_conn.libjuju.offer.assert_called_once_with(relation_endpoint_1)
+        self.k8s_juju_conn.libjuju.consume.assert_called_once()
+        self.k8s_juju_conn.libjuju.add_relation.assert_called_once_with(
+            "model-1", "app2:endpoint", "saas"
+        )
+
+    def test_relation_exception(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint")
+        relation_endpoint_2 = RelationEndpoint("model-2.app2.1", None, "endpoint")
+        self.k8s_juju_conn.libjuju.offer.side_effect = Exception()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.k8s_juju_conn.add_relation(
+                    relation_endpoint_1, relation_endpoint_2
+                )
+            )
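
Note on AddRelationTest: a RelationEndpoint is built from an endpoint id of the form "<model>.<application>.<unit>", an optional VCA id, and an endpoint name. The tests pin three behaviours: endpoints in the same model relate directly; endpoints in different models on the same controller go through offer and consume, with the first endpoint exported and then imported into the second model as a SaaS alias before relating; distinct VCA ids drive the same flow across controllers. A small runnable sketch of the endpoint-id decomposition these tests rely on; the helper is illustrative only, n2vc.definitions does its own parsing:

    def split_endpoint_id(endpoint_id: str):
        # "model-1.app1.0" -> ("model-1", "app1", "0")
        model, application, unit = endpoint_id.split(".")
        return model, application, unit

    model, app, _ = split_endpoint_id("model-1.app1.0")
    print("{}:{}".format(app, "endpoint1"))  # "app1:endpoint1", as asserted above
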
diff --git a/n2vc/tests/unit/test_kubectl.py b/n2vc/tests/unit/test_kubectl.py
index eb9b01d..a6d02ff 100644 (file)
 #     See the License for the specific language governing permissions and
 #     limitations under the License.
 
+import asynctest
+import yaml
+import os
 from unittest import TestCase, mock
-from n2vc.kubectl import Kubectl, CORE_CLIENT
+from n2vc.kubectl import Kubectl, CORE_CLIENT, CUSTOM_OBJECT_CLIENT
 from n2vc.utils import Dict
 from kubernetes.client.rest import ApiException
+from kubernetes.client import (
+    V1ObjectMeta,
+    V1Secret,
+    V1ServiceAccount,
+    V1SecretReference,
+    V1Role,
+    V1RoleBinding,
+    V1RoleRef,
+    V1Subject,
+    V1PolicyRule,
+    V1Namespace,
+)
 
 
 class FakeK8sResourceMetadata:
@@ -66,6 +81,56 @@ class FakeK8sStorageClassesList:
         return self._items
 
 
+class FakeK8sServiceAccountsList:
+    def __init__(self, items=[]):
+        self._items = items
+
+    @property
+    def items(self):
+        return self._items
+
+
+class FakeK8sSecretList:
+    def __init__(self, items=[]):
+        self._items = items
+
+    @property
+    def items(self):
+        return self._items
+
+
+class FakeK8sRoleList:
+    def __init__(self, items=[]):
+        self._items = items
+
+    @property
+    def items(self):
+        return self._items
+
+
+class FakeK8sRoleBindingList:
+    def __init__(self, items=[]):
+        self._items = items
+
+    @property
+    def items(self):
+        return self._items
+
+
+class FakeK8sVersionApiCode:
+    def __init__(self, major: str, minor: str):
+        self._major = major
+        self._minor = minor
+
+    @property
+    def major(self):
+        return self._major
+
+    @property
+    def minor(self):
+        return self._minor
+
+
 fake_list_services = Dict(
     {
         "items": [
@@ -248,3 +313,542 @@ class GetDefaultStorageClass(KubectlTestCase):
         sc_name = kubectl.get_default_storage_class()
         self.assertEqual(sc_name, self.default_sc_name)
         mock_list_storage_class.assert_called_once()
+
+
+@mock.patch("kubernetes.client.VersionApi.get_code")
+@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_secret")
+@mock.patch("kubernetes.client.CoreV1Api.create_namespaced_secret")
+@mock.patch("kubernetes.client.CoreV1Api.create_namespaced_service_account")
+@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_service_account")
+class CreateServiceAccountClass(KubectlTestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateServiceAccountClass, self).setUp()
+        self.service_account_name = "Service_account"
+        self.labels = {"Key1": "Value1", "Key2": "Value2"}
+        self.namespace = "kubernetes"
+        self.token_id = "abc12345"
+        self.kubectl = Kubectl()
+
+    def assert_create_secret(self, mock_create_secret, secret_name):
+        annotations = {"kubernetes.io/service-account.name": self.service_account_name}
+        secret_metadata = V1ObjectMeta(
+            name=secret_name, namespace=self.namespace, annotations=annotations
+        )
+        secret_type = "kubernetes.io/service-account-token"
+        secret = V1Secret(metadata=secret_metadata, type=secret_type)
+        mock_create_secret.assert_called_once_with(self.namespace, secret)
+
+    def assert_create_service_account_v_1_24(
+        self, mock_create_service_account, secret_name
+    ):
+        sevice_account_metadata = V1ObjectMeta(
+            name=self.service_account_name, labels=self.labels, namespace=self.namespace
+        )
+        secrets = [V1SecretReference(name=secret_name, namespace=self.namespace)]
+        service_account = V1ServiceAccount(
+            metadata=sevice_account_metadata, secrets=secrets
+        )
+        mock_create_service_account.assert_called_once_with(
+            self.namespace, service_account
+        )
+
+    def assert_create_service_account_v_1_23(self, mock_create_service_account):
+        metadata = V1ObjectMeta(
+            name=self.service_account_name, labels=self.labels, namespace=self.namespace
+        )
+        service_account = V1ServiceAccount(metadata=metadata)
+        mock_create_service_account.assert_called_once_with(
+            self.namespace, service_account
+        )
+
+    @mock.patch("n2vc.kubectl.uuid.uuid4")
+    def test_secret_is_created_when_k8s_1_24(
+        self,
+        mock_uuid4,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[])
+        mock_list_secret.return_value = FakeK8sSecretList(items=[])
+        mock_version.return_value = FakeK8sVersionApiCode("1", "24")
+        mock_uuid4.return_value = self.token_id
+        self.kubectl.create_service_account(
+            self.service_account_name, self.labels, self.namespace
+        )
+        secret_name = "{}-token-{}".format(self.service_account_name, self.token_id[:5])
+        self.assert_create_service_account_v_1_24(
+            mock_create_service_account, secret_name
+        )
+        self.assert_create_secret(mock_create_secret, secret_name)
+
+    def test_secret_is_not_created_when_k8s_1_23(
+        self,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[])
+        mock_version.return_value = FakeK8sVersionApiCode("1", "23+")
+        self.kubectl.create_service_account(
+            self.service_account_name, self.labels, self.namespace
+        )
+        self.assert_create_service_account_v_1_23(mock_create_service_account)
+        mock_create_secret.assert_not_called()
+        mock_list_secret.assert_not_called()
+
+    def test_raise_exception_if_service_account_already_exists(
+        self,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[1])
+        with self.assertRaises(Exception) as context:
+            self.kubectl.create_service_account(
+                self.service_account_name, self.labels, self.namespace
+            )
+        self.assertTrue(
+            "Service account with metadata.name={} already exists".format(
+                self.service_account_name
+            )
+            in str(context.exception)
+        )
+        mock_create_service_account.assert_not_called()
+        mock_create_secret.assert_not_called()
+
+    @mock.patch("n2vc.kubectl.uuid.uuid4")
+    def test_raise_exception_if_secret_already_exists(
+        self,
+        mock_uuid4,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[])
+        mock_list_secret.return_value = FakeK8sSecretList(items=[1])
+        mock_version.return_value = FakeK8sVersionApiCode("1", "24+")
+        mock_uuid4.return_value = self.token_id
+        with self.assertRaises(Exception) as context:
+            self.kubectl.create_service_account(
+                self.service_account_name, self.labels, self.namespace
+            )
+        self.assertTrue(
+            "Secret with metadata.name={}-token-{} already exists".format(
+                self.service_account_name, self.token_id[:5]
+            )
+            in str(context.exception)
+        )
+        mock_create_service_account.assert_called()
+        mock_create_secret.assert_not_called()
+
+
+@mock.patch("kubernetes.client.CustomObjectsApi.create_namespaced_custom_object")
+class CreateCertificateClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateCertificateClass, self).setUp()
+        self.namespace = "osm"
+        self.name = "test-cert"
+        self.dns_prefix = "*"
+        self.secret_name = "test-cert-secret"
+        self.usages = ["server auth"]
+        self.issuer_name = "ca-issuer"
+        self.kubectl = Kubectl()
+
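+    # create_certificate() builds a cert-manager.io/v1 Certificate custom
+    # object; the expected request body is kept in
+    # testdata/test_certificate.yaml so the test can compare it against the
+    # actual API call.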
+    @asynctest.fail_on(active_handles=True)
+    async def test_certificate_is_created(
+        self,
+        mock_create_certificate,
+    ):
+        with open(
+            os.path.join(
+                os.path.dirname(__file__), "testdata", "test_certificate.yaml"
+            ),
+            "r",
+        ) as test_certificate:
+            certificate_body = yaml.safe_load(test_certificate.read())
+            print(certificate_body)
+        await self.kubectl.create_certificate(
+            namespace=self.namespace,
+            name=self.name,
+            dns_prefix=self.dns_prefix,
+            secret_name=self.secret_name,
+            usages=self.usages,
+            issuer_name=self.issuer_name,
+        )
+        mock_create_certificate.assert_called_once_with(
+            group="cert-manager.io",
+            plural="certificates",
+            version="v1",
+            body=certificate_body,
+            namespace=self.namespace,
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_no_exception_if_alreadyexists(
+        self,
+        mock_create_certificate,
+    ):
+        api_exception = ApiException()
+        api_exception.body = '{"reason": "AlreadyExists"}'
+        self.kubectl.clients[
+            CUSTOM_OBJECT_CLIENT
+        ].create_namespaced_custom_object.side_effect = api_exception
+        raised = False
+        try:
+            await self.kubectl.create_certificate(
+                namespace=self.namespace,
+                name=self.name,
+                dns_prefix=self.dns_prefix,
+                secret_name=self.secret_name,
+                usages=self.usages,
+                issuer_name=self.issuer_name,
+            )
+        except Exception:
+            raised = True
+        self.assertFalse(raised, "An exception was raised")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_other_exceptions(
+        self,
+        mock_create_certificate,
+    ):
+        self.kubectl.clients[
+            CUSTOM_OBJECT_CLIENT
+        ].create_namespaced_custom_object.side_effect = Exception()
+        with self.assertRaises(Exception):
+            await self.kubectl.create_certificate(
+                namespace=self.namespace,
+                name=self.name,
+                dns_prefix=self.dns_prefix,
+                secret_name=self.secret_name,
+                usages=self.usages,
+                issuer_name=self.issuer_name,
+            )
+
+
+@mock.patch("kubernetes.client.CustomObjectsApi.delete_namespaced_custom_object")
+class DeleteCertificateClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(DeleteCertificateClass, self).setUp()
+        self.namespace = "osm"
+        self.object_name = "test-cert"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_no_exception_if_notfound(
+        self,
+        mock_create_certificate,
+    ):
+        api_exception = ApiException()
+        api_exception.body = '{"reason": "NotFound"}'
+        self.kubectl.clients[
+            CUSTOM_OBJECT_CLIENT
+        ].delete_namespaced_custom_object.side_effect = api_exception
+        raised = False
+        try:
+            await self.kubectl.delete_certificate(
+                namespace=self.namespace,
+                object_name=self.object_name,
+            )
+        except Exception:
+            raised = True
+        self.assertFalse(raised, "An exception was raised")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_other_exceptions(
+        self,
+        mock_create_certificate,
+    ):
+        self.kubectl.clients[
+            CUSTOM_OBJECT_CLIENT
+        ].delete_namespaced_custom_object.side_effect = Exception()
+        with self.assertRaises(Exception):
+            await self.kubectl.delete_certificate(
+                namespace=self.namespace,
+                object_name=self.object_name,
+            )
+
+
+@mock.patch("kubernetes.client.RbacAuthorizationV1Api.create_namespaced_role")
+@mock.patch("kubernetes.client.RbacAuthorizationV1Api.list_namespaced_role")
+class CreateRoleClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateRoleClass, self).setUp()
+        self.name = "role"
+        self.namespace = "osm"
+        self.resources = ["*"]
+        self.api_groups = ["*"]
+        self.verbs = ["*"]
+        self.labels = {}
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def assert_create_role(self, mock_create_role):
+        metadata = V1ObjectMeta(
+            name=self.name, labels=self.labels, namespace=self.namespace
+        )
+        role = V1Role(
+            metadata=metadata,
+            rules=[
+                V1PolicyRule(
+                    api_groups=self.api_groups,
+                    resources=self.resources,
+                    verbs=self.verbs,
+                ),
+            ],
+        )
+        await self.kubectl.create_role(
+            namespace=self.namespace,
+            api_groups=self.api_groups,
+            name=self.name,
+            resources=self.resources,
+            verbs=self.verbs,
+            labels=self.labels,
+        )
+        mock_create_role.assert_called_once_with(self.namespace, role)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_raise_exception_if_role_already_exists(
+        self,
+        mock_list_role,
+        mock_create_role,
+    ):
+        mock_list_role.return_value = FakeK8sRoleList(items=[1])
+        with self.assertRaises(Exception) as context:
+            await self.kubectl.create_role(
+                self.name,
+                self.labels,
+                self.api_groups,
+                self.resources,
+                self.verbs,
+                self.namespace,
+            )
+        self.assertTrue(
+            "Role with metadata.name={} already exists".format(self.name)
+            in str(context.exception)
+        )
+        mock_create_role.assert_not_called()
+
+
+@mock.patch("kubernetes.client.RbacAuthorizationV1Api.create_namespaced_role_binding")
+@mock.patch("kubernetes.client.RbacAuthorizationV1Api.list_namespaced_role_binding")
+class CreateRoleBindingClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateRoleBindingClass, self).setUp()
+        self.name = "rolebinding"
+        self.namespace = "osm"
+        self.role_name = "role"
+        self.sa_name = "Default"
+        self.labels = {}
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def assert_create_role_binding(self, mock_create_role_binding):
+        role_binding = V1RoleBinding(
+            metadata=V1ObjectMeta(name=self.name, labels=self.labels),
+            role_ref=V1RoleRef(kind="Role", name=self.role_name, api_group=""),
+            subjects=[
+                V1Subject(
+                    kind="ServiceAccount",
+                    name=self.sa_name,
+                    namespace=self.namespace,
+                )
+            ],
+        )
+        await self.kubectl.create_role_binding(
+            namespace=self.namespace,
+            role_name=self.role_name,
+            name=self.name,
+            sa_name=self.sa_name,
+            labels=self.labels,
+        )
+        mock_create_role_binding.assert_called_once_with(self.namespace, role_binding)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_raise_exception_if_role_binding_already_exists(
+        self,
+        mock_list_role_binding,
+        mock_create_role_binding,
+    ):
+        mock_list_role_binding.return_value = FakeK8sRoleBindingList(items=[1])
+        with self.assertRaises(Exception) as context:
+            await self.kubectl.create_role_binding(
+                self.name,
+                self.role_name,
+                self.sa_name,
+                self.labels,
+                self.namespace,
+            )
+        self.assertTrue(
+            "Role Binding with metadata.name={} already exists".format(self.name)
+            in str(context.exception)
+        )
+        mock_create_role_binding.assert_not_called()
+
+
+@mock.patch("kubernetes.client.CoreV1Api.create_namespaced_secret")
+class CreateSecretClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateSecretClass, self).setUp()
+        self.name = "secret"
+        self.namespace = "osm"
+        self.data = {"test": "1234"}
+        self.secret_type = "Opaque"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def assert_create_secret(self, mock_create_secret):
+        secret_metadata = V1ObjectMeta(name=self.name, namespace=self.namespace)
+        secret = V1Secret(
+            metadata=secret_metadata,
+            data=self.data,
+            type=self.secret_type,
+        )
+        await self.kubectl.create_secret(
+            namespace=self.namespace,
+            data=self.data,
+            name=self.name,
+            secret_type=self.secret_type,
+        )
+        mock_create_secret.assert_called_once_with(self.namespace, secret)
+
+
+@mock.patch("kubernetes.client.CoreV1Api.create_namespace")
+class CreateNamespaceClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateNamespaceClass, self).setUp()
+        self.namespace = "osm"
+        self.labels = {"key": "value"}
+        self.kubectl = Kubectl()
+
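+    # create_namespace() treats an "AlreadyExists" ApiException as benign and
+    # re-raises anything else; the tests below cover both paths.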
+    @asynctest.fail_on(active_handles=True)
+    async def test_namespace_is_created(
+        self,
+        mock_create_namespace,
+    ):
+        metadata = V1ObjectMeta(name=self.namespace, labels=self.labels)
+        namespace = V1Namespace(
+            metadata=metadata,
+        )
+        await self.kubectl.create_namespace(
+            name=self.namespace,
+            labels=self.labels,
+        )
+        mock_create_namespace.assert_called_once_with(namespace)
+
+    async def test_namespace_is_created_default_labels(
+        self,
+        mock_create_namespace,
+    ):
+        metadata = V1ObjectMeta(name=self.namespace, labels=None)
+        namespace = V1Namespace(
+            metadata=metadata,
+        )
+        await self.kubectl.create_namespace(
+            name=self.namespace,
+        )
+        mock_create_namespace.assert_called_once_with(namespace)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_no_exception_if_alreadyexists(
+        self,
+        mock_create_namespace,
+    ):
+        api_exception = ApiException()
+        api_exception.body = '{"reason": "AlreadyExists"}'
+        self.kubectl.clients[CORE_CLIENT].create_namespace.side_effect = api_exception
+        raised = False
+        try:
+            await self.kubectl.create_namespace(
+                name=self.namespace,
+            )
+        except Exception:
+            raised = True
+        self.assertFalse(raised, "An exception was raised")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_other_exceptions(
+        self,
+        mock_create_namespace,
+    ):
+        self.kubectl.clients[CORE_CLIENT].create_namespace.side_effect = Exception()
+        with self.assertRaises(Exception):
+            await self.kubectl.create_namespace(
+                name=self.namespace,
+            )
+
+
+@mock.patch("kubernetes.client.CoreV1Api.delete_namespace")
+class DeleteNamespaceClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(DeleteNamespaceClass, self).setUp()
+        self.namespace = "osm"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_no_exception_if_notfound(
+        self,
+        mock_delete_namespace,
+    ):
+        api_exception = ApiException()
+        api_exception.body = '{"reason": "NotFound"}'
+        self.kubectl.clients[CORE_CLIENT].delete_namespace.side_effect = api_exception
+        raised = False
+        try:
+            await self.kubectl.delete_namespace(
+                name=self.namespace,
+            )
+        except Exception:
+            raised = True
+        self.assertFalse(raised, "An exception was raised")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_other_exceptions(
+        self,
+        mock_delete_namespace,
+    ):
+        self.kubectl.clients[CORE_CLIENT].delete_namespace.side_effect = Exception()
+        with self.assertRaises(Exception):
+            await self.kubectl.delete_namespace(
+                name=self.namespace,
+            )
+
+
+@mock.patch("kubernetes.client.CoreV1Api.read_namespaced_secret")
+class GetSecretContentClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(GetSecretContentClass, self).setUp()
+        self.name = "my_secret"
+        self.namespace = "osm"
+        self.data = {"my_key": "my_value"}
+        self.type = "Opaque"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_return_type_is_dict(
+        self,
+        mock_read_namespaced_secret,
+    ):
+        metadata = V1ObjectMeta(name=self.name, namespace=self.namespace)
+        secret = V1Secret(metadata=metadata, data=self.data, type=self.type)
+        mock_read_namespaced_secret.return_value = secret
+        content = await self.kubectl.get_secret_content(self.name, self.namespace)
+        assert type(content) is dict
index 918a2fb..38d8d0e 100644 (file)
@@ -20,6 +20,8 @@ import juju
 import kubernetes
 from juju.errors import JujuAPIError
 import logging
+
+from n2vc.definitions import Offer, RelationEndpoint
 from .utils import (
     FakeApplication,
     FakeMachine,
@@ -58,7 +60,7 @@ class LibjujuTestCase(asynctest.TestCase):
         self.loop = asyncio.get_event_loop()
         self.db = Mock()
         mock_base64_to_cacert.return_value = cacert
-        Connection._load_vca_connection_data = Mock()
+        Connection._load_vca_connection_data = Mock()
         vca_connection = Connection(AsyncMock())
         vca_connection._data = ConnectionData(
             **{
@@ -76,7 +78,7 @@ class LibjujuTestCase(asynctest.TestCase):
             }
         )
         logging.disable(logging.CRITICAL)
-        self.libjuju = Libjuju(vca_connection, self.loop)
+        self.libjuju = Libjuju(vca_connection)
         self.loop.run_until_complete(self.libjuju.disconnect())
 
 
@@ -494,70 +496,408 @@ class CreateMachineTest(LibjujuTestCase):
 # TODO test provision machine
 
 
+@asynctest.mock.patch("os.remove")
+@asynctest.mock.patch("n2vc.libjuju.yaml.dump")
+@asynctest.mock.patch("builtins.open", create=True)
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_controller")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_model")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.disconnect_model")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.disconnect_controller")
 @asynctest.mock.patch("n2vc.juju_watcher.JujuModelWatcher.wait_for_model")
 @asynctest.mock.patch("juju.model.Model.deploy")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_controller")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_model")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.disconnect_model")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.disconnect_controller")
 @asynctest.mock.patch("n2vc.juju_watcher.JujuModelWatcher.wait_for_model")
 @asynctest.mock.patch("juju.model.Model.deploy")
+@asynctest.mock.patch("juju.model.CharmhubDeployType.resolve")
+@asynctest.mock.patch("n2vc.libjuju.BundleHandler")
+@asynctest.mock.patch("juju.url.URL.parse")
 class DeployTest(LibjujuTestCase):
     def setUp(self):
         super(DeployTest, self).setUp()
+        self.instantiation_params = {"applications": {"squid": {"scale": 2}}}
+        self.architecture = "amd64"
+        self.uri = "cs:osm"
+        self.url = AsyncMock()
+        self.url.schema = juju.url.Schema.CHARM_HUB
+        self.bundle_instance = None
+
+    def setup_bundle_download_mocks(
+        self, mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+    ):
+        mock_url_parse.return_value = self.url
+        mock_bundle.return_value = AsyncMock()
+        mock_resolve.return_value = AsyncMock()
+        mock_resolve.origin = AsyncMock()
+        mock_get_model.return_value = juju.model.Model()
+        self.bundle_instance = mock_bundle.return_value
+        self.bundle_instance.applications = {"squid"}
+
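+    # The helpers below assert whether deploy() rendered the instantiation
+    # params into a "<model>-overlay.yaml" file, passed it to Model.deploy()
+    # as an overlay, and downloaded the bundle to validate the application
+    # names in the overlay against it.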
+    def assert_overlay_file_is_written(self, filename, mocked_file, mock_yaml, mock_os):
+        mocked_file.assert_called_once_with(filename, "w")
+        mock_yaml.assert_called_once_with(
+            self.instantiation_params, mocked_file.return_value.__enter__.return_value
+        )
+        mock_os.assert_called_once_with(filename)
+
+    def assert_overlay_file_is_not_written(self, mocked_file, mock_yaml, mock_os):
+        mocked_file.assert_not_called()
+        mock_yaml.assert_not_called()
+        mock_os.assert_not_called()
+
+    def assert_bundle_is_downloaded(self, mock_resolve, mock_url_parse):
+        mock_resolve.assert_called_once_with(
+            self.url, self.architecture, entity_url=self.uri
+        )
+        mock_url_parse.assert_called_once_with(self.uri)
+        self.bundle_instance.fetch_plan.assert_called_once_with(
+            self.url, mock_resolve.origin
+        )
+
+    def assert_bundle_is_not_downloaded(self, mock_resolve, mock_url_parse):
+        mock_resolve.assert_not_called()
+        mock_url_parse.assert_not_called()
+        self.bundle_instance.fetch_plan.assert_not_called()
 
     def test_deploy(
        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
         mock_deploy,
         mock_wait_for_model,
         mock_disconnect_controller,
         mock_disconnect_model,
         mock_get_model,
         mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
     ):
-        mock_get_model.return_value = juju.model.Model()
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+        model_name = "model1"
+
         self.loop.run_until_complete(
-            self.libjuju.deploy("cs:osm", "model", wait=True, timeout=0)
+            self.libjuju.deploy(
+                "cs:osm",
+                model_name,
+                wait=True,
+                timeout=0,
+                instantiation_params=None,
+            )
         )
-        mock_deploy.assert_called_once()
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with("cs:osm", trust=True, overlays=[])
         mock_wait_for_model.assert_called_once()
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
     def test_deploy_no_wait(
         self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
         mock_deploy,
         mock_wait_for_model,
         mock_disconnect_controller,
         mock_disconnect_model,
         mock_get_model,
         mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
     ):
-        mock_get_model.return_value = juju.model.Model()
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
         self.loop.run_until_complete(
-            self.libjuju.deploy("cs:osm", "model", wait=False, timeout=0)
+            self.libjuju.deploy(
+                "cs:osm", "model", wait=False, timeout=0, instantiation_params={}
+            )
         )
-        mock_deploy.assert_called_once()
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with("cs:osm", trust=True, overlays=[])
         mock_wait_for_model.assert_not_called()
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
     def test_deploy_exception(
         self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
         mock_deploy,
         mock_wait_for_model,
         mock_disconnect_controller,
         mock_disconnect_model,
         mock_get_model,
         mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
     ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
         mock_deploy.side_effect = Exception()
-        mock_get_model.return_value = juju.model.Model()
         with self.assertRaises(Exception):
             self.loop.run_until_complete(self.libjuju.deploy("cs:osm", "model"))
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
         mock_deploy.assert_called_once()
         mock_wait_for_model.assert_not_called()
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
+    def test_deploy_with_instantiation_params(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+        model_name = "model1"
+        expected_filename = "{}-overlay.yaml".format(model_name)
+        self.loop.run_until_complete(
+            self.libjuju.deploy(
+                self.uri,
+                model_name,
+                wait=True,
+                timeout=0,
+                instantiation_params=self.instantiation_params,
+            )
+        )
+        self.assert_overlay_file_is_written(
+            expected_filename, mocked_file, mock_yaml, mock_os
+        )
+        self.assert_bundle_is_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with(
+            self.uri, trust=True, overlays=[expected_filename]
+        )
+        mock_wait_for_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_with_instantiation_params_no_applications(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.instantiation_params = {"applications": {}}
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+
+        model_name = "model3"
+        expected_filename = "{}-overlay.yaml".format(model_name)
+        self.loop.run_until_complete(
+            self.libjuju.deploy(
+                self.uri,
+                model_name,
+                wait=False,
+                timeout=0,
+                instantiation_params=self.instantiation_params,
+            )
+        )
+
+        self.assert_overlay_file_is_written(
+            expected_filename, mocked_file, mock_yaml, mock_os
+        )
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with(
+            self.uri, trust=True, overlays=[expected_filename]
+        )
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_with_instantiation_params_applications_not_found(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.instantiation_params = {"some_key": {"squid": {"scale": 2}}}
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+
+        with self.assertRaises(JujuError):
+            self.loop.run_until_complete(
+                self.libjuju.deploy(
+                    self.uri,
+                    "model1",
+                    wait=True,
+                    timeout=0,
+                    instantiation_params=self.instantiation_params,
+                )
+            )
+
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_not_called()
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_overlay_contains_invalid_app(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+        self.bundle_instance.applications = {"new_app"}
+
+        with self.assertRaises(JujuApplicationNotFound) as error:
+            self.loop.run_until_complete(
+                self.libjuju.deploy(
+                    self.uri,
+                    "model2",
+                    wait=True,
+                    timeout=0,
+                    instantiation_params=self.instantiation_params,
+                )
+            )
+        error_msg = "Cannot find application ['squid'] in original bundle {'new_app'}"
+        self.assertEqual(str(error.exception), error_msg)
+
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_not_called()
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_exception_with_instantiation_params(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+
+        mock_deploy.side_effect = Exception()
+        model_name = "model2"
+        expected_filename = "{}-overlay.yaml".format(model_name)
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.libjuju.deploy(
+                    self.uri,
+                    model_name,
+                    instantiation_params=self.instantiation_params,
+                )
+            )
+
+        self.assert_overlay_file_is_written(
+            expected_filename, mocked_file, mock_yaml, mock_os
+        )
+        self.assert_bundle_is_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with(
+            self.uri, trust=True, overlays=[expected_filename]
+        )
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    @asynctest.mock.patch("logging.Logger.warning")
+    def test_deploy_exception_when_deleting_file_is_not_propagated(
+        self,
+        mock_warning,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+
+        mock_os.side_effect = OSError("Error")
+        model_name = "model2"
+        expected_filename = "{}-overlay.yaml".format(model_name)
+        self.loop.run_until_complete(
+            self.libjuju.deploy(
+                self.uri,
+                model_name,
+                instantiation_params=self.instantiation_params,
+            )
+        )
+
+        self.assert_overlay_file_is_written(
+            expected_filename, mocked_file, mock_yaml, mock_os
+        )
+        self.assert_bundle_is_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with(
+            self.uri, trust=True, overlays=[expected_filename]
+        )
+        mock_wait_for_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+        mock_warning.assert_called_with(
+            "Overlay file {} could not be removed: Error".format(expected_filename)
+        )
+
 
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_controller")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_model")
 
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_controller")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_model")
@@ -776,7 +1116,6 @@ class ExecuteActionTest(LibjujuTestCase):
         mock_get_model,
         mock_get_controller,
     ):
         mock_get_model,
         mock_get_controller,
     ):
-
         mock_get_model.return_value = juju.model.Model()
         mock__get_application.return_value = FakeApplication()
         output = None
         mock_get_model.return_value = juju.model.Model()
         mock__get_application.return_value = FakeApplication()
         output = None
@@ -1005,6 +1344,38 @@ class AddRelationTest(LibjujuTestCase):
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
+    @asynctest.mock.patch("logging.Logger.warning")
+    def test_not_found_in_error_code(
+        self,
+        mock_warning,
+        mock_add_relation,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        result = {
+            "error": "relation cannot be added",
+            "error-code": "not found",
+            "response": "response",
+            "request-id": 1,
+        }
+
+        mock_get_model.return_value = juju.model.Model()
+        mock_add_relation.side_effect = JujuAPIError(result)
+
+        self.loop.run_until_complete(
+            self.libjuju.add_relation(
+                "model",
+                "app1:relation1",
+                "app2:relation2",
+            )
+        )
+
+        mock_warning.assert_called_with("Relation not found: relation cannot be added")
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
     @asynctest.mock.patch("logging.Logger.warning")
     def test_already_exists(
         self,
     @asynctest.mock.patch("logging.Logger.warning")
     def test_already_exists(
         self,
@@ -1033,6 +1404,40 @@ class AddRelationTest(LibjujuTestCase):
         mock_disconnect_controller.assert_called_once()
         mock_disconnect_model.assert_called_once()
 
+    @asynctest.mock.patch("logging.Logger.warning")
+    def test_already_exists_error_code(
+        self,
+        mock_warning,
+        mock_add_relation,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        result = {
+            "error": "relation cannot be added",
+            "error-code": "already exists",
+            "response": "response",
+            "request-id": 1,
+        }
+
+        mock_get_model.return_value = juju.model.Model()
+        mock_add_relation.side_effect = JujuAPIError(result)
+
+        self.loop.run_until_complete(
+            self.libjuju.add_relation(
+                "model",
+                "app1:relation1",
+                "app2:relation2",
+            )
+        )
+
+        mock_warning.assert_called_with(
+            "Relation already exists: relation cannot be added"
+        )
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
     def test_exception(
         self,
         mock_add_relation,
@@ -1227,7 +1632,6 @@ class ConfigureApplicationTest(LibjujuTestCase):
         mock_get_model,
         mock_get_controller,
     ):
-
         mock_get_application.return_value = FakeApplication()
 
         self.loop.run_until_complete(
@@ -1249,7 +1653,6 @@ class ConfigureApplicationTest(LibjujuTestCase):
         mock_get_model,
         mock_get_controller,
     ):
-
         mock_get_application.side_effect = Exception()
 
         with self.assertRaises(Exception):
@@ -1271,7 +1674,6 @@ class ConfigureApplicationTest(LibjujuTestCase):
         mock_get_model,
         mock_get_controller,
     ):
-
         result = {"error": "not found", "response": "response", "request-id": 1}
 
         mock_get_controller.side_effect = JujuAPIError(result)
         result = {"error": "not found", "response": "response", "request-id": 1}
 
         mock_get_controller.side_effect = JujuAPIError(result)
@@ -1296,7 +1698,6 @@ class ConfigureApplicationTest(LibjujuTestCase):
         mock_get_model,
         mock_get_controller,
     ):
-
         result = {"error": "not found", "response": "response", "request-id": 1}
         mock_get_model.side_effect = JujuAPIError(result)
 
         result = {"error": "not found", "response": "response", "request-id": 1}
         mock_get_model.side_effect = JujuAPIError(result)
 
@@ -1415,7 +1816,7 @@ class ListOffers(LibjujuTestCase):
         mock_get_controller.return_value = juju.controller.Controller()
         mock_list_offers.side_effect = Exception()
         with self.assertRaises(Exception):
-            self.loop.run_until_complete(self.libjuju.list_offers("model"))
+            self.loop.run_until_complete(self.libjuju._list_offers("model"))
         mock_disconnect_controller.assert_called_once()
 
     def test_empty_list(
@@ -1425,8 +1826,10 @@ class ListOffers(LibjujuTestCase):
         mock_get_controller,
     ):
         mock_get_controller.return_value = juju.controller.Controller()
-        mock_list_offers.return_value = []
-        offers = self.loop.run_until_complete(self.libjuju.list_offers("model"))
+        offer_results = Mock()
+        offer_results.results = []
+        mock_list_offers.return_value = offer_results
+        offers = self.loop.run_until_complete(self.libjuju._list_offers("model"))
         self.assertEqual(offers, [])
         mock_disconnect_controller.assert_called_once()
 
@@ -1437,12 +1840,110 @@ class ListOffers(LibjujuTestCase):
         mock_get_controller,
     ):
         mock_get_controller.return_value = juju.controller.Controller()
-        mock_list_offers.return_value = ["offer"]
-        offers = self.loop.run_until_complete(self.libjuju.list_offers("model"))
-        self.assertEqual(offers, ["offer"])
+        offer = Mock()
+        offer_results = Mock()
+        offer_results.results = [offer]
+        mock_list_offers.return_value = offer_results
+        offers = self.loop.run_until_complete(self.libjuju._list_offers("model"))
+        self.assertEqual(offers, [offer])
+        mock_disconnect_controller.assert_called_once()
+
+    def test_matching_offer_name(
+        self,
+        mock_list_offers,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        offer_1 = Mock()
+        offer_1.offer_name = "offer1"
+        offer_2 = Mock()
+        offer_2.offer_name = "offer2"
+        offer_results = Mock()
+        offer_results.results = [offer_1, offer_2]
+        mock_list_offers.return_value = offer_results
+        offers = self.loop.run_until_complete(
+            self.libjuju._list_offers("model", offer_name="offer2")
+        )
+        self.assertEqual(offers, [offer_2])
+        mock_disconnect_controller.assert_called_once()
+
+    def test_not_matching_offer_name(
+        self,
+        mock_list_offers,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        offer_1 = Mock()
+        offer_1.offer_name = "offer1"
+        offer_2 = Mock()
+        offer_2.offer_name = "offer2"
+        offer_results = Mock()
+        offer_results.results = [offer_1, offer_2]
+        mock_list_offers.return_value = offer_results
+        offers = self.loop.run_until_complete(
+            self.libjuju._list_offers("model", offer_name="offer3")
+        )
+        self.assertEqual(offers, [])
         mock_disconnect_controller.assert_called_once()
 
 
         mock_disconnect_controller.assert_called_once()
 
 
+@asynctest.mock.patch("n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("juju.controller.Controller.get_model")
+@asynctest.mock.patch("n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("n2vc.libjuju.Libjuju._list_offers")
+@asynctest.mock.patch("juju.model.Model.create_offer")
+class OfferTest(LibjujuTestCase):
+    def setUp(self):
+        super(OfferTest, self).setUp()
+
+    def test_offer(
+        self,
+        mock_create_offer,
+        mock__list_offers,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        controller = juju.controller.Controller()
+        model = juju.model.Model()
+        mock_get_controller.return_value = controller
+        mock_get_model.return_value = model
+        endpoint = RelationEndpoint("model.app-name.0", "vca", "endpoint")
+        self.loop.run_until_complete(self.libjuju.offer(endpoint))
+        mock_create_offer.assert_called_with(
+            "app-name:endpoint", offer_name="app-name-endpoint"
+        )
+        mock_disconnect_model.assert_called_once_with(model)
+        mock_disconnect_controller.assert_called_once_with(controller)
+
+    def test_offer_exception(
+        self,
+        mock_create_offer,
+        mock__list_offers,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        controller = juju.controller.Controller()
+        model = juju.model.Model()
+        mock_get_controller.return_value = controller
+        mock_get_model.return_value = model
+        mock__list_offers.return_value = []
+        endpoint = RelationEndpoint("model.app-name.0", "vca", "endpoint")
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(self.libjuju.offer(endpoint))
+        mock_create_offer.assert_called_with(
+            "app-name:endpoint", offer_name="app-name-endpoint"
+        )
+        mock_disconnect_model.assert_called_once_with(model)
+        mock_disconnect_controller.assert_called_once_with(controller)
+
+
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_controller")
 @asynctest.mock.patch("juju.controller.Controller.get_model")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.disconnect_model")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_controller")
 @asynctest.mock.patch("juju.controller.Controller.get_model")
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.disconnect_model")
@@ -1450,7 +1951,9 @@ class ListOffers(LibjujuTestCase):
 @asynctest.mock.patch("juju.model.Model.consume")
 class ConsumeTest(LibjujuTestCase):
     def setUp(self):
 @asynctest.mock.patch("juju.model.Model.consume")
 class ConsumeTest(LibjujuTestCase):
     def setUp(self):
+        self.offer_url = "admin/model.offer_name"
         super(ConsumeTest, self).setUp()
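+        # consume() now takes an Offer and the provider's Libjuju, connecting
+        # to both the consumer's and the provider's controller; these tests
+        # reuse the same instance for both sides.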
+        self.provider_libjuju = self.libjuju
 
     def test_consume(
        self,
@@ -1460,13 +1963,25 @@ class ConsumeTest(LibjujuTestCase):
         mock_get_model,
         mock_get_controller,
     ):
-        mock_get_controller.return_value = juju.controller.Controller()
+        self_controller = juju.controller.Controller()
+        provider_controller = juju.controller.Controller()
+        mock_get_controller.side_effect = [self_controller, provider_controller]
         mock_get_model.return_value = juju.model.Model()
 
-        self.loop.run_until_complete(self.libjuju.consume("offer_url", "model_name"))
-        mock_consume.assert_called_once()
+        self.loop.run_until_complete(
+            self.libjuju.consume(
+                "model_name",
+                Offer(self.offer_url, vca_id="vca-id"),
+                self.provider_libjuju,
+            )
+        )
+        mock_consume.assert_called_once_with(
+            "admin/model.offer_name",
+            application_alias="offer_name-model-vca-id",
+            controller=provider_controller,
+        )
         mock_disconnect_model.assert_called_once()
-        mock_disconnect_controller.assert_called_once()
+        self.assertEqual(mock_disconnect_controller.call_count, 2)
 
     def test_parsing_error_exception(
        self,
@@ -1482,11 +1997,13 @@ class ConsumeTest(LibjujuTestCase):
 
         with self.assertRaises(juju.offerendpoints.ParseError):
            self.loop.run_until_complete(
-                self.libjuju.consume("offer_url", "model_name")
+                self.libjuju.consume(
+                    "model_name", Offer(self.offer_url), self.provider_libjuju
+                )
             )
         mock_consume.assert_called_once()
         mock_disconnect_model.assert_called_once()
-        mock_disconnect_controller.assert_called_once()
+        self.assertEqual(mock_disconnect_controller.call_count, 2)
 
     def test_juju_error_exception(
        self,
@@ -1502,11 +2019,13 @@ class ConsumeTest(LibjujuTestCase):
 
         with self.assertRaises(juju.errors.JujuError):
            self.loop.run_until_complete(
-                self.libjuju.consume("offer_url", "model_name")
+                self.libjuju.consume(
+                    "model_name", Offer(self.offer_url), self.provider_libjuju
+                )
             )
         mock_consume.assert_called_once()
         mock_disconnect_model.assert_called_once()
-        mock_disconnect_controller.assert_called_once()
+        self.assertEqual(mock_disconnect_controller.call_count, 2)
 
     def test_juju_api_error_exception(
        self,
@@ -1524,11 +2043,13 @@ class ConsumeTest(LibjujuTestCase):
 
         with self.assertRaises(juju.errors.JujuAPIError):
            self.loop.run_until_complete(
-                self.libjuju.consume("offer_url", "model_name")
+                self.libjuju.consume(
+                    "model_name", Offer(self.offer_url), self.provider_libjuju
+                )
             )
         mock_consume.assert_called_once()
         mock_disconnect_model.assert_called_once()
-        mock_disconnect_controller.assert_called_once()
+        self.assertEqual(mock_disconnect_controller.call_count, 2)
 
 
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_k8s_cloud_credential")
 
 
 @asynctest.mock.patch("n2vc.libjuju.Libjuju.get_k8s_cloud_credential")
index d89de3f..2ce5024 100644 (file)
 
 import asyncio
import logging
+from unittest.mock import Mock, MagicMock
+from unittest.mock import patch
 
 
 import asynctest
 
 
 import asynctest
+from n2vc.definitions import Offer, RelationEndpoint
 from n2vc.n2vc_juju_conn import N2VCJujuConnector
 from osm_common import fslocal
 from n2vc.n2vc_juju_conn import N2VCJujuConnector
 from osm_common import fslocal
+from osm_common.dbmemory import DbMemory
 from n2vc.exceptions import (
     N2VCBadArgumentsException,
     N2VCException,
 from n2vc.exceptions import (
     N2VCBadArgumentsException,
     N2VCException,
+    JujuApplicationNotFound,
 )
 from n2vc.tests.unit.utils import AsyncMock
 from n2vc.vca.connection_data import ConnectionData
 )
 from n2vc.tests.unit.utils import AsyncMock
 from n2vc.vca.connection_data import ConnectionData
+from n2vc.tests.unit.testdata import test_db_descriptors as descriptors
+import yaml
 
 
 class N2VCJujuConnTestCase(asynctest.TestCase):
 
 
 class N2VCJujuConnTestCase(asynctest.TestCase):
@@ -34,10 +40,7 @@ class N2VCJujuConnTestCase(asynctest.TestCase):
     @asynctest.mock.patch("n2vc.n2vc_juju_conn.get_connection")
     @asynctest.mock.patch("n2vc.vca.connection_data.base64_to_cacert")
     def setUp(
     @asynctest.mock.patch("n2vc.n2vc_juju_conn.get_connection")
     @asynctest.mock.patch("n2vc.vca.connection_data.base64_to_cacert")
     def setUp(
-        self,
-        mock_base64_to_cacert=None,
-        mock_get_connection=None,
-        mock_store=None,
+        self, mock_base64_to_cacert=None, mock_get_connection=None, mock_store=None
     ):
         self.loop = asyncio.get_event_loop()
         self.db = Mock()
     ):
         self.loop = asyncio.get_event_loop()
         self.db = Mock()
@@ -70,7 +73,6 @@ class N2VCJujuConnTestCase(asynctest.TestCase):
             db=self.db,
             fs=fslocal.FsLocal(),
             log=None,
             db=self.db,
             fs=fslocal.FsLocal(),
             log=None,
-            loop=self.loop,
             on_update_db=None,
         )
         N2VCJujuConnector.get_public_key.assert_not_called()
             on_update_db=None,
         )
         N2VCJujuConnector.get_public_key.assert_not_called()
@@ -131,10 +133,6 @@ class UpdateVcaStatusTest(N2VCJujuConnTestCase):
             self.n2vc.libjuju.get_application_configs.assert_not_called_once()
 
 
             self.n2vc.libjuju.get_application_configs.assert_not_called_once()
 
 
-@asynctest.mock.patch("osm_common.fslocal.FsLocal.file_exists")
-@asynctest.mock.patch(
-    "osm_common.fslocal.FsLocal.path", new_callable=asynctest.PropertyMock, create=True
-)
 class K8sProxyCharmsTest(N2VCJujuConnTestCase):
     def setUp(self):
         super(K8sProxyCharmsTest, self).setUp()
@@ -142,46 +140,53 @@ class K8sProxyCharmsTest(N2VCJujuConnTestCase):
         self.n2vc.libjuju.add_model = AsyncMock()
         self.n2vc.libjuju.deploy_charm = AsyncMock()
         self.n2vc.libjuju.model_exists.return_value = False
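+        # Run against an in-memory DB seeded with NS/VNF records from
+        # testdata instead of patching FsLocal at the class level.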
+        self.db = DbMemory()
+        self.fs = fslocal.FsLocal()
+        self.fs.path = "/"
+        self.n2vc.fs = self.fs
+        self.n2vc.db = self.db
+        self.db.create_list("nsrs", yaml.safe_load(descriptors.db_nsrs_text))
+        self.db.create_list("vnfrs", yaml.safe_load(descriptors.db_vnfrs_text))
 
 
-    def test_success(
-        self,
-        mock_path,
-        mock_file_exists,
-    ):
-        mock_file_exists.return_value = True
-        mock_path.return_value = "/path"
+    @patch(
+        "n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_success(self, mock_generate_random_alfanum_string):
+        self.n2vc.fs.file_exists = MagicMock(create_autospec=True)
+        self.n2vc.fs.file_exists.return_value = True
         ee_id = self.loop.run_until_complete(
             self.n2vc.install_k8s_proxy_charm(
-                "charm",
-                "nsi-id.ns-id.vnf-id.vdu",
-                "////path/",
+                "simple",
+                ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
+                "path",
                 {},
             )
         )
 
         self.n2vc.libjuju.add_model.assert_called_once()
         self.n2vc.libjuju.deploy_charm.assert_called_once_with(
-            model_name="ns-id-k8s",
-            application_name="app-vnf-vnf-id-vdu-vdu",
-            path="/path/path/",
+            model_name="dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s",
+            application_name="simple-ee-z0-vnf1-vnf",
+            path="//path",
             machine_id=None,
             db_dict={},
             progress_timeout=None,
             total_timeout=None,
             config=None,
         )
-        self.assertEqual(ee_id, "ns-id-k8s.app-vnf-vnf-id-vdu-vdu.k8s")
+        self.assertEqual(
+            ee_id, "dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s.simple-ee-z0-vnf1-vnf.k8s"
+        )
 
     def test_no_artifact_path(
         self,
-        mock_path,
-        mock_file_exists,
     ):
         with self.assertRaises(N2VCBadArgumentsException):
             ee_id = self.loop.run_until_complete(
                 self.n2vc.install_k8s_proxy_charm(
-                    "charm",
-                    "nsi-id.ns-id.vnf-id.vdu",
+                    "simple",
+                    ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
                     "",
                     {},
                 )
                     "",
                     {},
                 )
@@ -190,15 +195,13 @@ class K8sProxyCharmsTest(N2VCJujuConnTestCase):
 
     def test_no_db(
         self,
-        mock_path,
-        mock_file_exists,
     ):
         with self.assertRaises(N2VCBadArgumentsException):
             ee_id = self.loop.run_until_complete(
                 self.n2vc.install_k8s_proxy_charm(
-                    "charm",
-                    "nsi-id.ns-id.vnf-id.vdu",
-                    "/path/",
+                    "simple",
+                    ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
+                    "path",
                     None,
                 )
             )
@@ -206,16 +209,15 @@ class K8sProxyCharmsTest(N2VCJujuConnTestCase):
 
     def test_file_not_exists(
         self,
-        mock_path,
-        mock_file_exists,
     ):
-        mock_file_exists.return_value = False
+        self.n2vc.fs.file_exists = MagicMock(create_autospec=True)
+        self.n2vc.fs.file_exists.return_value = False
         with self.assertRaises(N2VCBadArgumentsException):
             ee_id = self.loop.run_until_complete(
                 self.n2vc.install_k8s_proxy_charm(
-                    "charm",
-                    "nsi-id.ns-id.vnf-id.vdu",
-                    "/path/",
+                    "simple",
+                    ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
+                    "path",
                     {},
                 )
             )
@@ -223,19 +225,1271 @@ class K8sProxyCharmsTest(N2VCJujuConnTestCase):
 
     def test_exception(
         self,
-        mock_path,
-        mock_file_exists,
     ):
-        mock_file_exists.return_value = True
-        mock_path.return_value = "/path"
+        self.n2vc.fs.file_exists = MagicMock(create_autospec=True)
+        self.n2vc.fs.file_exists.return_value = True
+        self.n2vc.fs.path = MagicMock(create_autospec=True)
+        self.n2vc.fs.path.return_value = "path"
         self.n2vc.libjuju.deploy_charm.side_effect = Exception()
         with self.assertRaises(N2VCException):
             ee_id = self.loop.run_until_complete(
                 self.n2vc.install_k8s_proxy_charm(
-                    "charm",
-                    "nsi-id.ns-id.vnf-id.vdu",
-                    "path/",
+                    "simple",
+                    ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
+                    "path",
                     {},
                 )
             )
             self.assertIsNone(ee_id)
+
+
+class AddRelationTest(N2VCJujuConnTestCase):
+    def setUp(self):
+        super(AddRelationTest, self).setUp()
+        self.n2vc.libjuju.add_relation = AsyncMock()
+        self.n2vc.libjuju.offer = AsyncMock()
+        self.n2vc.libjuju.get_controller = AsyncMock()
+        self.n2vc.libjuju.consume = AsyncMock()
+
+    def test_standard_relation_same_model_and_controller(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint1")
+        relation_endpoint_2 = RelationEndpoint("model-1.app2.1", None, "endpoint2")
+        self.loop.run_until_complete(
+            self.n2vc.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.n2vc.libjuju.add_relation.assert_called_once_with(
+            model_name="model-1",
+            endpoint_1="app1:endpoint1",
+            endpoint_2="app2:endpoint2",
+        )
+        self.n2vc.libjuju.offer.assert_not_called()
+        self.n2vc.libjuju.consume.assert_not_called()
+
+    def test_cmr_relation_same_controller(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint")
+        relation_endpoint_2 = RelationEndpoint("model-2.app2.1", None, "endpoint")
+        offer = Offer("admin/model-1.app1")
+        self.n2vc.libjuju.offer.return_value = offer
+        self.n2vc.libjuju.consume.return_value = "saas"
+        self.loop.run_until_complete(
+            self.n2vc.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.n2vc.libjuju.offer.assert_called_once_with(relation_endpoint_1)
+        self.n2vc.libjuju.consume.assert_called_once()
+        self.n2vc.libjuju.add_relation.assert_called_once_with(
+            "model-2", "app2:endpoint", "saas"
+        )
+
+    def test_cmr_relation_different_controller(self):
+        self.n2vc._get_libjuju = AsyncMock(return_value=self.n2vc.libjuju)
+        relation_endpoint_1 = RelationEndpoint(
+            "model-1.app1.0", "vca-id-1", "endpoint1"
+        )
+        relation_endpoint_2 = RelationEndpoint(
+            "model-1.app2.1", "vca-id-2", "endpoint2"
+        )
+        offer = Offer("admin/model-1.app1")
+        self.n2vc.libjuju.offer.return_value = offer
+        self.n2vc.libjuju.consume.return_value = "saas"
+        self.loop.run_until_complete(
+            self.n2vc.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.n2vc.libjuju.offer.assert_called_once_with(relation_endpoint_1)
+        self.n2vc.libjuju.consume.assert_called_once()
+        self.n2vc.libjuju.add_relation.assert_called_once_with(
+            "model-1", "app2:endpoint2", "saas"
+        )
+
+    def test_relation_exception(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint")
+        relation_endpoint_2 = RelationEndpoint("model-2.app2.1", None, "endpoint")
+        self.n2vc.libjuju.offer.side_effect = Exception()
+        with self.assertRaises(N2VCException):
+            self.loop.run_until_complete(
+                self.n2vc.add_relation(relation_endpoint_1, relation_endpoint_2)
+            )
+
+
+class UpgradeCharmTest(N2VCJujuConnTestCase):
+    def setUp(self):
+        super(UpgradeCharmTest, self).setUp()
+        self.n2vc._get_libjuju = AsyncMock(return_value=self.n2vc.libjuju)
+        N2VCJujuConnector._get_ee_id_components = Mock()
+        self.n2vc.libjuju.upgrade_charm = AsyncMock()
+
+    def test_empty_ee_id(self):
+        with self.assertRaises(N2VCBadArgumentsException):
+            self.loop.run_until_complete(
+                self.n2vc.upgrade_charm(
+                    "", "/sample_charm_path", "sample_charm_id", "native-charm", None
+                )
+            )
+        self.n2vc._get_libjuju.assert_called()
+        self.n2vc._get_ee_id_components.assert_not_called()
+        self.n2vc.libjuju.upgrade_charm.assert_not_called()
+
+    def test_wrong_ee_id(self):
+        N2VCJujuConnector._get_ee_id_components.side_effect = Exception
+        with self.assertRaises(N2VCBadArgumentsException):
+            self.loop.run_until_complete(
+                self.n2vc.upgrade_charm(
+                    "ns-id-k8s.app-vnf-vnf-id-vdu-vdu-random.k8s",
+                    "/sample_charm_path",
+                    "sample_charm_id",
+                    "native-charm",
+                    500,
+                )
+            )
+        self.n2vc._get_libjuju.assert_called()
+        self.n2vc._get_ee_id_components.assert_called()
+        self.n2vc.libjuju.upgrade_charm.assert_not_called()
+
+    def test_charm_upgrade_succeded(self):
+        N2VCJujuConnector._get_ee_id_components.return_value = (
+            "sample_model",
+            "sample_app",
+            "sample_machine_id",
+        )
+        self.loop.run_until_complete(
+            self.n2vc.upgrade_charm(
+                "ns-id-k8s.app-vnf-vnf-id-vdu-vdu-random.k8s",
+                "/sample_charm_path",
+                "sample_charm_id",
+                "native-charm",
+                500,
+            )
+        )
+        self.n2vc._get_libjuju.assert_called()
+        self.n2vc._get_ee_id_components.assert_called()
+        self.n2vc.libjuju.upgrade_charm.assert_called_with(
+            application_name="sample_app",
+            path="/sample_charm_path",
+            model_name="sample_model",
+            total_timeout=500,
+        )
+
+    def test_charm_upgrade_failed(self):
+        N2VCJujuConnector._get_ee_id_components.return_value = (
+            "sample_model",
+            "sample_app",
+            "sample_machine_id",
+        )
+        self.n2vc.libjuju.upgrade_charm.side_effect = JujuApplicationNotFound
+        with self.assertRaises(N2VCException):
+            self.loop.run_until_complete(
+                self.n2vc.upgrade_charm(
+                    "ns-id-k8s.app-vnf-vnf-id-vdu-vdu-random.k8s",
+                    "/sample_charm_path",
+                    "sample_charm_id",
+                    "native-charm",
+                    None,
+                )
+            )
+        self.n2vc._get_libjuju.assert_called()
+        self.n2vc._get_ee_id_components.assert_called()
+        self.n2vc.libjuju.upgrade_charm.assert_called_with(
+            application_name="sample_app",
+            path="/sample_charm_path",
+            model_name="sample_model",
+            total_timeout=None,
+        )
+
+
+class GenerateApplicationNameTest(N2VCJujuConnTestCase):
+    vnf_id = "dbfbd751-3de4-4e68-bd40-ec5ae0a53898"
+
+    def setUp(self):
+        super(GenerateApplicationNameTest, self).setUp()
+        self.db = MagicMock(DbMemory)
+
+    @patch(
+        "n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_generate_backward_compatible_application_name(
+        self, mock_generate_random_alfanum
+    ):
+        vdu_id = "mgmtVM"
+        vdu_count = "0"
+        expected_result = "app-vnf-ec5ae0a53898-vdu-mgmtVM-cnt-0-random"
+
+        application_name = self.n2vc._generate_backward_compatible_application_name(
+            GenerateApplicationNameTest.vnf_id, vdu_id, vdu_count
+        )
+        self.assertEqual(application_name, expected_result)
+
+    @patch(
+        "n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_generate_backward_compatible_application_name_without_vnf_id_vdu_id(
+        self, mock_generate_random_alfanum
+    ):
+        vnf_id = None
+        vdu_id = ""
+        vdu_count = None
+        expected_result = "app--random"
+        application_name = self.n2vc._generate_backward_compatible_application_name(
+            vnf_id, vdu_id, vdu_count
+        )
+
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_find_charm_level_with_vnf_id(self):
+        vdu_id = ""
+        expected_result = "vnf-level"
+        charm_level = self.n2vc._find_charm_level(
+            GenerateApplicationNameTest.vnf_id, vdu_id
+        )
+        self.assertEqual(charm_level, expected_result)
+
+    def test_find_charm_level_with_vdu_id(self):
+        vnf_id = ""
+        vdu_id = "mgmtVM"
+        with self.assertRaises(N2VCException):
+            self.n2vc._find_charm_level(vnf_id, vdu_id)
+
+    def test_find_charm_level_with_vnf_id_and_vdu_id(self):
+        vdu_id = "mgmtVM"
+        expected_result = "vdu-level"
+        charm_level = self.n2vc._find_charm_level(
+            GenerateApplicationNameTest.vnf_id, vdu_id
+        )
+        self.assertEqual(charm_level, expected_result)
+
+    def test_find_charm_level_without_vnf_id_and_vdu_id(self):
+        vnf_id = ""
+        vdu_id = ""
+        expected_result = "ns-level"
+        charm_level = self.n2vc._find_charm_level(vnf_id, vdu_id)
+        self.assertEqual(charm_level, expected_result)
+
+    def test_generate_application_name_ns_charm(self):
+        charm_level = "ns-level"
+        vnfrs = {}
+        vca_records = [
+            {
+                "target_element": "ns",
+                "member-vnf-index": "",
+                "vdu_id": None,
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": None,
+                "vdu_name": None,
+                "type": "proxy_charm",
+                "ee_descriptor_id": None,
+                "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh",
+                "ee_id": None,
+                "application": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = ""
+        vdu_count = ""
+        vdu_id = None
+        expected_result = "simple-ns-charm-abc-000-rrrr-nnnn-4444-h-ns"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_generate_application_name_ns_charm_empty_vca_records(self):
+        charm_level = "ns-level"
+        vnfrs = {}
+        vca_records = []
+        vnf_count = ""
+        vdu_count = ""
+        vdu_id = None
+        with self.assertRaises(N2VCException):
+            self.n2vc._generate_application_name(
+                charm_level,
+                vnfrs,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+
+    def test_generate_application_name_vnf_charm(self):
+        charm_level = "vnf-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "1"
+        vdu_count = ""
+        vdu_id = None
+        expected_result = "simple-ee-ab-1-vnf111-xxx-y-vnf"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_generate_application_name_vdu_charm_kdu_name_in_vca_record_is_none(self):
+        charm_level = "vdu-level"
+        vnfrs = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtvm",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "mgmtVM",
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            },
+            {
+                "target_element": "vnf/vnf1/dataVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "dataVM",
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "datavm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            },
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtVM"
+        expected_result = "simple-ee-ab-2-vnf111-xxx-y-mgmtVM-0-vdu"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_generate_application_name_vdu_charm_vdu_id_kdu_name_in_vca_record_are_both_set(
+        self,
+    ):
+        charm_level = "vdu-level"
+        vnfrs = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "mgmtVM",
+                "kdu_name": "mgmtVM",
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            },
+            {
+                "target_element": "vnf/vnf1/dataVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "dataVM",
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "datavm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            },
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtVM"
+        expected_result = "simple-ee-ab-2-vnf111-xxx-y-mgmtVM-0-vdu"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_generate_application_name_vdu_charm_both_vdu_id_kdu_name_in_vca_record_are_none(
+        self,
+    ):
+        charm_level = "vdu-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": None,
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtVM"
+        with self.assertRaises(KeyError):
+            self.n2vc._generate_application_name(
+                charm_level,
+                vnfrs,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+
+    def test_generate_application_name_vdu_charm_given_vdu_id_is_none(self):
+        charm_level = "vdu-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtvVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": None,
+                "kdu_name": "mgmtVM",
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = None
+        with self.assertRaises(N2VCException):
+            self.n2vc._generate_application_name(
+                charm_level,
+                vnfrs,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+
+    def test_generate_application_name_vdu_charm_vdu_id_does_not_match_with_the_key_in_vca_record(
+        self,
+    ):
+        charm_level = "vdu-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": None,
+                "kdu_name": "mgmtVM",
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtvm"
+        with self.assertRaises(KeyError):
+            self.n2vc._generate_application_name(
+                charm_level,
+                vnfrs,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+
+    def test_generate_application_name_vdu_charm_vdu_id_in_vca_record_is_none(self):
+        charm_level = "vdu-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": None,
+                "kdu_name": "mgmtVM",
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtVM"
+        expected_result = "simple-ee-ab-2-vnf111-xxx-y-mgmtVM-0-vdu"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_get_vnf_count_db_vnfr_ns_charm(self):
+        self.db.get_one.return_value = {"member-vnf-index-ref": "sample-ref"}
+        charm_level = "ns-level"
+        vnf_id_and_count = "m7fbd751-3de4-4e68-bd40-ec5ae0a53898-4"
+        with patch.object(self.n2vc, "db", self.db):
+            vnf_count, db_vnfr = self.n2vc._get_vnf_count_and_record(
+                charm_level, vnf_id_and_count
+            )
+        self.assertEqual(vnf_count, "")
+        self.assertEqual(db_vnfr, {})
+
+    def test_get_vnf_count_db_vnfr_vnf_charm(self):
+        self.db.get_one.return_value = {"member-vnf-index-ref": "sample-ref"}
+        charm_level = "vnf-level"
+        vnf_id_and_count = "m7fbd751-3de4-4e68-bd40-ec5ae0a53898-4"
+        with patch.object(self.n2vc, "db", self.db):
+            vnf_count, db_vnfr = self.n2vc._get_vnf_count_and_record(
+                charm_level, vnf_id_and_count
+            )
+        self.assertEqual(vnf_count, "4")
+        self.assertEqual(db_vnfr, {"member-vnf-index-ref": "sample-ref"})
+
+    def test_get_vnf_count_db_vnfr_vdu_charm(self):
+        self.db.get_one.return_value = {"member-vnf-index-ref": "sample-ref"}
+        charm_level = "vdu-level"
+        vnf_id_and_count = "m7fbd751-3de4-4e68-bd40-ec5ae0a53898-2"
+        with patch.object(self.n2vc, "db", self.db):
+            vnf_count, db_vnfr = self.n2vc._get_vnf_count_and_record(
+                charm_level, vnf_id_and_count
+            )
+        self.assertEqual(vnf_count, "2")
+        self.assertEqual(db_vnfr, {"member-vnf-index-ref": "sample-ref"})
+
+    def test_get_vca_records_vdu_charm(self):
+        charm_level = "vdu-level"
+        db_vnfr = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        db_nsr = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "vnf/vnf2/datavm",
+                            "member-vnf-index": "vnf222-xxx-yyy-zzz",
+                            "vdu_id": "45512ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "datavm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        expected_result = [
+            {
+                "target_element": "vnf/vnf1/mgmtvm",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr)
+        self.assertEqual(vca_records, expected_result)
+
+    def test_get_vca_records_vnf_charm_member_vnf_index_mismatch(self):
+        charm_level = "vnf-level"
+        db_vnfr = {"member-vnf-index-ref": "vnf222-xxx-yyy-zzz"}
+        db_nsr = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "45512ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "datavm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        expected_result = []
+        vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr)
+        self.assertEqual(vca_records, expected_result)
+
+    def test_get_vca_records_ns_charm(self):
+        charm_level = "ns-level"
+        db_vnfr = {"member-vnf-index-ref": "vnf222-xxx-yyy-zzz"}
+        db_nsr = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        expected_result = [
+            {
+                "target_element": "ns",
+                "member-vnf-index": None,
+                "vdu_id": None,
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "",
+                "vdu_name": "",
+                "ee_descriptor_id": "",
+                "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr)
+        self.assertEqual(vca_records, expected_result)
+
+    def test_get_vca_records_ns_charm_empty_charm_name(self):
+        charm_level = "ns-level"
+        db_vnfr = {"member-vnf-index-ref": "vnf222-xxx-yyy-zzz"}
+        db_nsr = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        expected_result = [
+            {
+                "target_element": "ns",
+                "member-vnf-index": None,
+                "vdu_id": None,
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "",
+                "vdu_name": "",
+                "ee_descriptor_id": "",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr)
+        self.assertEqual(vca_records, expected_result)
+
+    def test_get_application_name_vnf_charm(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "simple-ee-ab-z0-vnf111-xxx-y-vnf"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vnf-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    @patch(
+        "n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_get_application_name_vnf_charm_old_naming(
+        self, mock_generate_random_alfanum
+    ):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "app-vnf-eb3161eec0-z0-random"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vnf-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_vnf_charm_vnf_index_ref_mismatch(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {"member-vnf-index-ref": "vnf222-xxx-yyy-zzz"}
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            with self.assertRaises(N2VCException):
+                self.n2vc._get_application_name(namespace)
+                mock_vnf_count_and_record.assert_called_once_with(
+                    "vnf-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+                )
+                self.db.get_one.assert_called_once()
+
+    def test_get_application_name_vdu_charm(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0.mgmtVM-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "mgmtVM",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "simple-ee-ab-z0-vnf111-xxx-y-mgmtvm-z0-vdu"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vdu-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_kdu_charm(self):
+        namespace = ".82b11965-e580-47c0-9ee0-329f318a305b.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0.ldap"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/openldap/kdu/ldap",
+                            "member-vnf-index": "openldap",
+                            "vdu_id": None,
+                            "kdu_name": "ldap",
+                            "vdu_count_index": 0,
+                            "operational-status": "init",
+                            "detailed-status": "",
+                            "step": "initial-deploy",
+                            "vnfd_id": "openldap_knf",
+                            "vdu_name": None,
+                            "type": "lxc_proxy_charm",
+                            "ee_descriptor_id": "openldap-ee",
+                            "charm_name": "",
+                            "ee_id": "",
+                            "application": "openldap-ee-z0-openldap-vdu",
+                            "model": "82b11965-e580-47c0-9ee0-329f318a305b",
+                            "config_sw_installed": True,
+                        }
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {"member-vnf-index-ref": "openldap", "vdur": {}}
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "openldap-ee-z0-openldap-ldap-vdu"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vdu-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    @patch(
+        "n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_get_application_name_vdu_charm_old_naming(
+        self, mock_generate_random_alfanum
+    ):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0.mgmtVM-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtVM",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "mgmtVM",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "app-vnf-eb3161eec0-z0-vdu-mgmtvm-cnt-z0-random"
+
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vdu-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_ns_charm(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        }
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {}
+        vnf_count = ""
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "simple-ns-charm-abc-z000-rrrr-nnnn-z4444-h-ns"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with("ns-level", None)
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_ns_charm_empty_charm_name(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        }
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {}
+        vnf_count = ""
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            with self.assertRaises(N2VCException):
+                self.n2vc._get_application_name(namespace)
+                mock_vnf_count_and_record.assert_called_once_with("ns-level", None)
+                self.db.get_one.assert_called_once()
+
+    @patch(
+        "n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_get_application_name_ns_charm_old_naming(
+        self, mock_generate_random_alfanum
+    ):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        }
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {}
+        vnf_count = ""
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "app-random"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with("ns-level", None)
+            self.db.get_one.assert_called_once()
+
+
+class DeleteExecutionEnvironmentTest(N2VCJujuConnTestCase):
+    def setUp(self):
+        super(DeleteExecutionEnvironmentTest, self).setUp()
+        self.n2vc.libjuju.get_controller = AsyncMock()
+        self.n2vc.libjuju.destroy_model = AsyncMock()
+        self.n2vc.libjuju.destroy_application = AsyncMock()
+
+    def test_remove_ee__target_application_exists__model_is_deleted(self):
+        get_ee_id_components = MagicMock()
+        get_ee_id_components.return_value = ("my_model", "my_app", None)
+        model = MagicMock(create_autospec=True)
+        model.applications = {}
+        self.n2vc.libjuju.get_model = AsyncMock()
+        self.n2vc.libjuju.get_model.return_value = model
+        with patch.object(self.n2vc, "_get_ee_id_components", get_ee_id_components):
+            self.loop.run_until_complete(
+                self.n2vc.delete_execution_environment(
+                    "my_ee", application_to_delete="my_app"
+                )
+            )
+        self.n2vc.libjuju.destroy_application.assert_called_with(
+            model_name="my_model",
+            application_name="my_app",
+            total_timeout=None,
+        )
+        self.n2vc.libjuju.destroy_model.assert_called_with(
+            model_name="my_model",
+            total_timeout=None,
+        )
+
+    def test_remove_ee__multiple_applications_exist__model_is_not_deleted(self):
+        get_ee_id_components = MagicMock()
+        get_ee_id_components.return_value = ("my_model", "my_app", None)
+        model = MagicMock(create_autospec=True)
+        model.applications = {MagicMock(create_autospec=True)}
+        self.n2vc.libjuju.get_model = AsyncMock()
+        self.n2vc.libjuju.get_model.return_value = model
+        with patch.object(self.n2vc, "_get_ee_id_components", get_ee_id_components):
+            self.loop.run_until_complete(
+                self.n2vc.delete_execution_environment(
+                    "my_ee", application_to_delete="my_app"
+                )
+            )
+        self.n2vc.libjuju.destroy_application.assert_called_with(
+            model_name="my_model",
+            application_name="my_app",
+            total_timeout=None,
+        )
+        self.n2vc.libjuju.destroy_model.assert_not_called()
+
+    def test_remove_ee__target_application_does_not_exist__model_is_deleted(self):
+        get_ee_id_components = MagicMock()
+        get_ee_id_components.return_value = ("my_model", "my_app", None)
+        with patch.object(self.n2vc, "_get_ee_id_components", get_ee_id_components):
+            self.loop.run_until_complete(
+                self.n2vc.delete_execution_environment("my_ee")
+            )
+        self.n2vc.libjuju.destroy_model.assert_called_with(
+            model_name="my_model",
+            total_timeout=None,
+        )
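
Taken together, these three cases pin down the branching expected from delete_execution_environment: destroy only the named application when one is given, and drop the model unless other applications still live in it. A minimal sketch of that flow under those assumptions (names mirror the mocks above; this is not a reproduction of the real method in n2vc/n2vc_juju_conn.py):

async def delete_execution_environment_sketch(
    n2vc, ee_id, application_to_delete=None, total_timeout=None
):
    # "model.application.machine" -> the model and application to act on.
    model_name, application_name, _ = n2vc._get_ee_id_components(ee_id)
    if application_to_delete:
        # Destroy only the requested application...
        await n2vc.libjuju.destroy_application(
            model_name=model_name,
            application_name=application_name,
            total_timeout=total_timeout,
        )
        # ...and keep the model alive while other applications remain in it.
        model = await n2vc.libjuju.get_model(model_name)
        if model.applications:
            return
    # No application filter given, or the model is now empty: drop the model.
    await n2vc.libjuju.destroy_model(
        model_name=model_name, total_timeout=total_timeout
    )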
diff --git a/n2vc/tests/unit/test_store.py b/n2vc/tests/unit/test_store.py
index c7aa2d6..abc5e13 100644 (file)
@@ -138,12 +138,20 @@ class TestMotorStore(TestCase):
         self.vca_collection.find_one = AsyncMock()
         self.vca_collection.insert_one = AsyncMock()
         self.vca_collection.replace_one = AsyncMock()
+        self.encryption = Mock()
+        self.encryption.admin_collection = Mock()
+        self.encryption.admin_collection.find_one = AsyncMock()
         self.admin_collection = Mock()
         self.admin_collection.find_one = AsyncMock()
         self.admin_collection.insert_one = AsyncMock()
         self.admin_collection.replace_one = AsyncMock()
         self.vim_accounts_collection = Mock()
         self.vim_accounts_collection.find_one = AsyncMock()
+        self.store.encryption._client = {
+            "osm": {
+                "admin": self.encryption.admin_collection,
+            }
+        }
         self.store._client = {
             "osm": {
                 "vca": self.vca_collection,
@@ -152,7 +160,7 @@ class TestMotorStore(TestCase):
             }
         }
         self.store._config = {"database_commonkey": "osm"}
-        # self.store.decrypt_fields = Mock()
+        self.store.encryption._config = {"database_commonkey": "osm"}
         self.loop = asyncio.get_event_loop()
 
     @patch("n2vc.vca.connection_data.base64_to_cacert")
@@ -174,7 +182,7 @@ class TestMotorStore(TestCase):
         db_find_one = conn_data.copy()
         db_find_one.update({"schema_version": "1.1", "_id": "id"})
         self.vca_collection.find_one.return_value = db_find_one
-        self.store.decrypt_fields = AsyncMock()
+        self.store.encryption.decrypt_fields = AsyncMock()
         connection_data = self.loop.run_until_complete(
             self.store.get_vca_connection_data("vca_id")
         )
@@ -207,7 +215,6 @@ class TestMotorStore(TestCase):
         encrypted_secret = "kI46kRJh828ExSNpr16OG/q5a5/qTsE0bsHrv/W/2/g="
         cacert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ4ekNDQWx1Z0F3SUJBZ0lVRWlzTTBoQWxiYzQ0Z1ZhZWh6bS80ZUsyNnRZd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0lURU5NQXNHQTFVRUNoTUVTblZxZFRFUU1BNEdBMVVFQXhNSGFuVnFkUzFqWVRBZUZ3MHlNVEEwTWpNeApNRFV3TXpSYUZ3MHpNVEEwTWpNeE1EVTFNelJhTUNFeERUQUxCZ05WQkFvVEJFcDFhblV4RURBT0JnTlZCQU1UCkIycDFhblV0WTJFd2dnR2lNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJqd0F3Z2dHS0FvSUJnUUNhTmFvNGZab2gKTDJWYThtdy9LdCs3RG9tMHBYTlIvbEUxSHJyVmZvbmZqZFVQV01zSHpTSjJZZXlXcUNSd3BiaHlLaE82N1c1dgpUY2RsV3Y3WGFLTGtsdVkraDBZY3BQT3BFTmZZYmxrNGk0QkV1L0wzYVY5MFFkUFFrMG94S01CS2R5QlBNZVNNCkJmS2pPWXdyOGgzM0ZWUWhmVkJnMXVGZ2tGaDdTamNuNHczUFdvc1BCMjNiVHBCbGR3VE9zemN4Qm9TaDNSVTkKTzZjb3lQdDdEN0drOCtHRlA3RGRUQTdoV1RkaUM4cDBkeHp2RUNmY0psMXNFeFEyZVprS1QvVzZyelNtVDhUTApCM0ErM1FDRDhEOEVsQU1IVy9zS25SeHphYU8welpNVmVlQnRnNlFGZ1F3M0dJMGo2ZTY0K2w3VExoOW8wSkZVCjdpUitPY01xUzVDY0NROGpWV3JPSk9Xc2dEbDZ4T2FFREczYnR5SVJHY29jbVcvcEZFQjNZd1A2S1BRTUIrNXkKWDdnZExEWmFGRFBVakZmblhkMnhHdUZlMnpRTDNVbXZEUkZuUlBBaW02QlpQbWo1OFh2emFhZXROa3lyaUZLZwp4Z0Z1dVpTcDUwV2JWdjF0MkdzOTMrRE53NlhFZHRFYnlWWUNBa28xTTY0MkozczFnN3NoQnRFQ0F3RUFBYU1qCk1DRXdEZ1lEVlIwUEFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUwKQlFBRGdnR0JBRXYxM2o2ZGFVbDBqeERPSnNTV1ZJZS9JdXNXVTRpN2ZXSWlqMHAwRU1GNS9LTE8yemRndTR5SQoreVd2T3N5aVFPanEzMlRYVlo2bTRDSnBkR1dGVE5HK2lLdXVOU3M0N3g3Q3dmVUNBWm5VVzhyamd3ZWJyS3BmCkJMNEVQcTZTcW0rSmltN0VPankyMWJkY2cyUXdZb3A3eUhvaHcveWEvL0l6RTMzVzZxNHlJeEFvNDBVYUhPTEMKTGtGbnNVYitjcFZBeFlPZGp6bjFzNWhnclpuWXlETEl3WmtIdFdEWm94alUzeC9jdnZzZ1FzLytzTWYrRFU4RgpZMkJKRHJjQ1VQM2xzclc0QVpFMFplZkEwOTlncFEvb3dSN0REYnMwSjZUeFM4NGt6Tldjc1FuWnRraXZheHJNClkyVHNnaWVndFExVFdGRWpxLy9sUFV4emJCdmpnd1FBZm5CQXZGeVNKejdTa0VuVm5rUXJGaUlUQVArTHljQVIKMlg4UFI2ZGI1bEt0SitBSENDM3kvZmNQS2k0ZzNTL3djeXRRdmdvOXJ6ODRFalp5YUNTaGJXNG9jNzNrMS9RcAowQWtHRDU0ZGVDWWVPYVJNbW96c0w3ZzdxWkpFekhtODdOcVBYSy9EZFoweWNxaVFhMXY2T3QxNjdXNUlzMUkzCjBWb0IzUzloSlE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCgo="  # noqa: E501
         encrypted_cacert = "QeV4evTLXzcKwZZvmXQ/OvSHToXH3ISwfoLmU+Q9JlQWAFUHSJ9IhO0ewaQrJmx3NkfFb7NCxsQhh+wE57zDW4rWgn4w/SWkzvwSi1h2xYOO3ECEHzzVqgUm15Sk0xaj1Fv9Ed4hipf6PRijeOZ7A1G9zekr1w9WIvebMyJZrK+f6QJ8AP20NUZqG/3k+MeJr3kjrl+8uwU5aPOrHAexSQGAqSKTkWzW7glmlyMWTjwkuSgNVgFg0ctdWTZ5JnNwxXbpjwIKrC4E4sIHcxko2vsTeLF8pZFPk+3QUZIg8BrgtyM3lJC2kO1g3emPQhCIk3VDb5GBgssc/GyFyRXNS651d5BNgcABOKZ4Rv/gGnprB35zP7TKJKkST44XJTEBiugWMkSZg+T9H98/l3eE34O6thfTZXgIyG+ZM6uGlW2XOce0OoEIyJiEL039WJe3izjbD3b9sCCdgQc0MgS+hTaayJI6oCUWPsJLmRji19jLi/wjOsU5gPItCFWw3pBye/A4Zf8Hxm+hShvqBnk8R2yx1fPTiyw/Zx4Jn8m49XQJyjDSZnhIck0PVHR9xWzKCr++PKljLMLdkdFxVRVPFQk/FBbesqofjSXsq9DASY6ACTL3Jmignx2OXD6ac4SlBqCTjV2dIM0yEgZF7zwMNCtppRdXTV8S29JP4W2mfaiqXCUSRTggv8EYU+9diCE+8sPB6HjuLrsfiySbFlYR2m4ysDGXjsVx5CDAf0Nh4IRfcSceYnnBGIQ2sfgGcJFOZoJqr/QeE2NWz6jlWYbWT7MjS/0decpKxP7L88qrR+F48WXQvfsvjWgKjlMKw7lHmFF8FeY836VWWICTRZx+y6IlY1Ys2ML4kySF27Hal4OPhOOoBljMNMVwUEvBulOnKUWw4BGz8eGCl8Hw6tlyJdC7kcBj/aCyNCR/NnuDk4Wck6e//He8L6mS83OJi/hIFc8vYQxnCJMXj9Ou7wr5hxtBnvxXzZM3kFHxCDO24Cd5UyBV9GD8TiQJfBGAy7a2BCBMb5ESVX8NOkyyv2hXMHOjpnKhUM9yP3Ke4CBImO7mCKJNHdFVtAmuyVKJ+jT6ooAAArkX2xwEAvBEpvGNmW2jgs6wxSuKY0h5aUm0rA4v/s8fqSZhzdInB54sMldyAnt9G+9e+g933DfyA/tkc56Ed0vZ/XEvTkThVHyUbfYR/Gjsoab1RpnDBi4aZ2E7iceoBshy+L6NXdL0jlWEs4ZubiWlbVNWlN/MqJcjV/quLU7q4HtkG0MDEFm6To3o48x7xpv8otih6YBduNqBFnwQ6Qz9rM2chFgOR4IgNSZKPxHO0AGCi1gnK/CeCvrSfWYAMn+2rmw0hMZybqKMStG28+rXsKDdqmy6vAwL/+dJwkAW+ix68rWRXpeqHlWidu4SkIBELuwEkFIC/GJU/DRvcN2GG9uP1m+VFifCIS2UdiO4OVrP6PVoW1O+jBJvFH3K1YT7CRqevb9OzjS9fO1wjkOff0W8zZyJK9Mp25aynpf0k3oMpZDpjnlOsFXFUb3N6SvXD1Yi95szIlmsr5yRYaeGUJH7/SAmMr8R6RqsCR0ANptL2dtRoGPi/qcDQE15vnjJ+QMYCg9KbCdV+Qq5di93XAjmwPj6tKZv0aXQuaTZgYR7bdLmAnJaFLbHWcQG1k6F/vdKNEb7llLsoAD9KuKXPZT/LErIyKcI0RZySy9yvhTZb4jQWn17b83yfvqfd5/2NpcyaY4gNERhDRJHw7VhoS5Leai5ZnFaO3C1vU9tIJ85XgCUASTsBLoQWVCKPSQZGxzF7PVLnHui3YA5OsOQpVqAPtgGZ12tP9XkEKj+u2/Atj2bgYrqBF7zUL64X/AQpwr/UElWDhJLSD/KStVeDOUx3AwAVVi9eTUJr6NiNMutCE1sqUf9XVIddgZ/BaG5t3NV2L+T+11QzAl+Xrh8wH/XeUCTmnU3NGkvCz/9Y7PMS+qQL7T7WeGdYmEhb5s/5p/yjSYeqybr5sANOHs83OdeSXbop9cLWW+JksHmS//rHHcrrJhZgCb3P0EOpEoEMCarT6sJq0V1Hwf/YNFdJ9V7Ac654ALS+a9ffNthMUEJeY21QMtNOrEg3QH5RWBPn+yOYN/f38tzwlT1k6Ec94y/sBmeQVv8rRzkkiMSXeAL5ATdJntq8NQq5JbvLQDNnZnHQthZt+uhcUf08mWlRrxxBUaE6xLppgMqFdYSjLGvgn/d8FZ9y7UCg5ZBhgP1rrRQL1COpNKKlJLf5laqwiGAucIDmzSbhO+MidSauDLWuv+fsdd2QYk98PHxqNrPYLrlAlABFi3JEApBm4IlrGbHxKg6dRiy7L1c9xWnAD7E3XrZrSc6DXvGRsjMXWoQdlp4CX5H3cdH9sjIE6akWqiwwrOP6QTbJcxmJGv/MVhsDVrVKmrKSn2H0/Us1fyYCHCOyCSc2L96uId8i9wQO1NXj+1PJmUq3tJ8U0TUwTblOEQdYej99xEI8EzsXLjNJHCgbDygtHBYd/SHToXH3ISwfoLmU+Q9JlS1woaUpVa5sdvbsr4BXR6J"  # noqa: E501
-
         self.vca_collection.find_one.return_value = {
             "_id": "2ade7f0e-9b58-4dbd-93a3-4ec076185d39",
             "schema_version": "1.11",
@@ -216,7 +223,7 @@ class TestMotorStore(TestCase):
             "secret": encrypted_secret,
             "cacert": encrypted_cacert,
         }
             "secret": encrypted_secret,
             "cacert": encrypted_cacert,
         }
-        self.admin_collection.find_one.return_value = {
+        self.encryption.admin_collection.find_one.return_value = {
             "serial": b"l+U3HDp9td+UjQ+AN+Ypj/Uh7n3C+rMJueQNNxkIpWI="
         }
         connection_data = self.loop.run_until_complete(
             "serial": b"l+U3HDp9td+UjQ+AN+Ypj/Uh7n3C+rMJueQNNxkIpWI="
         }
         connection_data = self.loop.run_until_complete(
diff --git a/n2vc/tests/unit/test_utils.py b/n2vc/tests/unit/test_utils.py
index bffbc29..3896b2f 100644 (file)
 
 from unittest import TestCase
 
-from n2vc.utils import Dict, EntityType, JujuStatusToOSM, N2VCDeploymentStatus
+from n2vc.utils import (
+    Dict,
+    EntityType,
+    JujuStatusToOSM,
+    N2VCDeploymentStatus,
+    get_ee_id_components,
+)
 from juju.machine import Machine
 from juju.application import Application
 from juju.action import Action
@@ -84,3 +90,17 @@ class UtilsTest(TestCase):
                 osm_status = status["osm"]
                 self.assertTrue(juju_status in JujuStatusToOSM[entity_type])
                 self.assertEqual(osm_status, JujuStatusToOSM[entity_type][juju_status])
+
+
+class GetEEComponentTest(TestCase):
+    def test_valid(self):
+        model, application, machine = get_ee_id_components("model.application.machine")
+        self.assertEqual(model, "model")
+        self.assertEqual(application, "application")
+        self.assertEqual(machine, "machine")
+
+    def test_invalid(self):
+        with self.assertRaises(Exception):
+            get_ee_id_components("model.application.machine.1")
+        with self.assertRaises(Exception):
+            get_ee_id_components("model.application")
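
These two tests fix the contract for get_ee_id_components: an ee_id must split into exactly three dot-separated fields. A conforming sketch (the real implementation lives in n2vc/utils.py and may differ in its error type and message):

def get_ee_id_components_sketch(ee_id):
    # "model.application.machine" -> ("model", "application", "machine")
    parts = ee_id.split(".")
    if len(parts) != 3:
        raise Exception("invalid ee_id: {}".format(ee_id))
    model_name, application_name, machine_id = parts
    return model_name, application_name, machine_id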
diff --git a/n2vc/tests/unit/testdata/test_certificate.yaml b/n2vc/tests/unit/testdata/test_certificate.yaml
new file mode 100644 (file)
index 0000000..d21e022
--- /dev/null
@@ -0,0 +1,42 @@
+# Copyright 2022 Whitestack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: test-cert
+  namespace: osm
+spec:
+  secretName: test-cert-secret
+  privateKey:
+    rotationPolicy: Always
+    algorithm: ECDSA
+    size: 256
+  duration: 8760h
+  renewBefore: 2208h
+  subject:
+    organizations:
+      - osm
+  commonName: osm
+  isCA: false
+  usages:
+    - server auth
+  dnsNames:
+    - "*.osm"
+    - "*.osm.svc"
+    - "*.osm.svc.cluster"
+    - "*.osm.svc.cluster.local"
+  issuerRef:
+    name: ca-issuer
+    kind: ClusterIssuer
diff --git a/n2vc/tests/unit/testdata/test_db_descriptors.py b/n2vc/tests/unit/testdata/test_db_descriptors.py
new file mode 100644 (file)
index 0000000..c6f3670
--- /dev/null
@@ -0,0 +1,414 @@
+# Copyright 2022 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+db_nsrs_text = """
+---
+-   _id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+    name: k8s-ns
+    name-ref: k8s-ns
+    short-name: k8s-ns
+    admin-status: ENABLED
+    nsState: READY
+    currentOperation: IDLE
+    currentOperationID: null
+    errorDescription: null
+    errorDetail: null
+    deploymentStatus: null
+    configurationStatus:
+      - elementType: VNF
+        elementUnderConfiguration: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+        status: READY
+      - elementType: VNF
+        elementUnderConfiguration: 17892d73-aa19-4b87-9a00-1d094f07a6b3
+        status: READY
+    vcaStatus: null
+    nsd:
+      _id: 12f320b5-2a57-40f4-82b5-020a6b1171d7
+      id: k8s_proxy_charm-ns
+      version: '1.0'
+      name: k8s_proxy_charm-ns
+      vnfd-id:
+        - k8s_proxy_charm-vnf
+      virtual-link-desc:
+        - id: mgmtnet
+          mgmt-network: true
+        - id: datanet
+      df:
+        - id: default-df
+          vnf-profile:
+            - id: vnf1
+              virtual-link-connectivity:
+                - constituent-cpd-id:
+                    - constituent-base-element-id: vnf1
+                      constituent-cpd-id: vnf-mgmt-ext
+                  virtual-link-profile-id: mgmtnet
+                - constituent-cpd-id:
+                    - constituent-base-element-id: vnf1
+                      constituent-cpd-id: vnf-data-ext
+                  virtual-link-profile-id: datanet
+              vnfd-id: k8s_proxy_charm-vnf
+            - id: vnf2
+              virtual-link-connectivity:
+                - constituent-cpd-id:
+                    - constituent-base-element-id: vnf2
+                      constituent-cpd-id: vnf-mgmt-ext
+                  virtual-link-profile-id: mgmtnet
+                - constituent-cpd-id:
+                    - constituent-base-element-id: vnf2
+                      constituent-cpd-id: vnf-data-ext
+                  virtual-link-profile-id: datanet
+              vnfd-id: k8s_proxy_charm-vnf
+      description: NS with 2 VNFs with cloudinit connected by datanet and mgmtnet VLs
+      _admin:
+        userDefinedData: {}
+        revision: 1
+        created: 1658990740.88281
+        modified: 1658990741.09266
+        projects_read:
+          - 51e0e80fe533469d98766caa16552a3e
+        projects_write:
+          - 51e0e80fe533469d98766caa16552a3e
+        onboardingState: ONBOARDED
+        operationalState: ENABLED
+        usageState: NOT_IN_USE
+        storage:
+          fs: mongo
+          path: /app/storage/
+          folder: '12f320b5-2a57-40f4-82b5-020a6b1171d7:1'
+          pkg-dir: k8s_proxy_charm_ns
+          descriptor: k8s_proxy_charm_ns/k8s_proxy_charm_nsd.yaml
+          zipfile: k8s_proxy_charm_ns.tar.gz
+    datacenter: bad7338b-ae46-43d4-a434-c3337a8054ac
+    resource-orchestrator: osmopenmano
+    description: default description
+    constituent-vnfr-ref:
+      - 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+      - 17892d73-aa19-4b87-9a00-1d094f07a6b3
+    operational-status: running
+    config-status: configured
+    detailed-status: Done
+    orchestration-progress: {}
+    create-time: 1658998097.57611
+    nsd-name-ref: k8s_proxy_charm-ns
+    operational-events: []
+    nsd-ref: k8s_proxy_charm-ns
+    nsd-id: 12f320b5-2a57-40f4-82b5-020a6b1171d7
+    vnfd-id:
+      - 6d9e1ca1-f387-4d01-9876-066fc7311e0f
+    instantiate_params:
+      nsdId: 12f320b5-2a57-40f4-82b5-020a6b1171d7
+      nsName: k8s-ns
+      nsDescription: default description
+      vimAccountId: bad7338b-ae46-43d4-a434-c3337a8054ac
+      vld:
+        - name: mgmtnet
+          vim-network-name: osm-ext
+    additionalParamsForNs: null
+    ns-instance-config-ref: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+    id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+    ssh-authorized-key: null
+    flavor:
+      - id: '0'
+        memory-mb: 1024
+        name: mgmtVM-flv
+        storage-gb: '10'
+        vcpu-count: 1
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            vim_details: null
+            vim_id: 17a9ba76-beb7-4ad4-a481-97de37174866
+            vim_status: DONE
+      - vcpu-count: 1
+        memory-mb: 1024
+        storage-gb: '10'
+        name: mgmtVM-flv
+        id: '1'
+    image:
+      - id: '0'
+        image: ubuntu18.04
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            vim_details: null
+            vim_id: 919fc71a-6acd-4ee3-8123-739a9abbc2e7
+            vim_status: DONE
+      - image: 'Canonical:UbuntuServer:18.04-LTS:latest'
+        vim-type: azure
+        id: '1'
+      - image: 'ubuntu-os-cloud:image-family:ubuntu-1804-lts'
+        vim-type: gcp
+        id: '2'
+      - image: ubuntu/images/hvm-ssd/ubuntu-artful-17.10-amd64-server-20180509
+        vim-type: aws
+        id: '3'
+    affinity-or-anti-affinity-group: []
+    revision: 1
+    vld:
+      - id: mgmtnet
+        mgmt-network: true
+        name: mgmtnet
+        type: null
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            vim_account_id: bad7338b-ae46-43d4-a434-c3337a8054ac
+            vim_network_name: osm-ext
+            vim_details: >
+              {admin_state_up: true, availability_zone_hints: [],
+              availability_zones: [nova], created_at: '2019-10-17T23:44:03Z',
+              description: '', encapsulation: vlan, encapsulation_id: 2148,
+              encapsulation_type: vlan, id: 21ea5d92-24f1-40ab-8d28-83230e277a49,
+              ipv4_address_scope: null,
+                ipv6_address_scope: null, is_default: false, mtu: 1500, name: osm-ext, port_security_enabled: true, project_id: 456b6471010b4737b47a0dd599c920c5, 'provider:network_type': vlan, 'provider:physical_network': physnet1, 'provider:segmentation_id': 2148, revision_number: 1009,
+                'router:external': true, segmentation_id: 2148, shared: true, status: ACTIVE, subnets: [{subnet: {allocation_pools: [{end: 172.21.249.255, start: 172.21.248.1}], cidr: 172.21.248.0/22, created_at: '2019-10-17T23:44:07Z', description: '', dns_nameservers: [],
+                      enable_dhcp: true, gateway_ip: 172.21.251.254, host_routes: [], id: d14f68b7-8287-41fe-b533-dafb2240680a, ip_version: 4, ipv6_address_mode: null, ipv6_ra_mode: null, name: osm-ext-subnet, network_id: 21ea5d92-24f1-40ab-8d28-83230e277a49, project_id: 456b6471010b4737b47a0dd599c920c5,
+                      revision_number: 5, service_types: [], subnetpool_id: null, tags: [], tenant_id: 456b6471010b4737b47a0dd599c920c5, updated_at: '2020-09-14T15:15:06Z'}}], tags: [], tenant_id: 456b6471010b4737b47a0dd599c920c5, type: data, updated_at: '2022-07-05T18:39:02Z'}
+            vim_id: 21ea5d92-24f1-40ab-8d28-83230e277a49
+            vim_status: ACTIVE
+      - id: datanet
+        mgmt-network: false
+        name: datanet
+        type: null
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            vim_account_id: bad7338b-ae46-43d4-a434-c3337a8054ac
+            vim_network_name: null
+            vim_details: >
+              {admin_state_up: true, availability_zone_hints: [],
+              availability_zones: [nova], created_at: '2022-07-28T08:41:59Z',
+              description: '', encapsulation: vxlan, encapsulation_id: 27,
+              encapsulation_type: vxlan, id: 34056287-3cd5-42cb-92d3-413382b50813,
+              ipv4_address_scope: null,
+                ipv6_address_scope: null, mtu: 1450, name: k8s-ns-datanet, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, 'provider:network_type': vxlan, 'provider:physical_network': null, 'provider:segmentation_id': 27, revision_number: 2, 'router:external': false,
+                segmentation_id: 27, shared: false, status: ACTIVE, subnets: [{subnet: {allocation_pools: [{end: 192.168.181.254, start: 192.168.181.1}], cidr: 192.168.181.0/24, created_at: '2022-07-28T08:41:59Z', description: '', dns_nameservers: [], enable_dhcp: true, gateway_ip: null,
+                      host_routes: [], id: ab2920f8-881b-4bef-82a5-9582a7930786, ip_version: 4, ipv6_address_mode: null, ipv6_ra_mode: null, name: k8s-ns-datanet-subnet, network_id: 34056287-3cd5-42cb-92d3-413382b50813, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 0,
+                      service_types: [], subnetpool_id: null, tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2022-07-28T08:41:59Z'}}], tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, type: bridge, updated_at: '2022-07-28T08:41:59Z'}
+            vim_id: 34056287-3cd5-42cb-92d3-413382b50813
+            vim_status: ACTIVE
+    _admin:
+      created: 1658998097.58182
+      modified: 1658998193.42562
+      projects_read:
+        - 51e0e80fe533469d98766caa16552a3e
+      projects_write:
+        - 51e0e80fe533469d98766caa16552a3e
+      nsState: INSTANTIATED
+      current-operation: null
+      nslcmop: null
+      operation-type: null
+      deployed:
+        RO:
+          vnfd: []
+          operational-status: running
+        VCA:
+          - target_element: vnf/vnf1
+            member-vnf-index: vnf1
+            vdu_id: null
+            kdu_name: null
+            vdu_count_index: 0
+            operational-status: init
+            detailed-status: ''
+            step: initial-deploy
+            vnfd_id: k8s_proxy_charm-vnf
+            vdu_name: null
+            type: k8s_proxy_charm
+            ee_descriptor_id: simple-ee
+            charm_name: ''
+            ee_id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s.simple-ee-z0-vnf1-vnf.k8s
+            application: simple-ee-z0-vnf1-vnf
+            model: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s
+            config_sw_installed: true
+          - target_element: vnf/vnf2
+            member-vnf-index: vnf2
+            vdu_id: null
+            kdu_name: null
+            vdu_count_index: 0
+            operational-status: init
+            detailed-status: ''
+            step: initial-deploy
+            vnfd_id: k8s_proxy_charm-vnf
+            vdu_name: null
+            type: k8s_proxy_charm
+            ee_descriptor_id: simple-ee
+            charm_name: ''
+            ee_id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s.simple-ee-z0-vnf2-vnf.k8s
+            application: simple-ee-z0-vnf2-vnf
+            model: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s
+            config_sw_installed: true
+        K8s: []
+"""
+
+db_vnfrs_text = """
+-   _id: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+    id: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+    nsr-id-ref: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+    member-vnf-index-ref: vnf1
+    additionalParamsForVnf: null
+    created-time: 1658998097.58036
+    vnfd-ref: k8s_proxy_charm-vnf
+    vnfd-id: 6d9e1ca1-f387-4d01-9876-066fc7311e0f
+    vim-account-id: bad7338b-ae46-43d4-a434-c3337a8054ac
+    vca-id: null
+    vdur:
+      - _id: 38912ff7-5bdd-4228-911f-c2bee259c44a
+        additionalParams:
+          OSM:
+            count_index: 0
+            member_vnf_index: vnf1
+            ns_id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+            vdu:
+              mgmtVM-0:
+                count_index: 0
+                interfaces:
+                  dataVM-xe0:
+                    name: dataVM-xe0
+                  mgmtVM-eth0:
+                    name: mgmtVM-eth0
+                vdu_id: mgmtVM
+            vdu_id: mgmtVM
+            vim_account_id: bad7338b-ae46-43d4-a434-c3337a8054ac
+            vnf_id: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+            vnfd_id: 6d9e1ca1-f387-4d01-9876-066fc7311e0f
+            vnfd_ref: k8s_proxy_charm-vnf
+        affinity-or-anti-affinity-group-id: []
+        alt-image-ids:
+          - '1'
+          - '2'
+          - '3'
+        cloud-init: '6d9e1ca1-f387-4d01-9876-066fc7311e0f:file:cloud-config.txt'
+        count-index: 0
+        id: 38912ff7-5bdd-4228-911f-c2bee259c44a
+        interfaces:
+          - external-connection-point-ref: vnf-mgmt-ext
+            internal-connection-point-ref: mgmtVM-eth0-int
+            mgmt-interface: true
+            mgmt-vnf: true
+            name: mgmtVM-eth0
+            ns-vld-id: mgmtnet
+            position: 1
+            type: PARAVIRT
+            compute_node: nfvisrv11
+            ip-address: 172.21.248.199
+            mac-address: 'fa:16:3e:4d:65:e9'
+            pci: null
+            vlan: 2148
+          - external-connection-point-ref: vnf-data-ext
+            internal-connection-point-ref: dataVM-xe0-int
+            name: dataVM-xe0
+            ns-vld-id: datanet
+            position: 2
+            type: PARAVIRT
+            compute_node: nfvisrv11
+            ip-address: 192.168.181.179
+            mac-address: 'fa:16:3e:ca:b5:d3'
+            pci: null
+            vlan: null
+        internal-connection-point:
+          - connection-point-id: mgmtVM-eth0-int
+            id: mgmtVM-eth0-int
+            name: mgmtVM-eth0-int
+          - connection-point-id: dataVM-xe0-int
+            id: dataVM-xe0-int
+            name: dataVM-xe0-int
+        ip-address: 172.21.248.199
+        ns-flavor-id: '0'
+        ns-image-id: '0'
+        ssh-access-required: true
+        ssh-keys:
+          - >
+            ssh-rsa
+            AAAAB3NzaC1yc2EAAAADAQABAAACAQDW3dtEDKfwZL0WZp6LeJUZFlZzYAHP7M4AsJwl2YFO/wmblfrTpWZ8tRyGwyjQacB7Zb7J07wD5AZACE71A3Nc9zjI22/gWN7N8X+ZxH6ywcr1GdXBqZDBeOdzD4pRb11E9mydGZ9l++KtFRtlF4G7IFYuxkOiSCJrkgiKuVDGodtQ/6VUKwxuI8U6N7MxtIBN2L3IfvMwuNyTo1daiUabQMwQKt/Q8Zpp78zsZ6SoxU+eYAHzbeTjAfNwhA88nRzRZn7tQW+gWl9wbSINbr2+JetTN+BTot/CMPmKzzul9tZrzhSzck1QSM3UDrD36ctRdaLABnWCoxpm0wJthNt693xVrFP+bMgK2BR0fyu9WwVEcHkC9CZ8yoi37k5rGVtoDw6sW6lxQ5QKS+Plv/YjGKqK3Ro/UoIEhgxcW53uz4PveyMBss4geB9ad/1T8dtugd288qfCWJRBpJBrE497EalhHolF3L/2bEu3uCKN0TY4POzqP/5cuAUc/uTJ2mjZewJdlJtrn7IyFtSUypeuVmXRx5LwByQw9EwPhUZlKVjYEHYmu5YTKlFSWyorWgRLBBIK7LLPj+bCGgLeT+fXmip6eFquAyVtoQfDofQ/gc0OXEA1uKfK2VFKg1le+joz1WA/XieGSvKRQ4aZorYgi/FzbpxKj2a60cZubJMq5w==
+            root@lcm-7b6bcf7cdd-5h2ql
+          - >-
+            ssh-rsa
+            AAAAB3NzaC1yc2EAAAADAQABAAABAQDtg65/Jh3KDWC9+YzkTz8Md/uhalkjPo15DSxlUNWzYQNFUzaG5Pt0trDwQ29UOQIUy1CB9HpWSZMTA1ESet/+cyXWkZ9MznAmGLQBdnwqWU792UQf6rv74Zpned8MbnKQXfs8gog1ZFFKRMcwitNRqs8xs8XsPLE/l1Jo2QemhM0fIRofjJiLKYaKeGP59Fb8UlIeGDaxmIFgLs8bAZvrmjbae3o4b1fZDNboqlQbHb9rakxI9uCnsaBrCmelXpP9EFmENx85vdHEwCAfCRvSWKnbXuOojJJzFM5odoWFZo8AuIhEb5ZiLkGet3CvCfWZZPpQc4TuNDaY0t1XUegH
+            juju-client-key
+        vdu-id-ref: mgmtVM
+        vdu-name: mgmtVM
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            interfaces:
+              - vim_info: >
+                  {admin_state_up: true, allowed_address_pairs: [],
+                  'binding:host_id': nfvisrv11, 'binding:profile': {},
+                  'binding:vif_details': {bridge_name: br-int, connectivity: l2,
+                  datapath_type: system, ovs_hybrid_plug: true, port_filter: true},
+                  'binding:vif_type': ovs, 'binding:vnic_type': normal,
+                    created_at: '2022-07-28T08:42:04Z', description: '', device_id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7, device_owner: 'compute:nova', extra_dhcp_opts: [], fixed_ips: [{ip_address: 172.21.248.199, subnet_id: d14f68b7-8287-41fe-b533-dafb2240680a}], id: e053d44f-1d67-4274-b85d-1cef243353d6,
+                    mac_address: 'fa:16:3e:4d:65:e9', name: mgmtVM-eth0, network_id: 21ea5d92-24f1-40ab-8d28-83230e277a49, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 4, security_groups: [1de4b2c2-e4be-4e91-985c-d887e2715949], status: ACTIVE,
+                    tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2022-07-28T08:42:16Z'}
+                mac_address: 'fa:16:3e:4d:65:e9'
+                vim_net_id: 21ea5d92-24f1-40ab-8d28-83230e277a49
+                vim_interface_id: e053d44f-1d67-4274-b85d-1cef243353d6
+                compute_node: nfvisrv11
+                pci: null
+                vlan: 2148
+                ip_address: 172.21.248.199
+                mgmt_vnf_interface: true
+                mgmt_vdu_interface: true
+              - vim_info: >
+                  {admin_state_up: true, allowed_address_pairs: [],
+                  'binding:host_id': nfvisrv11, 'binding:profile': {},
+                  'binding:vif_details': {bridge_name: br-int, connectivity: l2,
+                  datapath_type: system, ovs_hybrid_plug: true, port_filter: true},
+                  'binding:vif_type': ovs, 'binding:vnic_type': normal,
+                    created_at: '2022-07-28T08:42:04Z', description: '', device_id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7, device_owner: 'compute:nova', extra_dhcp_opts: [], fixed_ips: [{ip_address: 192.168.181.179, subnet_id: ab2920f8-881b-4bef-82a5-9582a7930786}], id: 8a34c944-0fc1-41ae-9dbc-9743e5988162,
+                    mac_address: 'fa:16:3e:ca:b5:d3', name: dataVM-xe0, network_id: 34056287-3cd5-42cb-92d3-413382b50813, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 4, security_groups: [1de4b2c2-e4be-4e91-985c-d887e2715949], status: ACTIVE,
+                    tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2022-07-28T08:42:15Z'}
+                mac_address: 'fa:16:3e:ca:b5:d3'
+                vim_net_id: 34056287-3cd5-42cb-92d3-413382b50813
+                vim_interface_id: 8a34c944-0fc1-41ae-9dbc-9743e5988162
+                compute_node: nfvisrv11
+                pci: null
+                vlan: null
+                ip_address: 192.168.181.179
+            vim_details: >
+              {'OS-DCF:diskConfig': MANUAL, 'OS-EXT-AZ:availability_zone': nova,
+              'OS-EXT-SRV-ATTR:host': nfvisrv11,
+              'OS-EXT-SRV-ATTR:hypervisor_hostname': nfvisrv11,
+              'OS-EXT-SRV-ATTR:instance_name': instance-0002967a,
+              'OS-EXT-STS:power_state': 1, 'OS-EXT-STS:task_state': null,
+                'OS-EXT-STS:vm_state': active, 'OS-SRV-USG:launched_at': '2022-07-28T08:42:17.000000', 'OS-SRV-USG:terminated_at': null, accessIPv4: '', accessIPv6: '', addresses: {k8s-ns-datanet: [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ca:b5:d3', 'OS-EXT-IPS:type': fixed,
+                      addr: 192.168.181.179, version: 4}], osm-ext: [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:4d:65:e9', 'OS-EXT-IPS:type': fixed, addr: 172.21.248.199, version: 4}]}, config_drive: '', created: '2022-07-28T08:42:06Z', flavor: {id: 17a9ba76-beb7-4ad4-a481-97de37174866,
+                  links: [{href: 'http://172.21.247.1:8774/flavors/17a9ba76-beb7-4ad4-a481-97de37174866', rel: bookmark}]}, hostId: 2aa7155bd281bd308d8e3776af56d428210c21aab788a8cbdf5ef500, id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7, image: {id: 919fc71a-6acd-4ee3-8123-739a9abbc2e7,
+                  links: [{href: 'http://172.21.247.1:8774/images/919fc71a-6acd-4ee3-8123-739a9abbc2e7', rel: bookmark}]}, key_name: null, links: [{href: 'http://172.21.247.1:8774/v2.1/servers/1fabddca-0dcf-4702-a5f3-5cc028c2aba7', rel: self}, {href: 'http://172.21.247.1:8774/servers/1fabddca-0dcf-4702-a5f3-5cc028c2aba7',
+                    rel: bookmark}], metadata: {}, name: k8s-ns-vnf1-mgmtVM-0, 'os-extended-volumes:volumes_attached': [], progress: 0, security_groups: [{name: default}, {name: default}], status: ACTIVE, tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated: '2022-07-28T08:42:17Z',
+                user_id: f043c84f940b4fc8a01a98714ea97c80}
+            vim_id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7
+            vim_status: ACTIVE
+            vim_name: k8s-ns-vnf1-mgmtVM-0
+        virtual-storages:
+          - id: mgmtVM-storage
+            size-of-storage: '10'
+        status: ACTIVE
+        vim-id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7
+        name: k8s-ns-vnf1-mgmtVM-0
+    connection-point:
+      - name: vnf-mgmt-ext
+        connection-point-id: mgmtVM-eth0-int
+        connection-point-vdu-id: mgmtVM
+        id: vnf-mgmt-ext
+      - name: vnf-data-ext
+        connection-point-id: dataVM-xe0-int
+        connection-point-vdu-id: mgmtVM
+        id: vnf-data-ext
+    ip-address: 172.21.248.199
+    revision: 1
+    _admin:
+      created: 1658998097.58048
+      modified: 1658998097.58048
+      projects_read:
+        - 51e0e80fe533469d98766caa16552a3e
+      projects_write:
+        - 51e0e80fe533469d98766caa16552a3e
+      nsState: INSTANTIATED
+"""
diff --git a/n2vc/tests/unit/testdata/upgrade-machine.log b/n2vc/tests/unit/testdata/upgrade-machine.log
new file mode 100644 (file)
index 0000000..6311432
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#
+["charm", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "charm-url": "local:bionic/simple-ha-proxy-29", "charm-version": "", "life": "alive", "profile": null, "config": {"ssh-hostname": "", "ssh-key-bits": 4096, "ssh-key-type": "rsa", "ssh-password": "", "ssh-public-key": "", "ssh-username": ""}}]
+["application", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0", "exposed": false, "charm-url": "local:bionic/simple-ha-proxy-29", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"ssh-hostname": "172.21.249.28", "ssh-password": "osm4u", "ssh-username": "ubuntu"}, "subordinate": false, "status": {"current": "unset", "message": "", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/0", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.54", "private-address": "10.37.209.54", "machine-id": "1", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.56175336Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T16:21:56.481875662Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.579802723Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T16:20:44.69125318Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.563068618Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T16:20:48.695716332Z", "version": "2.9.22"}}]
+["charm", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "charm-url": "local:bionic/simple-ha-proxy-28", "charm-version": "", "life": "dying", "profile": null, "config": {"ssh-hostname": "", "ssh-key-bits": 4096, "ssh-key-type": "rsa", "ssh-password": "", "ssh-public-key": "", "ssh-username": ""}}]
+["charm", "remove", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "charm-url": "local:bionic/simple-ha-proxy-28", "charm-version": "", "life": "dying", "profile": null, "config": {"ssh-hostname": "", "ssh-key-bits": 4096, "ssh-key-type": "rsa", "ssh-password": "", "ssh-public-key": "", "ssh-username": ""}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/0", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.54", "private-address": "10.37.209.54", "machine-id": "1", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.56175336Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T16:22:54.354997486Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.579802723Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T16:22:54.400387228Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.563068618Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T16:22:54.523797611Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/0", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.54", "private-address": "10.37.209.54", "machine-id": "1", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.56175336Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:54.934760959Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.579802723Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:54.982259225Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/0", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.54", "private-address": "10.37.209.54", "machine-id": "1", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Waiting for SSH credentials", "since": "2022-04-27T16:22:55.091278959Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:54.934760959Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.563068618Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:55.091697191Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Waiting for SSH credentials", "since": "2022-04-27T16:22:55.153254035Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:54.982259225Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Waiting for SSH credentials", "since": "2022-04-27T16:22:55.307204975Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:55.091697191Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:22:58.698041924Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:55.091697191Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:22:58.698041924Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-settings-changed hook", "since": "2022-04-27T16:22:59.098429743Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:22:58.698041924Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T16:22:59.636191881Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/0", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.54", "private-address": "10.37.209.54", "machine-id": "1", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:23:00.173022824Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:54.934760959Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:23:00.5376781Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:54.982259225Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/0", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.54", "private-address": "10.37.209.54", "machine-id": "1", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:23:00.173022824Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T16:23:00.529675913Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:23:00.5376781Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-settings-changed hook", "since": "2022-04-27T16:23:00.948967357Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:23:00.5376781Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T16:23:01.449283589Z", "version": "2.9.22"}}]
\ No newline at end of file
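
Past the license header, every line of these .log fixtures is one JSON-encoded Juju watcher delta of the form [entity, change_type, payload]. A sketch of replaying such a file (iter_deltas is an illustrative name, not a helper from this repo):

import json

def iter_deltas(path):
    # Yield (entity, change_type, payload) tuples from a watcher log fixture.
    with open(path) as log:
        for raw in log:
            line = raw.strip()
            if not line or line.startswith("#"):
                continue  # skip the license header and blank lines
            entity, change_type, payload = json.loads(line)
            yield entity, change_type, payload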
diff --git a/n2vc/tests/unit/testdata/upgrade-operator.log b/n2vc/tests/unit/testdata/upgrade-operator.log
new file mode 100644 (file)
index 0000000..66a5895
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/sshproxy-1", "charm-version": "", "life": "alive", "profile": null}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/sshproxy-1", "charm-version": "", "life": "alive", "profile": null, "config": {"apt-mirror": null, "security-apt-mirror": null, "ssh-hostname": "", "ssh-key-bits": 4096, "ssh-key-type": "rsa", "ssh-password": "", "ssh-public-key": "", "ssh-username": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy", "exposed": false, "charm-url": "local:kubernetes/sshproxy-1", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"ssh-hostname": "127.0.0.1", "ssh-password": "osm4u", "ssh-username": "ubuntu"}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:06:37.951722352Z", "version": ""}, "workload-version": ""}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy", "exposed": false, "charm-url": "local:kubernetes/sshproxy-1", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"ssh-hostname": "127.0.0.1", "ssh-password": "osm4u", "ssh-username": "ubuntu"}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:06:37.951722352Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy/0", "application": "sshproxy", "series": "kubernetes", "charm-url": "local:kubernetes/sshproxy-1", "life": "alive", "public-address": "", "private-address": "10.152.183.24", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:08:40.533982098Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:08:41.574108719Z", "version": ""}}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/sshproxy-0", "charm-version": "", "life": "dying", "profile": null, "config": {"apt-mirror": null, "security-apt-mirror": null, "ssh-hostname": "", "ssh-key-bits": 4096, "ssh-key-type": "rsa", "ssh-password": "", "ssh-public-key": "", "ssh-username": ""}}]
+["charm", "remove", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/sshproxy-0", "charm-version": "", "life": "dying", "profile": null, "config": {"apt-mirror": null, "security-apt-mirror": null, "ssh-hostname": "", "ssh-key-bits": 4096, "ssh-key-type": "rsa", "ssh-password": "", "ssh-public-key": "", "ssh-username": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy", "exposed": false, "charm-url": "local:kubernetes/sshproxy-1", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"ssh-hostname": "127.0.0.1", "ssh-password": "osm4u", "ssh-username": "ubuntu"}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:06:37.951722352Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy/0", "application": "sshproxy", "series": "kubernetes", "charm-url": "local:kubernetes/sshproxy-1", "life": "alive", "public-address": "", "private-address": "10.152.183.24", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "Active", "since": "2022-04-27T18:09:49.713279872Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T18:09:48.529774773Z", "version": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy", "exposed": false, "charm-url": "local:kubernetes/sshproxy-1", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"ssh-hostname": "127.0.0.1", "ssh-password": "osm4u", "ssh-username": "ubuntu"}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:06:37.951722352Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy/0", "application": "sshproxy", "series": "kubernetes", "charm-url": "local:kubernetes/sshproxy-1", "life": "alive", "public-address": "", "private-address": "10.152.183.24", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "Active", "since": "2022-04-27T18:09:49.713279872Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:09:50.760612389Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy/0", "application": "sshproxy", "series": "kubernetes", "charm-url": "local:kubernetes/sshproxy-1", "life": "alive", "public-address": "", "private-address": "10.152.183.24", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:09:51.90389784Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:09:50.760612389Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy/0", "application": "sshproxy", "series": "kubernetes", "charm-url": "local:kubernetes/sshproxy-1", "life": "alive", "public-address": "", "private-address": "10.152.183.24", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:09:51.90389784Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:09:52.859465812Z", "version": ""}}]
\ No newline at end of file
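
The capture above is a JSON-lines fixture: each non-comment line is one Juju AllWatcher delta of the form [entity, action, payload]. Below is a minimal sketch of how such a capture might be replayed, assuming only the format visible above; the function name and usage are illustrative and not part of this change.

import json

def load_fixture_events(path):
    # Each non-comment line of a testdata log is a JSON array:
    # [entity, action, payload]; '#' lines are license text or notes.
    events = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line and not line.startswith("#"):
                entity, action, payload = json.loads(line)
                events.append((entity, action, payload))
    return events

# Example: count the unit deltas in the operator-upgrade capture.
events = load_fixture_events("n2vc/tests/unit/testdata/upgrade-operator.log")
print(sum(1 for entity, _, _ in events if entity == "unit"))
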
diff --git a/n2vc/tests/unit/testdata/upgrade-podspec-stateful.log b/n2vc/tests/unit/testdata/upgrade-podspec-stateful.log
new file mode 100644 (file)
index 0000000..7d671b8
--- /dev/null
@@ -0,0 +1,41 @@
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/mongodb-k8s-0", "charm-version": "", "life": "alive", "profile": null}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/mongodb-k8s-0", "charm-version": "", "life": "alive", "profile": null, "config": {"replica_set_name": "rs0"}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-25T15:03:59.520286015Z", "version": ""}, "workload-version": ""}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-25T15:03:59.520286015Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T17:36:42.739482369Z", "version": ""}}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "charm-version": "", "life": "dying", "profile": null, "config": {"replica_set_name": "rs0"}}]
+["charm", "remove", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "charm-version": "", "life": "dying", "profile": null, "config": {"replica_set_name": "rs0"}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-25T15:03:59.520286015Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T18:23:25.164370911Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Fetching image information", "since": "2022-04-27T18:23:26.17972471Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T18:23:25.164370911Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Assembling pod spec", "since": "2022-04-27T18:23:26.4876642Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T18:23:25.164370911Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:26.747039555Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T18:23:25.164370911Z", "version": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-25T15:03:59.520286015Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:26.747039555Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:23:27.665397171Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Fetching image information", "since": "2022-04-27T18:23:28.405317887Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:23:27.665397171Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Assembling pod spec", "since": "2022-04-27T18:23:28.701544881Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:23:27.665397171Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:29.040857644Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:23:27.665397171Z", "version": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-25T15:03:59.520286015Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:29.040857644Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:23:29.956508325Z", "version": ""}}]
+##########################################################################################################################################################################################################################################################
+# These next events are visible on a stateful charm upgrade, but so far there is no method to link them to the overall upgrade change
+#["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:23:30.879168477Z", "version": ""}, "workload-version": ""}]
+#["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "waiting", "message": "", "since": "2022-04-27T18:23:33.296232835Z", "version": ""}, "workload-version": ""}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:29.040857644Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:23:29.956508325Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:29.040857644Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:23:29.956508325Z", "version": ""}}]
+#["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:23:53.480017079Z", "version": ""}, "workload-version": ""}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:29.040857644Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:23:54.070335385Z", "version": ""}}]
\ No newline at end of file
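
As the comment in the capture above notes, the trailing events of a stateful charm upgrade cannot yet be tied to the upgrade change itself, so a watcher has to infer completion from the unit deltas alone. Below is a minimal sketch of that inference, assuming only the pattern visible above (an agent running the upgrade-charm hook and later reporting idle); the helper names are illustrative and not the repo's actual test API.

import json

def iter_unit_payloads(path, unit_name):
    # Yield payloads of "unit" deltas for one unit, skipping '#' comment lines.
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            entity, _action, payload = json.loads(line)
            if entity == "unit" and payload.get("name") == unit_name:
                yield payload

def upgrade_settled(path, unit_name):
    # Treat the upgrade as done once the unit has run its upgrade-charm hook
    # and its agent has subsequently gone idle again.
    saw_hook = False
    for payload in iter_unit_payloads(path, unit_name):
        agent = payload.get("agent-status", {})
        if agent.get("message") == "running upgrade-charm hook":
            saw_hook = True
        elif saw_hook and agent.get("current") == "idle":
            return True
    return False

# Example: the stateful capture above settles for mongodb/0.
print(upgrade_settled("n2vc/tests/unit/testdata/upgrade-podspec-stateful.log", "mongodb/0"))
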
diff --git a/n2vc/tests/unit/testdata/upgrade-podspec-stateless.log b/n2vc/tests/unit/testdata/upgrade-podspec-stateless.log
new file mode 100644 (file)
index 0000000..0cfe2f5
--- /dev/null
@@ -0,0 +1,102 @@
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/osm-lcm-0", "charm-version": "", "life": "alive", "profile": null}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/osm-lcm-0", "charm-version": "", "life": "alive", "profile": null, "config": {"database_commonkey": "osm", "debug_common_local_path": null, "debug_lcm_local_path": null, "debug_mode": false, "debug_n2vc_local_path": null, "debug_pubkey": null, "image_pull_policy": "always", "log_level": "INFO", "mongodb_uri": null, "security_context": false, "vca_apiproxy": null, "vca_cacert": null, "vca_cloud": null, "vca_helm_ca_certs": "", "vca_host": null, "vca_k8s_cloud": null, "vca_model_config_agent_metadata_url": null, "vca_model_config_agent_stream": null, "vca_model_config_apt_ftp_proxy": null, "vca_model_config_apt_http_proxy": null, "vca_model_config_apt_https_proxy": null, "vca_model_config_apt_mirror": null, "vca_model_config_apt_no_proxy": null, "vca_model_config_automatically_retry_hooks": null, "vca_model_config_backup_dir": null, "vca_model_config_cloudinit_userdata": null, "vca_model_config_container_image_metadata_url": null, "vca_model_config_container_image_stream": null, "vca_model_config_container_inherit_properties": null, "vca_model_config_container_networking_method": null, "vca_model_config_default_series": null, "vca_model_config_default_space": null, "vca_model_config_development": null, "vca_model_config_disable_network_management": null, "vca_model_config_egress_subnets": null, "vca_model_config_enable_os_refresh_update": null, "vca_model_config_enable_os_upgrade": null, "vca_model_config_fan_config": null, "vca_model_config_firewall_mode": null, "vca_model_config_ftp_proxy": null, "vca_model_config_http_proxy": null, "vca_model_config_https_proxy": null, "vca_model_config_ignore_machine_addresses": null, "vca_model_config_image_metadata_url": null, "vca_model_config_image_stream": null, "vca_model_config_juju_ftp_proxy": null, "vca_model_config_juju_http_proxy": null, "vca_model_config_juju_https_proxy": null, "vca_model_config_juju_no_proxy": null, "vca_model_config_logforward_enabled": null, "vca_model_config_logging_config": null, "vca_model_config_lxd_snap_channel": null, "vca_model_config_max_action_results_age": null, "vca_model_config_max_action_results_size": null, "vca_model_config_max_status_history_age": null, "vca_model_config_max_status_history_size": null, "vca_model_config_net_bond_reconfigure_delay": null, "vca_model_config_no_proxy": null, "vca_model_config_provisioner_harvest_mode": null, "vca_model_config_proxy_ssh": null, "vca_model_config_snap_http_proxy": null, "vca_model_config_snap_https_proxy": null, "vca_model_config_snap_store_assertions": null, "vca_model_config_snap_store_proxy": null, "vca_model_config_snap_store_proxy_url": null, "vca_model_config_ssl_hostname_verification": null, "vca_model_config_test_mode": null, "vca_model_config_transmit_vendor_metrics": null, "vca_model_config_update_status_hook_interval": null, "vca_port": null, "vca_pubkey": null, "vca_secret": null, "vca_stablerepourl": "https://charts.helm.sh/stable", "vca_user": null}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm", "exposed": false, "charm-url": "local:kubernetes/osm-lcm-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"database_commonkey": "osm", "debug_common_local_path": "/home/ubuntu/mark/git/osm/branches/master/common", "debug_lcm_local_path": "/home/ubuntu/mark/git/osm/branches/master/LCM", "debug_mode": true, "debug_n2vc_local_path": "/home/ubuntu/mark/git/osm/branches/master/N2VC", "debug_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUrOLpYylC9lRlIknpGeda2mzY+mqTYxLDj9Q5t2jerT/aHARSr7DBbkLroqb8bZLsHw3QSHOy9AjF7Y8z5HpkFHGL0do1A/a3MkY+TIX3+FVP8FuvSIb7fNofC2odH5Pj/5kY2TSQhGcsAeYejoYn6qQ0xElNJtWaoqPKkAe825TJkANc31YvokxYCbY9oHfzUPEXtS2nADJrn5drEgc/R8cAwPRNPs2EU/XT2u1m+UP5T9nHbFV9rjv7RhrezB1ynQ5IGsPteOCDIsLswLKpuSQ0JBpuYb6wKjzBlYYyMe1lQF+m9ZWEnywGzCEQncsOxF+GzSbxrrtTLOFgDAbT mark.beierl@canonical.com", "log_level": "DEBUG", "vca_cacert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1iT21oMThUaFc3NDNlSGhIckZQL1JzcXd5U01BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TkRBNApNREl5TXpReldoY05Nekl3TkRBNE1ESXlPRFF6V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMWJMYUMwemMKQzdKOGRSdkg0UEtSZzU5MEI0S0EzRXpTNXUxbW81QldLblBoQ3RPb1hIVU03ZnhvU2RlV1dKb0FKb0hWOFlaUApiVzF0MnBzZEtGbWlZYWxhdGNkUSs5VGU5dWMxbnRNRDRlTVFTSjVKQ0MrSW83SDdCSjY0bkV4dms4RWNmT0F3CnNxL1lvMnZJaHcwVTNDZk5LaWNPNHE4MW1jamlkc001Nmp3eHA2R05SaVY5bEszV2hXd0JKWjZMdkovUDZySDAKNU8yV2crK0pNOFMzdGlFV1N3SzhZTmxiYTVKUExkdnNPVkVWWVVsK0NUc0RpRGhzZ2hFSHU2RHBzSzd5dGw2aApWa3NFRjI4Y1krRmhhVXpXejk2d0JqM1M0UUdKQXV5K1dBWStnRVZZcXIrQ0dySkVNeEJLK0VPWjJ2MjJ1YW9iClJyNmo5dkZRQ2I5YVQ5RTV1SDRZWGhIelJ2YUZLQVB4b2J5OFFpR0cwRXJkZTA1ajFYU0NaS0EyMXEyODcvR2wKT0NWWUxMMVNBb1VIbUMzUEZGU25ycDYzdUxLSWVJVTAyb1o0L3JQODlnbUM4VXBJNXgxTEdKQXJ1OEd1NHltRApBR2FxSjNWdjQ0MjIyWEhJaThXZGdwUFNLZWpQeUlReW9HMHBuTTZRUk5KaUdYdXg5S3dIblV5bEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2cKM3VmTzhhajJCc2V2R0lMaEUxQUZpaTR3VWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWs1eXFQeDFFWEF3MApIM2VoeVJienhCa1VKQkgwU2RDT3drelY4MVp3dmxBbktLS3hOTTFBd2VSQUxrUTNIRlMrOW11L2szK2ZZRG1uCkxMR3ZLMjM2THYzWWZpMkVHd2ZpbC9icnQ3bU5pQkRtTDdvd0Vta09oVzVKYlRiM2RRcmxtelJsVXhIU1R4d0MKUUM2NWdQTkJJcTNUZUZmU2t6ZlA1N0FIK0ZHemZYNTVBa0pnbEZpQXRRcGFoQXgxVlRaWitPK3RjbWZSSW5mUQpDSzArZE5qc3VUd2NHbzhvYUpOamJmWHNPYlA1eWFneWV5d2ZxQ3lvRExnT2gwdUlGUVBiUlBRM1g0OUw3bzhmCnNGRm9CcmVNbjFJWjJBUlplc0dWYXRKSFhRb01WRzcrK3F1L0g2dVNEMFZtK3piNTBJbGVhZHZiQVR2aUlUTlgKYWNtQkRvSmdOQ1JsTEhBR3hWN2pabFIrSFBjUitOTGordklJOUloeHVaY09STW5kTHpyT2hGSjVqM2FuOG5kbApOdW9sR2c3WW1hRmJWdFo3aUdEWnBISTdSQVFSZitjNVlKbFVIbUwrMnpNR2xiZHlKL3B5cTRjUEJsVDZlWUhtCmxSVEhseXVRaTd2ZndneXJwVU53ajMvbkNUekxjWDVqaHp3L1h2aDlGeGZpL1FTTmxKREIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK", "vca_cloud": "lxd-cloud", "vca_host": "10.0.2.68", "vca_k8s_cloud": "microk8s", "vca_port": 17070, "vca_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+tPyU/gOogK/jQFbDgHtlaYhba8Y1SshxC5vL908ST2I6ku4+1XfIgVi8gfCUDRG8kzHL9S0i8iCvPYqCIasSEVD7+LCjYn19JZXWhnkwmlmHoW3a7ljw++d4aNWGKNWxiQOKKtM26ZH5yu1kKHtmZ1bcgrKGkQdiYBhzsKZ/8lRoWakGwZdDTdny6ZxmcvJ52GLyDs/K4jK730ogRVcsj7h3hb7KXKedNkX89ciAaus8m3HA9nMWsf8C0GRXR9ymGDml9pUORO8/6uOsccn5VQWHl5sitSG4K2W/5jBBNNmRQ8obV2ey7N+3nhb9luzhgk2Slj0XTjhnKOLP01Jn juju-client-key", "vca_secret": "86bbee23c74c078a3a67a95349788748", "vca_user": "admin"}, "subordinate": false, 
"status": {"current": "active", "message": "", "since": "2022-04-25T15:19:30.580696141Z", "version": ""}, "workload-version": ""}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm", "exposed": false, "charm-url": "local:kubernetes/osm-lcm-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"database_commonkey": "osm", "debug_common_local_path": "/home/ubuntu/mark/git/osm/branches/master/common", "debug_lcm_local_path": "/home/ubuntu/mark/git/osm/branches/master/LCM", "debug_mode": true, "debug_n2vc_local_path": "/home/ubuntu/mark/git/osm/branches/master/N2VC", "debug_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUrOLpYylC9lRlIknpGeda2mzY+mqTYxLDj9Q5t2jerT/aHARSr7DBbkLroqb8bZLsHw3QSHOy9AjF7Y8z5HpkFHGL0do1A/a3MkY+TIX3+FVP8FuvSIb7fNofC2odH5Pj/5kY2TSQhGcsAeYejoYn6qQ0xElNJtWaoqPKkAe825TJkANc31YvokxYCbY9oHfzUPEXtS2nADJrn5drEgc/R8cAwPRNPs2EU/XT2u1m+UP5T9nHbFV9rjv7RhrezB1ynQ5IGsPteOCDIsLswLKpuSQ0JBpuYb6wKjzBlYYyMe1lQF+m9ZWEnywGzCEQncsOxF+GzSbxrrtTLOFgDAbT mark.beierl@canonical.com", "log_level": "DEBUG", "vca_cacert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1iT21oMThUaFc3NDNlSGhIckZQL1JzcXd5U01BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TkRBNApNREl5TXpReldoY05Nekl3TkRBNE1ESXlPRFF6V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMWJMYUMwemMKQzdKOGRSdkg0UEtSZzU5MEI0S0EzRXpTNXUxbW81QldLblBoQ3RPb1hIVU03ZnhvU2RlV1dKb0FKb0hWOFlaUApiVzF0MnBzZEtGbWlZYWxhdGNkUSs5VGU5dWMxbnRNRDRlTVFTSjVKQ0MrSW83SDdCSjY0bkV4dms4RWNmT0F3CnNxL1lvMnZJaHcwVTNDZk5LaWNPNHE4MW1jamlkc001Nmp3eHA2R05SaVY5bEszV2hXd0JKWjZMdkovUDZySDAKNU8yV2crK0pNOFMzdGlFV1N3SzhZTmxiYTVKUExkdnNPVkVWWVVsK0NUc0RpRGhzZ2hFSHU2RHBzSzd5dGw2aApWa3NFRjI4Y1krRmhhVXpXejk2d0JqM1M0UUdKQXV5K1dBWStnRVZZcXIrQ0dySkVNeEJLK0VPWjJ2MjJ1YW9iClJyNmo5dkZRQ2I5YVQ5RTV1SDRZWGhIelJ2YUZLQVB4b2J5OFFpR0cwRXJkZTA1ajFYU0NaS0EyMXEyODcvR2wKT0NWWUxMMVNBb1VIbUMzUEZGU25ycDYzdUxLSWVJVTAyb1o0L3JQODlnbUM4VXBJNXgxTEdKQXJ1OEd1NHltRApBR2FxSjNWdjQ0MjIyWEhJaThXZGdwUFNLZWpQeUlReW9HMHBuTTZRUk5KaUdYdXg5S3dIblV5bEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2cKM3VmTzhhajJCc2V2R0lMaEUxQUZpaTR3VWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWs1eXFQeDFFWEF3MApIM2VoeVJienhCa1VKQkgwU2RDT3drelY4MVp3dmxBbktLS3hOTTFBd2VSQUxrUTNIRlMrOW11L2szK2ZZRG1uCkxMR3ZLMjM2THYzWWZpMkVHd2ZpbC9icnQ3bU5pQkRtTDdvd0Vta09oVzVKYlRiM2RRcmxtelJsVXhIU1R4d0MKUUM2NWdQTkJJcTNUZUZmU2t6ZlA1N0FIK0ZHemZYNTVBa0pnbEZpQXRRcGFoQXgxVlRaWitPK3RjbWZSSW5mUQpDSzArZE5qc3VUd2NHbzhvYUpOamJmWHNPYlA1eWFneWV5d2ZxQ3lvRExnT2gwdUlGUVBiUlBRM1g0OUw3bzhmCnNGRm9CcmVNbjFJWjJBUlplc0dWYXRKSFhRb01WRzcrK3F1L0g2dVNEMFZtK3piNTBJbGVhZHZiQVR2aUlUTlgKYWNtQkRvSmdOQ1JsTEhBR3hWN2pabFIrSFBjUitOTGordklJOUloeHVaY09STW5kTHpyT2hGSjVqM2FuOG5kbApOdW9sR2c3WW1hRmJWdFo3aUdEWnBISTdSQVFSZitjNVlKbFVIbUwrMnpNR2xiZHlKL3B5cTRjUEJsVDZlWUhtCmxSVEhseXVRaTd2ZndneXJwVU53ajMvbkNUekxjWDVqaHp3L1h2aDlGeGZpL1FTTmxKREIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK", "vca_cloud": "lxd-cloud", "vca_host": "10.0.2.68", "vca_k8s_cloud": "microk8s", "vca_port": 17070, "vca_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+tPyU/gOogK/jQFbDgHtlaYhba8Y1SshxC5vL908ST2I6ku4+1XfIgVi8gfCUDRG8kzHL9S0i8iCvPYqCIasSEVD7+LCjYn19JZXWhnkwmlmHoW3a7ljw++d4aNWGKNWxiQOKKtM26ZH5yu1kKHtmZ1bcgrKGkQdiYBhzsKZ/8lRoWakGwZdDTdny6ZxmcvJ52GLyDs/K4jK730ogRVcsj7h3hb7KXKedNkX89ciAaus8m3HA9nMWsf8C0GRXR9ymGDml9pUORO8/6uOsccn5VQWHl5sitSG4K2W/5jBBNNmRQ8obV2ey7N+3nhb9luzhgk2Slj0XTjhnKOLP01Jn juju-client-key", "vca_secret": "86bbee23c74c078a3a67a95349788748", "vca_user": "admin"}, "subordinate": false, 
"status": {"current": "active", "message": "", "since": "2022-04-25T15:19:30.580696141Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:19:46.158217393Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-25T15:19:47.020240886Z", "version": ""}}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "ch:amd64/kubernetes/osm-lcm-1", "charm-version": "", "life": "dying", "profile": null, "config": {"database_commonkey": "osm", "debug_common_local_path": null, "debug_lcm_local_path": null, "debug_mode": false, "debug_n2vc_local_path": null, "debug_pubkey": null, "image_pull_policy": "always", "log_level": "INFO", "mongodb_uri": null, "security_context": false, "vca_apiproxy": null, "vca_cacert": null, "vca_cloud": null, "vca_helm_ca_certs": "", "vca_host": null, "vca_k8s_cloud": null, "vca_model_config_agent_metadata_url": null, "vca_model_config_agent_stream": null, "vca_model_config_apt_ftp_proxy": null, "vca_model_config_apt_http_proxy": null, "vca_model_config_apt_https_proxy": null, "vca_model_config_apt_mirror": null, "vca_model_config_apt_no_proxy": null, "vca_model_config_automatically_retry_hooks": null, "vca_model_config_backup_dir": null, "vca_model_config_cloudinit_userdata": null, "vca_model_config_container_image_metadata_url": null, "vca_model_config_container_image_stream": null, "vca_model_config_container_inherit_properties": null, "vca_model_config_container_networking_method": null, "vca_model_config_default_series": null, "vca_model_config_default_space": null, "vca_model_config_development": null, "vca_model_config_disable_network_management": null, "vca_model_config_egress_subnets": null, "vca_model_config_enable_os_refresh_update": null, "vca_model_config_enable_os_upgrade": null, "vca_model_config_fan_config": null, "vca_model_config_firewall_mode": null, "vca_model_config_ftp_proxy": null, "vca_model_config_http_proxy": null, "vca_model_config_https_proxy": null, "vca_model_config_ignore_machine_addresses": null, "vca_model_config_image_metadata_url": null, "vca_model_config_image_stream": null, "vca_model_config_juju_ftp_proxy": null, "vca_model_config_juju_http_proxy": null, "vca_model_config_juju_https_proxy": null, "vca_model_config_juju_no_proxy": null, "vca_model_config_logforward_enabled": null, "vca_model_config_logging_config": null, "vca_model_config_lxd_snap_channel": null, "vca_model_config_max_action_results_age": null, "vca_model_config_max_action_results_size": null, "vca_model_config_max_status_history_age": null, "vca_model_config_max_status_history_size": null, "vca_model_config_net_bond_reconfigure_delay": null, "vca_model_config_no_proxy": null, "vca_model_config_provisioner_harvest_mode": null, "vca_model_config_proxy_ssh": null, "vca_model_config_snap_http_proxy": null, "vca_model_config_snap_https_proxy": null, "vca_model_config_snap_store_assertions": null, "vca_model_config_snap_store_proxy": null, "vca_model_config_snap_store_proxy_url": null, "vca_model_config_ssl_hostname_verification": null, "vca_model_config_test_mode": null, "vca_model_config_transmit_vendor_metrics": null, "vca_model_config_update_status_hook_interval": null, "vca_port": null, "vca_pubkey": null, "vca_secret": null, "vca_stablerepourl": "https://charts.helm.sh/stable", "vca_user": null}}]
+["charm", "remove", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "ch:amd64/kubernetes/osm-lcm-1", "charm-version": "", "life": "dying", "profile": null, "config": {"database_commonkey": "osm", "debug_common_local_path": null, "debug_lcm_local_path": null, "debug_mode": false, "debug_n2vc_local_path": null, "debug_pubkey": null, "image_pull_policy": "always", "log_level": "INFO", "mongodb_uri": null, "security_context": false, "vca_apiproxy": null, "vca_cacert": null, "vca_cloud": null, "vca_helm_ca_certs": "", "vca_host": null, "vca_k8s_cloud": null, "vca_model_config_agent_metadata_url": null, "vca_model_config_agent_stream": null, "vca_model_config_apt_ftp_proxy": null, "vca_model_config_apt_http_proxy": null, "vca_model_config_apt_https_proxy": null, "vca_model_config_apt_mirror": null, "vca_model_config_apt_no_proxy": null, "vca_model_config_automatically_retry_hooks": null, "vca_model_config_backup_dir": null, "vca_model_config_cloudinit_userdata": null, "vca_model_config_container_image_metadata_url": null, "vca_model_config_container_image_stream": null, "vca_model_config_container_inherit_properties": null, "vca_model_config_container_networking_method": null, "vca_model_config_default_series": null, "vca_model_config_default_space": null, "vca_model_config_development": null, "vca_model_config_disable_network_management": null, "vca_model_config_egress_subnets": null, "vca_model_config_enable_os_refresh_update": null, "vca_model_config_enable_os_upgrade": null, "vca_model_config_fan_config": null, "vca_model_config_firewall_mode": null, "vca_model_config_ftp_proxy": null, "vca_model_config_http_proxy": null, "vca_model_config_https_proxy": null, "vca_model_config_ignore_machine_addresses": null, "vca_model_config_image_metadata_url": null, "vca_model_config_image_stream": null, "vca_model_config_juju_ftp_proxy": null, "vca_model_config_juju_http_proxy": null, "vca_model_config_juju_https_proxy": null, "vca_model_config_juju_no_proxy": null, "vca_model_config_logforward_enabled": null, "vca_model_config_logging_config": null, "vca_model_config_lxd_snap_channel": null, "vca_model_config_max_action_results_age": null, "vca_model_config_max_action_results_size": null, "vca_model_config_max_status_history_age": null, "vca_model_config_max_status_history_size": null, "vca_model_config_net_bond_reconfigure_delay": null, "vca_model_config_no_proxy": null, "vca_model_config_provisioner_harvest_mode": null, "vca_model_config_proxy_ssh": null, "vca_model_config_snap_http_proxy": null, "vca_model_config_snap_https_proxy": null, "vca_model_config_snap_store_assertions": null, "vca_model_config_snap_store_proxy": null, "vca_model_config_snap_store_proxy_url": null, "vca_model_config_ssl_hostname_verification": null, "vca_model_config_test_mode": null, "vca_model_config_transmit_vendor_metrics": null, "vca_model_config_update_status_hook_interval": null, "vca_port": null, "vca_pubkey": null, "vca_secret": null, "vca_stablerepourl": "https://charts.helm.sh/stable", "vca_user": null}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm", "exposed": false, "charm-url": "local:kubernetes/osm-lcm-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"database_commonkey": "osm", "debug_common_local_path": "/home/ubuntu/mark/git/osm/branches/master/common", "debug_lcm_local_path": "/home/ubuntu/mark/git/osm/branches/master/LCM", "debug_mode": true, "debug_n2vc_local_path": "/home/ubuntu/mark/git/osm/branches/master/N2VC", "debug_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUrOLpYylC9lRlIknpGeda2mzY+mqTYxLDj9Q5t2jerT/aHARSr7DBbkLroqb8bZLsHw3QSHOy9AjF7Y8z5HpkFHGL0do1A/a3MkY+TIX3+FVP8FuvSIb7fNofC2odH5Pj/5kY2TSQhGcsAeYejoYn6qQ0xElNJtWaoqPKkAe825TJkANc31YvokxYCbY9oHfzUPEXtS2nADJrn5drEgc/R8cAwPRNPs2EU/XT2u1m+UP5T9nHbFV9rjv7RhrezB1ynQ5IGsPteOCDIsLswLKpuSQ0JBpuYb6wKjzBlYYyMe1lQF+m9ZWEnywGzCEQncsOxF+GzSbxrrtTLOFgDAbT mark.beierl@canonical.com", "log_level": "DEBUG", "vca_cacert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1iT21oMThUaFc3NDNlSGhIckZQL1JzcXd5U01BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TkRBNApNREl5TXpReldoY05Nekl3TkRBNE1ESXlPRFF6V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMWJMYUMwemMKQzdKOGRSdkg0UEtSZzU5MEI0S0EzRXpTNXUxbW81QldLblBoQ3RPb1hIVU03ZnhvU2RlV1dKb0FKb0hWOFlaUApiVzF0MnBzZEtGbWlZYWxhdGNkUSs5VGU5dWMxbnRNRDRlTVFTSjVKQ0MrSW83SDdCSjY0bkV4dms4RWNmT0F3CnNxL1lvMnZJaHcwVTNDZk5LaWNPNHE4MW1jamlkc001Nmp3eHA2R05SaVY5bEszV2hXd0JKWjZMdkovUDZySDAKNU8yV2crK0pNOFMzdGlFV1N3SzhZTmxiYTVKUExkdnNPVkVWWVVsK0NUc0RpRGhzZ2hFSHU2RHBzSzd5dGw2aApWa3NFRjI4Y1krRmhhVXpXejk2d0JqM1M0UUdKQXV5K1dBWStnRVZZcXIrQ0dySkVNeEJLK0VPWjJ2MjJ1YW9iClJyNmo5dkZRQ2I5YVQ5RTV1SDRZWGhIelJ2YUZLQVB4b2J5OFFpR0cwRXJkZTA1ajFYU0NaS0EyMXEyODcvR2wKT0NWWUxMMVNBb1VIbUMzUEZGU25ycDYzdUxLSWVJVTAyb1o0L3JQODlnbUM4VXBJNXgxTEdKQXJ1OEd1NHltRApBR2FxSjNWdjQ0MjIyWEhJaThXZGdwUFNLZWpQeUlReW9HMHBuTTZRUk5KaUdYdXg5S3dIblV5bEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2cKM3VmTzhhajJCc2V2R0lMaEUxQUZpaTR3VWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWs1eXFQeDFFWEF3MApIM2VoeVJienhCa1VKQkgwU2RDT3drelY4MVp3dmxBbktLS3hOTTFBd2VSQUxrUTNIRlMrOW11L2szK2ZZRG1uCkxMR3ZLMjM2THYzWWZpMkVHd2ZpbC9icnQ3bU5pQkRtTDdvd0Vta09oVzVKYlRiM2RRcmxtelJsVXhIU1R4d0MKUUM2NWdQTkJJcTNUZUZmU2t6ZlA1N0FIK0ZHemZYNTVBa0pnbEZpQXRRcGFoQXgxVlRaWitPK3RjbWZSSW5mUQpDSzArZE5qc3VUd2NHbzhvYUpOamJmWHNPYlA1eWFneWV5d2ZxQ3lvRExnT2gwdUlGUVBiUlBRM1g0OUw3bzhmCnNGRm9CcmVNbjFJWjJBUlplc0dWYXRKSFhRb01WRzcrK3F1L0g2dVNEMFZtK3piNTBJbGVhZHZiQVR2aUlUTlgKYWNtQkRvSmdOQ1JsTEhBR3hWN2pabFIrSFBjUitOTGordklJOUloeHVaY09STW5kTHpyT2hGSjVqM2FuOG5kbApOdW9sR2c3WW1hRmJWdFo3aUdEWnBISTdSQVFSZitjNVlKbFVIbUwrMnpNR2xiZHlKL3B5cTRjUEJsVDZlWUhtCmxSVEhseXVRaTd2ZndneXJwVU53ajMvbkNUekxjWDVqaHp3L1h2aDlGeGZpL1FTTmxKREIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK", "vca_cloud": "lxd-cloud", "vca_host": "10.0.2.68", "vca_k8s_cloud": "microk8s", "vca_port": 17070, "vca_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+tPyU/gOogK/jQFbDgHtlaYhba8Y1SshxC5vL908ST2I6ku4+1XfIgVi8gfCUDRG8kzHL9S0i8iCvPYqCIasSEVD7+LCjYn19JZXWhnkwmlmHoW3a7ljw++d4aNWGKNWxiQOKKtM26ZH5yu1kKHtmZ1bcgrKGkQdiYBhzsKZ/8lRoWakGwZdDTdny6ZxmcvJ52GLyDs/K4jK730ogRVcsj7h3hb7KXKedNkX89ciAaus8m3HA9nMWsf8C0GRXR9ymGDml9pUORO8/6uOsccn5VQWHl5sitSG4K2W/5jBBNNmRQ8obV2ey7N+3nhb9luzhgk2Slj0XTjhnKOLP01Jn juju-client-key", "vca_secret": "86bbee23c74c078a3a67a95349788748", "vca_user": "admin"}, "subordinate": false, 
"status": {"current": "active", "message": "", "since": "2022-04-25T15:19:30.580696141Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:19:46.158217393Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T13:52:43.299439405Z", "version": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm", "exposed": false, "charm-url": "local:kubernetes/osm-lcm-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"database_commonkey": "osm", "debug_common_local_path": "/home/ubuntu/mark/git/osm/branches/master/common", "debug_lcm_local_path": "/home/ubuntu/mark/git/osm/branches/master/LCM", "debug_mode": true, "debug_n2vc_local_path": "/home/ubuntu/mark/git/osm/branches/master/N2VC", "debug_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUrOLpYylC9lRlIknpGeda2mzY+mqTYxLDj9Q5t2jerT/aHARSr7DBbkLroqb8bZLsHw3QSHOy9AjF7Y8z5HpkFHGL0do1A/a3MkY+TIX3+FVP8FuvSIb7fNofC2odH5Pj/5kY2TSQhGcsAeYejoYn6qQ0xElNJtWaoqPKkAe825TJkANc31YvokxYCbY9oHfzUPEXtS2nADJrn5drEgc/R8cAwPRNPs2EU/XT2u1m+UP5T9nHbFV9rjv7RhrezB1ynQ5IGsPteOCDIsLswLKpuSQ0JBpuYb6wKjzBlYYyMe1lQF+m9ZWEnywGzCEQncsOxF+GzSbxrrtTLOFgDAbT mark.beierl@canonical.com", "log_level": "DEBUG", "vca_cacert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1iT21oMThUaFc3NDNlSGhIckZQL1JzcXd5U01BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TkRBNApNREl5TXpReldoY05Nekl3TkRBNE1ESXlPRFF6V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMWJMYUMwemMKQzdKOGRSdkg0UEtSZzU5MEI0S0EzRXpTNXUxbW81QldLblBoQ3RPb1hIVU03ZnhvU2RlV1dKb0FKb0hWOFlaUApiVzF0MnBzZEtGbWlZYWxhdGNkUSs5VGU5dWMxbnRNRDRlTVFTSjVKQ0MrSW83SDdCSjY0bkV4dms4RWNmT0F3CnNxL1lvMnZJaHcwVTNDZk5LaWNPNHE4MW1jamlkc001Nmp3eHA2R05SaVY5bEszV2hXd0JKWjZMdkovUDZySDAKNU8yV2crK0pNOFMzdGlFV1N3SzhZTmxiYTVKUExkdnNPVkVWWVVsK0NUc0RpRGhzZ2hFSHU2RHBzSzd5dGw2aApWa3NFRjI4Y1krRmhhVXpXejk2d0JqM1M0UUdKQXV5K1dBWStnRVZZcXIrQ0dySkVNeEJLK0VPWjJ2MjJ1YW9iClJyNmo5dkZRQ2I5YVQ5RTV1SDRZWGhIelJ2YUZLQVB4b2J5OFFpR0cwRXJkZTA1ajFYU0NaS0EyMXEyODcvR2wKT0NWWUxMMVNBb1VIbUMzUEZGU25ycDYzdUxLSWVJVTAyb1o0L3JQODlnbUM4VXBJNXgxTEdKQXJ1OEd1NHltRApBR2FxSjNWdjQ0MjIyWEhJaThXZGdwUFNLZWpQeUlReW9HMHBuTTZRUk5KaUdYdXg5S3dIblV5bEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2cKM3VmTzhhajJCc2V2R0lMaEUxQUZpaTR3VWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWs1eXFQeDFFWEF3MApIM2VoeVJienhCa1VKQkgwU2RDT3drelY4MVp3dmxBbktLS3hOTTFBd2VSQUxrUTNIRlMrOW11L2szK2ZZRG1uCkxMR3ZLMjM2THYzWWZpMkVHd2ZpbC9icnQ3bU5pQkRtTDdvd0Vta09oVzVKYlRiM2RRcmxtelJsVXhIU1R4d0MKUUM2NWdQTkJJcTNUZUZmU2t6ZlA1N0FIK0ZHemZYNTVBa0pnbEZpQXRRcGFoQXgxVlRaWitPK3RjbWZSSW5mUQpDSzArZE5qc3VUd2NHbzhvYUpOamJmWHNPYlA1eWFneWV5d2ZxQ3lvRExnT2gwdUlGUVBiUlBRM1g0OUw3bzhmCnNGRm9CcmVNbjFJWjJBUlplc0dWYXRKSFhRb01WRzcrK3F1L0g2dVNEMFZtK3piNTBJbGVhZHZiQVR2aUlUTlgKYWNtQkRvSmdOQ1JsTEhBR3hWN2pabFIrSFBjUitOTGordklJOUloeHVaY09STW5kTHpyT2hGSjVqM2FuOG5kbApOdW9sR2c3WW1hRmJWdFo3aUdEWnBISTdSQVFSZitjNVlKbFVIbUwrMnpNR2xiZHlKL3B5cTRjUEJsVDZlWUhtCmxSVEhseXVRaTd2ZndneXJwVU53ajMvbkNUekxjWDVqaHp3L1h2aDlGeGZpL1FTTmxKREIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK", "vca_cloud": "lxd-cloud", "vca_host": "10.0.2.68", "vca_k8s_cloud": "microk8s", "vca_port": 17070, "vca_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+tPyU/gOogK/jQFbDgHtlaYhba8Y1SshxC5vL908ST2I6ku4+1XfIgVi8gfCUDRG8kzHL9S0i8iCvPYqCIasSEVD7+LCjYn19JZXWhnkwmlmHoW3a7ljw++d4aNWGKNWxiQOKKtM26ZH5yu1kKHtmZ1bcgrKGkQdiYBhzsKZ/8lRoWakGwZdDTdny6ZxmcvJ52GLyDs/K4jK730ogRVcsj7h3hb7KXKedNkX89ciAaus8m3HA9nMWsf8C0GRXR9ymGDml9pUORO8/6uOsccn5VQWHl5sitSG4K2W/5jBBNNmRQ8obV2ey7N+3nhb9luzhgk2Slj0XTjhnKOLP01Jn juju-client-key", "vca_secret": "86bbee23c74c078a3a67a95349788748", "vca_user": "admin"}, "subordinate": false, 
"status": {"current": "active", "message": "", "since": "2022-04-25T15:19:30.580696141Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:19:46.158217393Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T13:52:44.718162892Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "Assembling pod spec", "since": "2022-04-27T13:52:45.691682061Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T13:52:44.718162892Z", "version": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm", "exposed": false, "charm-url": "local:kubernetes/osm-lcm-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"database_commonkey": "osm", "debug_common_local_path": "/home/ubuntu/mark/git/osm/branches/master/common", "debug_lcm_local_path": "/home/ubuntu/mark/git/osm/branches/master/LCM", "debug_mode": true, "debug_n2vc_local_path": "/home/ubuntu/mark/git/osm/branches/master/N2VC", "debug_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUrOLpYylC9lRlIknpGeda2mzY+mqTYxLDj9Q5t2jerT/aHARSr7DBbkLroqb8bZLsHw3QSHOy9AjF7Y8z5HpkFHGL0do1A/a3MkY+TIX3+FVP8FuvSIb7fNofC2odH5Pj/5kY2TSQhGcsAeYejoYn6qQ0xElNJtWaoqPKkAe825TJkANc31YvokxYCbY9oHfzUPEXtS2nADJrn5drEgc/R8cAwPRNPs2EU/XT2u1m+UP5T9nHbFV9rjv7RhrezB1ynQ5IGsPteOCDIsLswLKpuSQ0JBpuYb6wKjzBlYYyMe1lQF+m9ZWEnywGzCEQncsOxF+GzSbxrrtTLOFgDAbT mark.beierl@canonical.com", "log_level": "DEBUG", "vca_cacert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1iT21oMThUaFc3NDNlSGhIckZQL1JzcXd5U01BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TkRBNApNREl5TXpReldoY05Nekl3TkRBNE1ESXlPRFF6V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMWJMYUMwemMKQzdKOGRSdkg0UEtSZzU5MEI0S0EzRXpTNXUxbW81QldLblBoQ3RPb1hIVU03ZnhvU2RlV1dKb0FKb0hWOFlaUApiVzF0MnBzZEtGbWlZYWxhdGNkUSs5VGU5dWMxbnRNRDRlTVFTSjVKQ0MrSW83SDdCSjY0bkV4dms4RWNmT0F3CnNxL1lvMnZJaHcwVTNDZk5LaWNPNHE4MW1jamlkc001Nmp3eHA2R05SaVY5bEszV2hXd0JKWjZMdkovUDZySDAKNU8yV2crK0pNOFMzdGlFV1N3SzhZTmxiYTVKUExkdnNPVkVWWVVsK0NUc0RpRGhzZ2hFSHU2RHBzSzd5dGw2aApWa3NFRjI4Y1krRmhhVXpXejk2d0JqM1M0UUdKQXV5K1dBWStnRVZZcXIrQ0dySkVNeEJLK0VPWjJ2MjJ1YW9iClJyNmo5dkZRQ2I5YVQ5RTV1SDRZWGhIelJ2YUZLQVB4b2J5OFFpR0cwRXJkZTA1ajFYU0NaS0EyMXEyODcvR2wKT0NWWUxMMVNBb1VIbUMzUEZGU25ycDYzdUxLSWVJVTAyb1o0L3JQODlnbUM4VXBJNXgxTEdKQXJ1OEd1NHltRApBR2FxSjNWdjQ0MjIyWEhJaThXZGdwUFNLZWpQeUlReW9HMHBuTTZRUk5KaUdYdXg5S3dIblV5bEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2cKM3VmTzhhajJCc2V2R0lMaEUxQUZpaTR3VWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWs1eXFQeDFFWEF3MApIM2VoeVJienhCa1VKQkgwU2RDT3drelY4MVp3dmxBbktLS3hOTTFBd2VSQUxrUTNIRlMrOW11L2szK2ZZRG1uCkxMR3ZLMjM2THYzWWZpMkVHd2ZpbC9icnQ3bU5pQkRtTDdvd0Vta09oVzVKYlRiM2RRcmxtelJsVXhIU1R4d0MKUUM2NWdQTkJJcTNUZUZmU2t6ZlA1N0FIK0ZHemZYNTVBa0pnbEZpQXRRcGFoQXgxVlRaWitPK3RjbWZSSW5mUQpDSzArZE5qc3VUd2NHbzhvYUpOamJmWHNPYlA1eWFneWV5d2ZxQ3lvRExnT2gwdUlGUVBiUlBRM1g0OUw3bzhmCnNGRm9CcmVNbjFJWjJBUlplc0dWYXRKSFhRb01WRzcrK3F1L0g2dVNEMFZtK3piNTBJbGVhZHZiQVR2aUlUTlgKYWNtQkRvSmdOQ1JsTEhBR3hWN2pabFIrSFBjUitOTGordklJOUloeHVaY09STW5kTHpyT2hGSjVqM2FuOG5kbApOdW9sR2c3WW1hRmJWdFo3aUdEWnBISTdSQVFSZitjNVlKbFVIbUwrMnpNR2xiZHlKL3B5cTRjUEJsVDZlWUhtCmxSVEhseXVRaTd2ZndneXJwVU53ajMvbkNUekxjWDVqaHp3L1h2aDlGeGZpL1FTTmxKREIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK", "vca_cloud": "lxd-cloud", "vca_host": "10.0.2.68", "vca_k8s_cloud": "microk8s", "vca_port": 17070, "vca_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+tPyU/gOogK/jQFbDgHtlaYhba8Y1SshxC5vL908ST2I6ku4+1XfIgVi8gfCUDRG8kzHL9S0i8iCvPYqCIasSEVD7+LCjYn19JZXWhnkwmlmHoW3a7ljw++d4aNWGKNWxiQOKKtM26ZH5yu1kKHtmZ1bcgrKGkQdiYBhzsKZ/8lRoWakGwZdDTdny6ZxmcvJ52GLyDs/K4jK730ogRVcsj7h3hb7KXKedNkX89ciAaus8m3HA9nMWsf8C0GRXR9ymGDml9pUORO8/6uOsccn5VQWHl5sitSG4K2W/5jBBNNmRQ8obV2ey7N+3nhb9luzhgk2Slj0XTjhnKOLP01Jn juju-client-key", "vca_secret": "86bbee23c74c078a3a67a95349788748", "vca_user": "admin"}, "subordinate": false, 
"status": {"current": "waiting", "message": "", "since": "2022-04-27T13:52:46.113865949Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T13:52:44.718162892Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "installing agent", "since": "2022-04-27T13:52:46.170916945Z", "version": ""}, "agent-status": {"current": "allocating", "message": "", "since": "2022-04-27T13:52:46.170916945Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:46.185629877Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "agent initializing", "since": "2022-04-27T13:52:46.396291377Z", "version": ""}, "agent-status": {"current": "allocating", "message": "", "since": "2022-04-27T13:52:46.170916945Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "agent initializing", "since": "2022-04-27T13:52:46.396291377Z", "version": ""}, "agent-status": {"current": "allocating", "message": "", "since": "2022-04-27T13:52:46.170916945Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:47.626524855Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:47.626524855Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:49.020057468Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:49.020057468Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "allocating", "message": "", "since": "2022-04-27T13:52:46.170916945Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "executing", "message": "running install hook", "since": "2022-04-27T13:52:50.406261397Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-created hook", "since": "2022-04-27T13:52:52.218957218Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "ro/0", "application": "ro", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/osm-ro-1", "life": "alive", "public-address": "", "private-address": "10.152.183.73", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:03:19.691982951Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-joined hook for lcm/10", "since": "2022-04-27T13:52:52.325816598Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "executing", "message": "running database-relation-joined hook for lcm/10", "since": "2022-04-27T13:52:52.333131271Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T17:38:51.675080168Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-joined hook for lcm/10", "since": "2022-04-27T13:52:52.343941917Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T17:38:51.675080168Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-changed hook for lcm/10", "since": "2022-04-27T13:52:53.180263675Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-created hook", "since": "2022-04-27T13:52:53.81029874Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T17:38:51.675080168Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:53.921515789Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "executing", "message": "running database-relation-changed hook for lcm/10", "since": "2022-04-27T13:52:54.095455492Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "ro/0", "application": "ro", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/osm-ro-1", "life": "alive", "public-address": "", "private-address": "10.152.183.73", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:03:19.691982951Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-changed hook for lcm/10", "since": "2022-04-27T13:52:54.485374136Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-created hook", "since": "2022-04-27T13:52:55.252323315Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "ro/0", "application": "ro", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/osm-ro-1", "life": "alive", "public-address": "", "private-address": "10.152.183.73", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:03:19.691982951Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:55.946718559Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:56.207634629Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-settings-changed hook", "since": "2022-04-27T13:52:56.781189067Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T13:53:00.13054224Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T13:53:00.13054224Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running start hook", "since": "2022-04-27T13:53:02.069519075Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:03.159295668Z", "version": ""}}]
+##########################################################################################################################################################################################################################################################
+# The following events are visible during a stateless charm upgrade, but there is currently no way to link them to the overall upgrade change (see the sketch after these events)
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:03.159295668Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:03.161083444Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-joined hook for mongodb/0", "since": "2022-04-27T13:53:03.638418924Z", "version": ""}}]
+#["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm", "exposed": false, "charm-url": "local:kubernetes/osm-lcm-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"database_commonkey": "osm", "debug_common_local_path": "/home/ubuntu/mark/git/osm/branches/master/common", "debug_lcm_local_path": "/home/ubuntu/mark/git/osm/branches/master/LCM", "debug_mode": true, "debug_n2vc_local_path": "/home/ubuntu/mark/git/osm/branches/master/N2VC", "debug_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUrOLpYylC9lRlIknpGeda2mzY+mqTYxLDj9Q5t2jerT/aHARSr7DBbkLroqb8bZLsHw3QSHOy9AjF7Y8z5HpkFHGL0do1A/a3MkY+TIX3+FVP8FuvSIb7fNofC2odH5Pj/5kY2TSQhGcsAeYejoYn6qQ0xElNJtWaoqPKkAe825TJkANc31YvokxYCbY9oHfzUPEXtS2nADJrn5drEgc/R8cAwPRNPs2EU/XT2u1m+UP5T9nHbFV9rjv7RhrezB1ynQ5IGsPteOCDIsLswLKpuSQ0JBpuYb6wKjzBlYYyMe1lQF+m9ZWEnywGzCEQncsOxF+GzSbxrrtTLOFgDAbT mark.beierl@canonical.com", "log_level": "DEBUG", "vca_cacert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1iT21oMThUaFc3NDNlSGhIckZQL1JzcXd5U01BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TkRBNApNREl5TXpReldoY05Nekl3TkRBNE1ESXlPRFF6V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMWJMYUMwemMKQzdKOGRSdkg0UEtSZzU5MEI0S0EzRXpTNXUxbW81QldLblBoQ3RPb1hIVU03ZnhvU2RlV1dKb0FKb0hWOFlaUApiVzF0MnBzZEtGbWlZYWxhdGNkUSs5VGU5dWMxbnRNRDRlTVFTSjVKQ0MrSW83SDdCSjY0bkV4dms4RWNmT0F3CnNxL1lvMnZJaHcwVTNDZk5LaWNPNHE4MW1jamlkc001Nmp3eHA2R05SaVY5bEszV2hXd0JKWjZMdkovUDZySDAKNU8yV2crK0pNOFMzdGlFV1N3SzhZTmxiYTVKUExkdnNPVkVWWVVsK0NUc0RpRGhzZ2hFSHU2RHBzSzd5dGw2aApWa3NFRjI4Y1krRmhhVXpXejk2d0JqM1M0UUdKQXV5K1dBWStnRVZZcXIrQ0dySkVNeEJLK0VPWjJ2MjJ1YW9iClJyNmo5dkZRQ2I5YVQ5RTV1SDRZWGhIelJ2YUZLQVB4b2J5OFFpR0cwRXJkZTA1ajFYU0NaS0EyMXEyODcvR2wKT0NWWUxMMVNBb1VIbUMzUEZGU25ycDYzdUxLSWVJVTAyb1o0L3JQODlnbUM4VXBJNXgxTEdKQXJ1OEd1NHltRApBR2FxSjNWdjQ0MjIyWEhJaThXZGdwUFNLZWpQeUlReW9HMHBuTTZRUk5KaUdYdXg5S3dIblV5bEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2cKM3VmTzhhajJCc2V2R0lMaEUxQUZpaTR3VWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWs1eXFQeDFFWEF3MApIM2VoeVJienhCa1VKQkgwU2RDT3drelY4MVp3dmxBbktLS3hOTTFBd2VSQUxrUTNIRlMrOW11L2szK2ZZRG1uCkxMR3ZLMjM2THYzWWZpMkVHd2ZpbC9icnQ3bU5pQkRtTDdvd0Vta09oVzVKYlRiM2RRcmxtelJsVXhIU1R4d0MKUUM2NWdQTkJJcTNUZUZmU2t6ZlA1N0FIK0ZHemZYNTVBa0pnbEZpQXRRcGFoQXgxVlRaWitPK3RjbWZSSW5mUQpDSzArZE5qc3VUd2NHbzhvYUpOamJmWHNPYlA1eWFneWV5d2ZxQ3lvRExnT2gwdUlGUVBiUlBRM1g0OUw3bzhmCnNGRm9CcmVNbjFJWjJBUlplc0dWYXRKSFhRb01WRzcrK3F1L0g2dVNEMFZtK3piNTBJbGVhZHZiQVR2aUlUTlgKYWNtQkRvSmdOQ1JsTEhBR3hWN2pabFIrSFBjUitOTGordklJOUloeHVaY09STW5kTHpyT2hGSjVqM2FuOG5kbApOdW9sR2c3WW1hRmJWdFo3aUdEWnBISTdSQVFSZitjNVlKbFVIbUwrMnpNR2xiZHlKL3B5cTRjUEJsVDZlWUhtCmxSVEhseXVRaTd2ZndneXJwVU53ajMvbkNUekxjWDVqaHp3L1h2aDlGeGZpL1FTTmxKREIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK", "vca_cloud": "lxd-cloud", "vca_host": "10.0.2.68", "vca_k8s_cloud": "microk8s", "vca_port": 17070, "vca_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+tPyU/gOogK/jQFbDgHtlaYhba8Y1SshxC5vL908ST2I6ku4+1XfIgVi8gfCUDRG8kzHL9S0i8iCvPYqCIasSEVD7+LCjYn19JZXWhnkwmlmHoW3a7ljw++d4aNWGKNWxiQOKKtM26ZH5yu1kKHtmZ1bcgrKGkQdiYBhzsKZ/8lRoWakGwZdDTdny6ZxmcvJ52GLyDs/K4jK730ogRVcsj7h3hb7KXKedNkX89ciAaus8m3HA9nMWsf8C0GRXR9ymGDml9pUORO8/6uOsccn5VQWHl5sitSG4K2W/5jBBNNmRQ8obV2ey7N+3nhb9luzhgk2Slj0XTjhnKOLP01Jn juju-client-key", "vca_secret": "86bbee23c74c078a3a67a95349788748", "vca_user": "admin"}, "subordinate": false, 
"status": {"current": "active", "message": "", "since": "2022-04-27T13:53:04.151820427Z", "version": ""}, "workload-version": ""}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:04.183165399Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:04.374726337Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T17:38:51.675080168Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-departed hook for lcm/9", "since": "2022-04-27T13:53:04.530097985Z", "version": "2.9.22"}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "executing", "message": "running database-relation-departed hook for lcm/9", "since": "2022-04-27T13:53:04.546541075Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "ro/0", "application": "ro", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/osm-ro-1", "life": "alive", "public-address": "", "private-address": "10.152.183.73", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:03:19.691982951Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-departed hook for lcm/9", "since": "2022-04-27T13:53:04.579582114Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-settings-changed hook", "since": "2022-04-27T13:53:05.20239186Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T17:38:51.675080168Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:05.375082613Z", "version": "2.9.22"}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "ro/0", "application": "ro", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/osm-ro-1", "life": "alive", "public-address": "", "private-address": "10.152.183.73", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:03:19.691982951Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:06.287930066Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:06.339773748Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-changed hook for mongodb/0", "since": "2022-04-27T13:53:06.794110477Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-departed hook for ro/0", "since": "2022-04-27T13:53:08.598222736Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-changed hook", "since": "2022-04-27T13:53:09.895852655Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-departed hook for kafka/0", "since": "2022-04-27T13:53:11.774748891Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-changed hook", "since": "2022-04-27T13:53:13.053185245Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-departed hook for mongodb/0", "since": "2022-04-27T13:53:14.670972589Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-joined hook for kafka/0", "since": "2022-04-27T13:53:16.050710673Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-broken hook", "since": "2022-04-27T13:53:17.564633836Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "Assembling pod spec", "since": "2022-04-27T13:53:18.455720015Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-broken hook", "since": "2022-04-27T13:53:17.564633836Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "blocked", "message": "Need kafka, mongodb, ro relations", "since": "2022-04-27T13:53:18.840406999Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-broken hook", "since": "2022-04-27T13:53:17.564633836Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-joined hook for ro/0", "since": "2022-04-27T13:53:19.686052274Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "blocked", "message": "Need kafka, mongodb, ro relations", "since": "2022-04-27T13:53:18.840406999Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-broken hook", "since": "2022-04-27T13:53:21.149271009Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "Assembling pod spec", "since": "2022-04-27T13:53:22.073656705Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-broken hook", "since": "2022-04-27T13:53:21.149271009Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "blocked", "message": "Need kafka, mongodb, ro relations", "since": "2022-04-27T13:53:22.515373602Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-broken hook", "since": "2022-04-27T13:53:21.149271009Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-changed hook for ro/0", "since": "2022-04-27T13:53:23.355070294Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "blocked", "message": "Need kafka, mongodb, ro relations", "since": "2022-04-27T13:53:22.515373602Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-broken hook", "since": "2022-04-27T13:53:25.062699158Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "Assembling pod spec", "since": "2022-04-27T13:53:25.93483528Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-broken hook", "since": "2022-04-27T13:53:25.062699158Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "blocked", "message": "Need kafka, mongodb, ro relations", "since": "2022-04-27T13:53:26.411373222Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-broken hook", "since": "2022-04-27T13:53:25.062699158Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-changed hook for kafka/0", "since": "2022-04-27T13:53:27.418670221Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:29.278763461Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "stopping charm software", "since": "2022-04-27T13:53:29.342891381Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-broken hook", "since": "2022-04-27T13:53:25.062699158Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "stopping charm software", "since": "2022-04-27T13:53:29.342891381Z", "version": ""}, "agent-status": {"current": "executing", "message": "running stop hook", "since": "2022-04-27T13:53:29.36656005Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T13:53:30.616545619Z", "version": ""}, "agent-status": {"current": "executing", "message": "running stop hook", "since": "2022-04-27T13:53:29.36656005Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "cleaning up prior to charm deletion", "since": "2022-04-27T13:53:31.150790695Z", "version": ""}, "agent-status": {"current": "executing", "message": "running stop hook", "since": "2022-04-27T13:53:29.36656005Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "cleaning up prior to charm deletion", "since": "2022-04-27T13:53:31.150790695Z", "version": ""}, "agent-status": {"current": "executing", "message": "running remove hook", "since": "2022-04-27T13:53:31.185567398Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "terminated", "message": "", "since": "2022-04-27T13:53:32.316608499Z", "version": ""}, "agent-status": {"current": "executing", "message": "running remove hook", "since": "2022-04-27T13:53:31.185567398Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dead", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "terminated", "message": "", "since": "2022-04-27T13:53:32.316608499Z", "version": ""}, "agent-status": {"current": "executing", "message": "running remove hook", "since": "2022-04-27T13:53:31.185567398Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-elected hook", "since": "2022-04-27T13:53:32.725754517Z", "version": ""}}]
+#["unit", "remove", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dead", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "terminated", "message": "", "since": "2022-04-27T13:53:32.316608499Z", "version": ""}, "agent-status": {"current": "executing", "message": "running remove hook", "since": "2022-04-27T13:53:31.185567398Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "Assembling pod spec", "since": "2022-04-27T13:53:33.678220029Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-elected hook", "since": "2022-04-27T13:53:32.725754517Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:34.328293936Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-elected hook", "since": "2022-04-27T13:53:32.725754517Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:34.328293936Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:34.770344608Z", "version": ""}}]
\ No newline at end of file
diff --git a/n2vc/tests/unit/testdata/upgrade-sidecar.log b/n2vc/tests/unit/testdata/upgrade-sidecar.log
new file mode 100644 (file)
index 0000000..c6608b8
--- /dev/null
@@ -0,0 +1,39 @@
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:focal/kafka-k8s-0", "charm-version": "", "life": "alive", "profile": null}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:focal/kafka-k8s-0", "charm-version": "", "life": "alive", "profile": null, "config": {"kafka-properties": "clientPort=2181\nbroker.id.generation.enable=true\nlisteners=PLAINTEXT://:9092\nadvertised.listeners=PLAINTEXT://:9092\nlog.dirs=/var/lib/kafka/data\nauto.create.topics.enable=true\nauto.leader.rebalance.enable=true\nbackground.threads=10\ncompression.type=producer\ndelete.topic.enable=false\nleader.imbalance.check.interval.seconds=300\nleader.imbalance.per.broker.percentage=10\nlog.flush.interval.messages=9223372036854775807\nlog.flush.offset.checkpoint.interval.ms=60000\nlog.flush.scheduler.interval.ms=9223372036854775807\nlog.retention.bytes=-1\nlog.retention.hours=168\nlog.roll.hours=168\nlog.roll.jitter.hours=0\nlog.segment.bytes=1073741824\nlog.segment.delete.delay.ms=60000\nmessage.max.bytes=1000012\nmin.insync.replicas=1\nnum.io.threads=8\nnum.network.threads=1\nnum.recovery.threads.per.data.dir=1\nnum.replica.fetchers=1\noffset.metadata.max.bytes=4096\noffsets.commit.required.acks=-1\noffsets.commit.timeout.ms=5000\noffsets.load.buffer.size=5242880\noffsets.retention.check.interval.ms=600000\noffsets.retention.minutes=1440\noffsets.topic.compression.codec=0\noffsets.topic.num.partitions=50\noffsets.topic.replication.factor=1\noffsets.topic.segment.bytes=104857600\nqueued.max.requests=500\nquota.consumer.default=9223372036854775807\nquota.producer.default=9223372036854775807\nreplica.fetch.min.bytes=1\nreplica.fetch.wait.max.ms=500\nreplica.high.watermark.checkpoint.interval.ms=5000\nreplica.lag.time.max.ms=10000\nreplica.socket.receive.buffer.bytes=65536\nreplica.socket.timeout.ms=30000\nrequest.timeout.ms=30000\nsocket.receive.buffer.bytes=102400\nsocket.request.max.bytes=104857600\nsocket.send.buffer.bytes=102400\nunclean.leader.election.enable=true\nzookeeper.session.timeout.ms=6000\nzookeeper.set.acl=false\nbroker.id.generation.enable=true\nconnections.max.idle.ms=600000\ncontrolled.shutdown.enable=true\ncontrolled.shutdown.max.retries=3\ncontrolled.shutdown.retry.backoff.ms=5000\ncontroller.socket.timeout.ms=30000\ndefault.replication.factor=1\nfetch.purgatory.purge.interval.requests=1000\ngroup.max.session.timeout.ms=300000\ngroup.min.session.timeout.ms=6000\nlog.cleaner.backoff.ms=15000\nlog.cleaner.dedupe.buffer.size=134217728\nlog.cleaner.delete.retention.ms=86400000\nlog.cleaner.enable=true\nlog.cleaner.io.buffer.load.factor=0.9\nlog.cleaner.io.buffer.size=524288\nlog.cleaner.io.max.bytes.per.second=1.7976931348623157E308\nlog.cleaner.min.cleanable.ratio=0.5\nlog.cleaner.min.compaction.lag.ms=0\nlog.cleaner.threads=1\nlog.cleanup.policy=delete\nlog.index.interval.bytes=4096\nlog.index.size.max.bytes=10485760\nlog.message.timestamp.difference.max.ms=9223372036854775807\nlog.message.timestamp.type=CreateTime\nlog.preallocate=false\nlog.retention.check.interval.ms=300000\nmax.connections.per.ip=2147483647\nnum.partitions=1\nproducer.purgatory.purge.interval.requests=1000\nreplica.fetch.backoff.ms=1000\nreplica.fetch.max.bytes=1048576\nreplica.fetch.response.max.bytes=10485760\nreserved.broker.max.id=1000\n", "metrics": true}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-24T18:39:35.346890724Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "stopping charm software", "since": "2022-04-27T18:32:59.063102743Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T17:36:41.956361285Z", "version": "2.9.22"}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-24T18:39:35.346890724Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "stopping charm software", "since": "2022-04-27T18:32:59.063102743Z", "version": ""}, "agent-status": {"current": "executing", "message": "running stop hook", "since": "2022-04-27T18:32:59.129679756Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T18:32:59.701881796Z", "version": ""}, "agent-status": {"current": "executing", "message": "running stop hook", "since": "2022-04-27T18:32:59.129679756Z", "version": "2.9.22"}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:00.285536226Z", "version": ""}, "workload-version": ""}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:01.685500631Z", "version": ""}, "workload-version": ""}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:03.885273135Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T18:32:59.701881796Z", "version": ""}, "agent-status": {"current": "allocating", "message": "Started container charm-init", "since": "2022-04-27T18:33:03.9134045Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T18:32:59.701881796Z", "version": ""}, "agent-status": {"current": "allocating", "message": "Started container charm-init", "since": "2022-04-27T18:33:03.9134045Z", "version": "2.9.22"}}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "ch:amd64/focal/kafka-k8s-5", "charm-version": "", "life": "dying", "profile": null, "config": {"kafka-properties": "clientPort=2181\nbroker.id.generation.enable=true\nlisteners=PLAINTEXT://:9092\nadvertised.listeners=PLAINTEXT://:9092\nlog.dirs=/var/lib/kafka/data\nauto.create.topics.enable=true\nauto.leader.rebalance.enable=true\nbackground.threads=10\ncompression.type=producer\ndelete.topic.enable=false\nleader.imbalance.check.interval.seconds=300\nleader.imbalance.per.broker.percentage=10\nlog.flush.interval.messages=9223372036854775807\nlog.flush.offset.checkpoint.interval.ms=60000\nlog.flush.scheduler.interval.ms=9223372036854775807\nlog.retention.bytes=-1\nlog.retention.hours=168\nlog.roll.hours=168\nlog.roll.jitter.hours=0\nlog.segment.bytes=1073741824\nlog.segment.delete.delay.ms=60000\nmessage.max.bytes=1000012\nmin.insync.replicas=1\nnum.io.threads=8\nnum.network.threads=1\nnum.recovery.threads.per.data.dir=1\nnum.replica.fetchers=1\noffset.metadata.max.bytes=4096\noffsets.commit.required.acks=-1\noffsets.commit.timeout.ms=5000\noffsets.load.buffer.size=5242880\noffsets.retention.check.interval.ms=600000\noffsets.retention.minutes=1440\noffsets.topic.compression.codec=0\noffsets.topic.num.partitions=50\noffsets.topic.replication.factor=1\noffsets.topic.segment.bytes=104857600\nqueued.max.requests=500\nquota.consumer.default=9223372036854775807\nquota.producer.default=9223372036854775807\nreplica.fetch.min.bytes=1\nreplica.fetch.wait.max.ms=500\nreplica.high.watermark.checkpoint.interval.ms=5000\nreplica.lag.time.max.ms=10000\nreplica.socket.receive.buffer.bytes=65536\nreplica.socket.timeout.ms=30000\nrequest.timeout.ms=30000\nsocket.receive.buffer.bytes=102400\nsocket.request.max.bytes=104857600\nsocket.send.buffer.bytes=102400\nunclean.leader.election.enable=true\nzookeeper.session.timeout.ms=6000\nzookeeper.set.acl=false\nbroker.id.generation.enable=true\nconnections.max.idle.ms=600000\ncontrolled.shutdown.enable=true\ncontrolled.shutdown.max.retries=3\ncontrolled.shutdown.retry.backoff.ms=5000\ncontroller.socket.timeout.ms=30000\ndefault.replication.factor=1\nfetch.purgatory.purge.interval.requests=1000\ngroup.max.session.timeout.ms=300000\ngroup.min.session.timeout.ms=6000\nlog.cleaner.backoff.ms=15000\nlog.cleaner.dedupe.buffer.size=134217728\nlog.cleaner.delete.retention.ms=86400000\nlog.cleaner.enable=true\nlog.cleaner.io.buffer.load.factor=0.9\nlog.cleaner.io.buffer.size=524288\nlog.cleaner.io.max.bytes.per.second=1.7976931348623157E308\nlog.cleaner.min.cleanable.ratio=0.5\nlog.cleaner.min.compaction.lag.ms=0\nlog.cleaner.threads=1\nlog.cleanup.policy=delete\nlog.index.interval.bytes=4096\nlog.index.size.max.bytes=10485760\nlog.message.timestamp.difference.max.ms=9223372036854775807\nlog.message.timestamp.type=CreateTime\nlog.preallocate=false\nlog.retention.check.interval.ms=300000\nmax.connections.per.ip=2147483647\nnum.partitions=1\nproducer.purgatory.purge.interval.requests=1000\nreplica.fetch.backoff.ms=1000\nreplica.fetch.max.bytes=1048576\nreplica.fetch.response.max.bytes=10485760\nreserved.broker.max.id=1000\n", "metrics": true}}]
+["charm", "remove", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "ch:amd64/focal/kafka-k8s-5", "charm-version": "", "life": "dying", "profile": null, "config": {"kafka-properties": "clientPort=2181\nbroker.id.generation.enable=true\nlisteners=PLAINTEXT://:9092\nadvertised.listeners=PLAINTEXT://:9092\nlog.dirs=/var/lib/kafka/data\nauto.create.topics.enable=true\nauto.leader.rebalance.enable=true\nbackground.threads=10\ncompression.type=producer\ndelete.topic.enable=false\nleader.imbalance.check.interval.seconds=300\nleader.imbalance.per.broker.percentage=10\nlog.flush.interval.messages=9223372036854775807\nlog.flush.offset.checkpoint.interval.ms=60000\nlog.flush.scheduler.interval.ms=9223372036854775807\nlog.retention.bytes=-1\nlog.retention.hours=168\nlog.roll.hours=168\nlog.roll.jitter.hours=0\nlog.segment.bytes=1073741824\nlog.segment.delete.delay.ms=60000\nmessage.max.bytes=1000012\nmin.insync.replicas=1\nnum.io.threads=8\nnum.network.threads=1\nnum.recovery.threads.per.data.dir=1\nnum.replica.fetchers=1\noffset.metadata.max.bytes=4096\noffsets.commit.required.acks=-1\noffsets.commit.timeout.ms=5000\noffsets.load.buffer.size=5242880\noffsets.retention.check.interval.ms=600000\noffsets.retention.minutes=1440\noffsets.topic.compression.codec=0\noffsets.topic.num.partitions=50\noffsets.topic.replication.factor=1\noffsets.topic.segment.bytes=104857600\nqueued.max.requests=500\nquota.consumer.default=9223372036854775807\nquota.producer.default=9223372036854775807\nreplica.fetch.min.bytes=1\nreplica.fetch.wait.max.ms=500\nreplica.high.watermark.checkpoint.interval.ms=5000\nreplica.lag.time.max.ms=10000\nreplica.socket.receive.buffer.bytes=65536\nreplica.socket.timeout.ms=30000\nrequest.timeout.ms=30000\nsocket.receive.buffer.bytes=102400\nsocket.request.max.bytes=104857600\nsocket.send.buffer.bytes=102400\nunclean.leader.election.enable=true\nzookeeper.session.timeout.ms=6000\nzookeeper.set.acl=false\nbroker.id.generation.enable=true\nconnections.max.idle.ms=600000\ncontrolled.shutdown.enable=true\ncontrolled.shutdown.max.retries=3\ncontrolled.shutdown.retry.backoff.ms=5000\ncontroller.socket.timeout.ms=30000\ndefault.replication.factor=1\nfetch.purgatory.purge.interval.requests=1000\ngroup.max.session.timeout.ms=300000\ngroup.min.session.timeout.ms=6000\nlog.cleaner.backoff.ms=15000\nlog.cleaner.dedupe.buffer.size=134217728\nlog.cleaner.delete.retention.ms=86400000\nlog.cleaner.enable=true\nlog.cleaner.io.buffer.load.factor=0.9\nlog.cleaner.io.buffer.size=524288\nlog.cleaner.io.max.bytes.per.second=1.7976931348623157E308\nlog.cleaner.min.cleanable.ratio=0.5\nlog.cleaner.min.compaction.lag.ms=0\nlog.cleaner.threads=1\nlog.cleanup.policy=delete\nlog.index.interval.bytes=4096\nlog.index.size.max.bytes=10485760\nlog.message.timestamp.difference.max.ms=9223372036854775807\nlog.message.timestamp.type=CreateTime\nlog.preallocate=false\nlog.retention.check.interval.ms=300000\nmax.connections.per.ip=2147483647\nnum.partitions=1\nproducer.purgatory.purge.interval.requests=1000\nreplica.fetch.backoff.ms=1000\nreplica.fetch.max.bytes=1048576\nreplica.fetch.response.max.bytes=10485760\nreserved.broker.max.id=1000\n", "metrics": true}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:05.885991239Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T18:32:59.701881796Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:33:05.939780569Z", "version": "2.9.22"}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:07.685099274Z", "version": ""}, "workload-version": ""}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:09.485853048Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T18:32:59.701881796Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T18:33:11.686940017Z", "version": "2.9.22"}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:09.485853048Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T18:32:59.701881796Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:33:13.166304447Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:33:14.910656736Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:33:13.166304447Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:33:14.910656736Z", "version": ""}, "agent-status": {"current": "executing", "message": "running start hook", "since": "2022-04-27T18:33:15.313510973Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:33:14.910656736Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-pebble-ready hook", "since": "2022-04-27T18:33:16.205042856Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:33:14.910656736Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:33:17.168708577Z", "version": "2.9.22"}}]
\ No newline at end of file
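
Note: each line in the upgrade-*.log test fixtures above is a JSON-encoded Juju AllWatcher delta of the form [entity-type, change-type, payload]. A minimal decoding sketch, illustration only and not part of this commit (the parse_delta helper is hypothetical):

import json

def parse_delta(line: str):
    # Each fixture line is a JSON array: [entity, verb, payload-dict].
    entity, verb, payload = json.loads(line)
    return entity, verb, payload

entity, verb, payload = parse_delta(
    '["application", "change", {"name": "kafka", "life": "alive"}]'
)
assert (entity, verb, payload["name"]) == ("application", "change", "kafka")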
index b2d5c60..807c892 100644 (file)
@@ -130,7 +130,6 @@ class FakeManualMachine(MagicMock):
 
 
 class FakeWatcher(AsyncMock):
-
     delta_to_return = None
 
     async def Next(self):
index 0dbd71e..a4a6a23 100644 (file)
@@ -16,12 +16,15 @@ import base64
 import re
 import binascii
 import yaml
+import string
+import secrets
 from enum import Enum
 from juju.machine import Machine
 from juju.application import Application
 from juju.action import Action
 from juju.unit import Unit
 from n2vc.exceptions import N2VCInvalidCertificate
+from typing import Tuple
 
 
 def base64_to_cacert(b64string):
@@ -146,4 +149,31 @@ def obj_to_dict(obj: object) -> dict:
     # convert obj to yaml
     yaml_text = obj_to_yaml(obj)
     # parse to dict
-    return yaml.load(yaml_text, Loader=yaml.Loader)
+    return yaml.load(yaml_text, Loader=yaml.SafeLoader)
+
+
+def get_ee_id_components(ee_id: str) -> Tuple[str, str, str]:
+    """
+    Get model, application and machine components from an execution environment id
+    :param ee_id:
+    :return: model_name, application_name, machine_id
+    """
+    parts = ee_id.split(".")
+    if len(parts) != 3:
+        raise Exception("invalid ee id.")
+    model_name = parts[0]
+    application_name = parts[1]
+    machine_id = parts[2]
+    return model_name, application_name, machine_id
+
+
+def generate_random_alfanum_string(size: int) -> str:
+    """
+    Generate random alfa-numeric string with a size given by argument
+    :param size:
+    :return: random generated string
+    """
+
+    return "".join(
+        secrets.choice(string.ascii_letters + string.digits) for i in range(size)
+    )
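
Note: obj_to_dict above now parses with yaml.SafeLoader, which refuses to instantiate arbitrary Python objects from the YAML stream. Usage sketch for the two new helpers, illustration only and not part of the diff (the example ee_id value is made up):

from n2vc.utils import get_ee_id_components, generate_random_alfanum_string

# An execution environment id has the shape "<model>.<application>.<machine>";
# anything else raises an Exception.
model, app, machine = get_ee_id_components("mymodel.app-vnf-1.4")
assert (model, app, machine) == ("mymodel", "app-vnf-1", "4")

# Random token drawn from ASCII letters and digits, of the requested length.
token = generate_random_alfanum_string(8)
assert len(token) == 8 and token.isalnum()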
index e5ba877..cc6daf0 100644 (file)
@@ -1,19 +1,3 @@
-aiokafka==0.7.0
-    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-dataclasses==0.6
-    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-kafka-python==2.0.2
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-    #   aiokafka
-git+https://osm.etsi.org/gerrit/osm/common.git@master#egg=osm-common
-    # via -r requirements-dev.in
-pycrypto==2.6.1
-    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-pymongo==3.11.3
-    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-pyyaml==5.4.1
-    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
 #######################################################################################
 # Copyright ETSI Contributors and Others.
 #
@@ -30,3 +14,35 @@ pyyaml==5.4.1
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #######################################################################################
+aiokafka==0.8.1
+    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+async-timeout==4.0.3
+    # via
+    #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+    #   aiokafka
+dataclasses==0.6
+    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+dnspython==2.4.2
+    # via
+    #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+    #   pymongo
+kafka-python==2.0.2
+    # via
+    #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+    #   aiokafka
+motor==3.3.1
+    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git@master
+    # via -r requirements-dev.in
+packaging==23.1
+    # via
+    #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+    #   aiokafka
+pycryptodome==3.19.0
+    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+pymongo==4.5.0
+    # via
+    #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+    #   motor
+pyyaml==6.0.1
+    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
index 2b3c735..4f8784f 100644 (file)
@@ -1,16 +1,18 @@
-# Copyright 2021 Canonical Ltd.
+# Copyright ETSI Contributors and Others.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#    http://www.apache.org/licenses/LICENSE-2.0
 #
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 stdeb
 setuptools-version-command
+setuptools<60
\ No newline at end of file
index 7393626..8192b48 100644 (file)
@@ -1,10 +1,3 @@
-setuptools-version-command==2.2
-    # via -r requirements-dist.in
-stdeb==0.10.0
-    # via -r requirements-dist.in
-
-# The following packages are considered to be unsafe in a requirements file:
-# setuptools
 #######################################################################################
 # Copyright ETSI Contributors and Others.
 #
@@ -21,3 +14,13 @@ stdeb==0.10.0
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #######################################################################################
+setuptools-version-command==99.9
+    # via -r requirements-dist.in
+stdeb==0.10.0
+    # via -r requirements-dist.in
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==59.8.0
+    # via
+    #   -r requirements-dist.in
+    #   setuptools-version-command
index 08df82d..a0d68c4 100644 (file)
@@ -13,8 +13,9 @@
 #     limitations under the License.
 
 asynctest
+charset-normalizer
 coverage
-flake8
+flake8<5.0.0
 mock
 nose2
 requests-mock
index 7e3a14d..57e30a7 100644 (file)
@@ -1,37 +1,3 @@
-asynctest==0.13.0
-    # via -r requirements-test.in
-certifi==2020.12.5
-    # via requests
-chardet==4.0.0
-    # via requests
-coverage==5.5
-    # via
-    #   -r requirements-test.in
-    #   nose2
-flake8==3.9.0
-    # via -r requirements-test.in
-idna==2.10
-    # via requests
-mccabe==0.6.1
-    # via flake8
-mock==4.0.3
-    # via -r requirements-test.in
-nose2==0.10.0
-    # via -r requirements-test.in
-pycodestyle==2.7.0
-    # via flake8
-pyflakes==2.3.1
-    # via flake8
-requests-mock==1.8.0
-    # via -r requirements-test.in
-requests==2.25.1
-    # via requests-mock
-six==1.15.0
-    # via
-    #   nose2
-    #   requests-mock
-urllib3==1.26.4
-    # via requests
 #######################################################################################
 # Copyright ETSI Contributors and Others.
 #
@@ -48,3 +14,35 @@ urllib3==1.26.4
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #######################################################################################
+asynctest==0.13.0
+    # via -r requirements-test.in
+certifi==2023.7.22
+    # via requests
+charset-normalizer==3.2.0
+    # via
+    #   -r requirements-test.in
+    #   requests
+coverage==7.3.1
+    # via -r requirements-test.in
+flake8==4.0.1
+    # via -r requirements-test.in
+idna==3.4
+    # via requests
+mccabe==0.6.1
+    # via flake8
+mock==5.1.0
+    # via -r requirements-test.in
+nose2==0.13.0
+    # via -r requirements-test.in
+pycodestyle==2.8.0
+    # via flake8
+pyflakes==2.4.0
+    # via flake8
+requests==2.31.0
+    # via requests-mock
+requests-mock==1.11.0
+    # via -r requirements-test.in
+six==1.16.0
+    # via requests-mock
+urllib3==2.0.5
+    # via requests
index 91e8bf5..95605f5 100644 (file)
 #     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #     See the License for the specific language governing permissions and
 #     limitations under the License.
-
-juju
-kubernetes
+charset-normalizer
+google-auth<2.18.0
+juju==2.9.44.0
+kubernetes==26.1.0
+motor
 pyasn1
-motor==1.3.1
+pyyaml>6
 retrying-async
index 7ff786a..215ac92 100644 (file)
@@ -1,61 +1,84 @@
-async-timeout==3.0.1
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+async-timeout==4.0.3
     # via retrying-async
-bcrypt==3.2.0
+bcrypt==4.0.1
     # via paramiko
-cachetools==4.2.1
+cachetools==5.3.1
     # via google-auth
-certifi==2020.12.5
+certifi==2023.7.22
     # via
     #   kubernetes
     #   requests
-cffi==1.14.5
+cffi==1.16.0
     # via
-    #   bcrypt
     #   cryptography
     #   pynacl
-chardet==4.0.0
-    # via requests
-cryptography==3.4.7
+charset-normalizer==3.2.0
+    # via
+    #   -r requirements.in
+    #   requests
+cryptography==41.0.4
     # via paramiko
-google-auth==1.28.0
-    # via kubernetes
-idna==2.10
+dnspython==2.4.2
+    # via pymongo
+google-auth==2.17.3
+    # via
+    #   -r requirements.in
+    #   kubernetes
+idna==3.4
     # via requests
-juju==2.9.4
+juju==2.9.44.0
     # via -r requirements.in
-jujubundlelib==0.5.6
+jujubundlelib==0.5.7
     # via theblues
-kubernetes==17.17.0
-    # via -r requirements.in
+kubernetes==26.1.0
+    # via
+    #   -r requirements.in
+    #   juju
 macaroonbakery==1.3.1
     # via
     #   juju
     #   theblues
-motor==1.3.1
+motor==3.3.1
     # via -r requirements.in
-mypy-extensions==0.4.3
+mypy-extensions==1.0.0
     # via typing-inspect
-oauthlib==3.1.0
+oauthlib==3.2.2
     # via requests-oauthlib
-paramiko==2.7.2
+paramiko==2.12.0
     # via juju
-protobuf==3.15.6
+protobuf==3.20.3
     # via macaroonbakery
-pyasn1-modules==0.2.8
-    # via google-auth
-pyasn1==0.4.8
+pyasn1==0.5.0
     # via
     #   -r requirements.in
     #   juju
     #   pyasn1-modules
     #   rsa
-pycparser==2.20
+pyasn1-modules==0.3.0
+    # via google-auth
+pycparser==2.21
     # via cffi
 pymacaroons==0.13.0
     # via macaroonbakery
-pymongo==3.11.3
+pymongo==4.5.0
     # via motor
-pynacl==1.4.0
+pynacl==1.5.0
     # via
     #   macaroonbakery
     #   paramiko
@@ -64,70 +87,52 @@ pyrfc3339==1.1
     # via
     #   juju
     #   macaroonbakery
-python-dateutil==2.8.1
+python-dateutil==2.8.2
     # via kubernetes
-pytz==2021.1
+pytz==2023.3.post1
     # via pyrfc3339
-pyyaml==5.4.1
+pyyaml==6.0.1
     # via
+    #   -r requirements.in
     #   juju
     #   jujubundlelib
     #   kubernetes
-requests-oauthlib==1.3.0
-    # via kubernetes
-requests==2.25.1
+requests==2.31.0
     # via
     #   kubernetes
     #   macaroonbakery
     #   requests-oauthlib
     #   theblues
-retrying-async==1.2.0
+requests-oauthlib==1.3.1
+    # via kubernetes
+retrying-async==2.0.0
     # via -r requirements.in
-rsa==4.7.2
+rsa==4.9
     # via google-auth
-six==1.15.0
+six==1.16.0
     # via
-    #   bcrypt
     #   google-auth
     #   kubernetes
     #   macaroonbakery
-    #   protobuf
+    #   paramiko
     #   pymacaroons
-    #   pynacl
     #   python-dateutil
-    #   websocket-client
 theblues==0.5.2
     # via juju
-toposort==1.6
+toposort==1.10
     # via juju
-typing-extensions==3.7.4.3
+typing-extensions==4.8.0
     # via typing-inspect
-typing-inspect==0.6.0
+typing-inspect==0.9.0
     # via juju
-urllib3==1.26.4
+urllib3==2.0.5
     # via
     #   kubernetes
     #   requests
-websocket-client==0.58.0
+websocket-client==1.6.3
     # via kubernetes
-websockets==7.0
+websockets==11.0.3
     # via juju
 
 # The following packages are considered to be unsafe in a requirements file:
 # setuptools
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
diff --git a/tox.ini b/tox.ini
index bf31bce..63aaf7a 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -23,7 +23,7 @@ toxworkdir = /tmp/.tox
 
 [testenv]
 usedevelop = True
-basepython = python3
+basepython = python3.10
 setenv = VIRTUAL_ENV={envdir}
          PYTHONDONTWRITEBYTECODE = 1
 deps =  -r{toxinidir}/requirements.txt
@@ -31,7 +31,7 @@ deps =  -r{toxinidir}/requirements.txt
 
 #######################################################################################
 [testenv:black]
-deps = black
+deps = black==23.12.1
 skip_install = true
 commands =
         black --check --diff n2vc/
@@ -50,7 +50,7 @@ commands =
         coverage report --omit='*tests*'
         coverage html -d ./cover --omit='*tests*'
         coverage xml -o coverage.xml --omit=*tests*
-whitelist_externals = sh
+allowlist_externals = sh
 
 
 #######################################################################################
@@ -83,13 +83,18 @@ commands =
 
 #######################################################################################
 [testenv:pip-compile]
-deps =  pip-tools==5.5.0
+deps =  pip-tools==6.13.0
+skip_install = true
+allowlist_externals = bash
+        [
 commands =
-        - sh -c 'for file in requirements*.in ; do pip-compile -rU --no-header $file ;\
-        out=`echo $file | sed "s/.in/.txt/"` ; \
-        head -16 tox.ini >> $out ;\
-        done'
-whitelist_externals = sh
+        - bash -c "for file in requirements*.in ; do \
+        UNSAFE="" ; \
+        if [[ $file =~ 'dist' ]] ; then UNSAFE='--allow-unsafe' ; fi ; \
+        pip-compile --resolver=backtracking -rU --no-header $UNSAFE $file ;\
+        out=`echo $file | sed 's/.in/.txt/'` ; \
+        sed -i -e '1 e head -16 tox.ini' $out ;\
+        done"
 
 
 #######################################################################################
@@ -104,7 +109,7 @@ commands =
         python3 setup.py --command-packages=stdeb.command sdist_dsc
         sh -c 'cd deb_dist/n2vc*/ && dpkg-buildpackage -rfakeroot -uc -us'
         sh -c 'rm n2vc/requirements.txt'
-whitelist_externals = sh
+allowlist_externals = sh
 
 #######################################################################################
 [flake8]
@@ -116,7 +121,8 @@ ignore =
         E125,
         E203,
         E226,
-        E241
+        E241,
+        E501
 exclude =
         .git,
         __pycache__,
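
Note on the reworked [testenv:pip-compile] above: for each requirements*.in file the loop regenerates the matching .txt with pip-compile's backtracking resolver, passes --allow-unsafe only for the dist file (which is why requirements-dist.txt now pins setuptools), and then the sed expression '1 e head -16 tox.ini' prepends the 16-line license banner from tox.ini to the regenerated file. A rough Python equivalent of that last step, illustration only (prepend_license_header is hypothetical):

def prepend_license_header(req_file: str, header_source: str = "tox.ini") -> None:
    # Take the first 16 lines of tox.ini (the license banner) and
    # write them ahead of the regenerated requirements file contents.
    with open(header_source) as src:
        header = "".join(src.readlines()[:16])
    with open(req_file) as f:
        body = f.read()
    with open(req_file, "w") as f:
        f.write(header + body)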