Revert "Revert "Move N2VC code from N2VC repo to LCM"" 40/14840/1
author    almagia <silvia.almagia@etsi.org>
Fri, 13 Dec 2024 08:45:45 +0000 (09:45 +0100)
committer almagia <silvia.almagia@etsi.org>
Fri, 13 Dec 2024 08:45:45 +0000 (09:45 +0100)
This reverts commit b90dc26711d692b2e76f969e8b79f721caa498b1.

Requested by TSC on 13.12.2024

Change-Id: Ifa4ba6c395bfebff562642c18dcb6661ac748896

64 files changed:
MANIFEST.in
osm_lcm/lcm.py
osm_lcm/lcm_helm_conn.py
osm_lcm/n2vc/__init__.py [new file with mode: 0644]
osm_lcm/n2vc/config.py [new file with mode: 0644]
osm_lcm/n2vc/definitions.py [new file with mode: 0644]
osm_lcm/n2vc/exceptions.py [new file with mode: 0644]
osm_lcm/n2vc/juju_watcher.py [new file with mode: 0644]
osm_lcm/n2vc/k8s_conn.py [new file with mode: 0644]
osm_lcm/n2vc/k8s_helm3_conn.py [new file with mode: 0644]
osm_lcm/n2vc/k8s_helm_base_conn.py [new file with mode: 0644]
osm_lcm/n2vc/k8s_juju_conn.py [new file with mode: 0644]
osm_lcm/n2vc/kubectl.py [new file with mode: 0644]
osm_lcm/n2vc/libjuju.py [new file with mode: 0644]
osm_lcm/n2vc/loggable.py [new file with mode: 0644]
osm_lcm/n2vc/n2vc_conn.py [new file with mode: 0644]
osm_lcm/n2vc/n2vc_juju_conn.py [new file with mode: 0644]
osm_lcm/n2vc/post-renderer-scripts/mainPostRenderer/mainPostRenderer [new file with mode: 0755]
osm_lcm/n2vc/post-renderer-scripts/nodeSelector/kustomization.yaml [new file with mode: 0644]
osm_lcm/n2vc/post-renderer-scripts/nodeSelector/nodeSelector [new file with mode: 0755]
osm_lcm/n2vc/post-renderer-scripts/nodeSelector/nodeSelector-deployment-patch.yaml [new file with mode: 0644]
osm_lcm/n2vc/post-renderer-scripts/podLabels/kustomization.yaml [new file with mode: 0644]
osm_lcm/n2vc/post-renderer-scripts/podLabels/podLabels [new file with mode: 0755]
osm_lcm/n2vc/provisioner.py [new file with mode: 0644]
osm_lcm/n2vc/store.py [new file with mode: 0644]
osm_lcm/n2vc/tests/__init__.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/README.md [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/__init__.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/test_config.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/test_connection.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/test_definitions.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/test_juju_watcher.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/test_k8s_helm3_conn.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/test_k8s_juju_conn.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/test_kubectl.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/test_libjuju.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/test_n2vc_juju_conn.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/test_provisioner.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/test_store.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/test_utils.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/testdata/__init__.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/testdata/test_certificate.yaml [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/testdata/test_db_descriptors.py [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/testdata/upgrade-machine.log [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/testdata/upgrade-operator.log [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/testdata/upgrade-podspec-stateful.log [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/testdata/upgrade-podspec-stateless.log [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/testdata/upgrade-sidecar.log [new file with mode: 0644]
osm_lcm/n2vc/tests/unit/utils.py [new file with mode: 0644]
osm_lcm/n2vc/utils.py [new file with mode: 0644]
osm_lcm/n2vc/vca/__init__.py [new file with mode: 0644]
osm_lcm/n2vc/vca/cloud.py [new file with mode: 0644]
osm_lcm/n2vc/vca/connection.py [new file with mode: 0644]
osm_lcm/n2vc/vca/connection_data.py [new file with mode: 0644]
osm_lcm/ns.py
osm_lcm/odu_workflows.py
osm_lcm/tests/test_ns.py
osm_lcm/vim_sdn.py
requirements-dev.in
requirements-dev.txt
requirements-test.in
requirements-test.txt
requirements.in
requirements.txt

index 6e5bb8e..815c315 100644 (file)
@@ -15,6 +15,7 @@
 
 include README.rst
 recursive-include osm_lcm *.py *.xml *.sh lcm.cfg *.txt
+recursive-include osm_lcm/n2vc/post-renderer-scripts *
 recursive-include devops-stages *
 recursive-include osm_lcm/odu_libs/templates *.j2
 
index 7d12e37..8362acc 100644 (file)
@@ -49,7 +49,7 @@ from osm_lcm.data_utils.lcm_config import LcmCfg
 from osm_lcm.data_utils.list_utils import find_in_list
 from osm_lcm.lcm_hc import get_health_check_file
 from os import path, getenv
-from n2vc import version as n2vc_version
+from osm_lcm.n2vc import version as n2vc_version
 import traceback
 
 if getenv("OSMLCM_PDB_DEBUG", None) is not None:
index 930ec60..d2fa2df 100644 (file)
@@ -33,9 +33,9 @@ from osm_lcm.lcm_utils import LcmBase, get_ee_id_parts
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
 
-from n2vc.n2vc_conn import N2VCConnector
-from n2vc.k8s_helm3_conn import K8sHelm3Connector
-from n2vc.exceptions import (
+from osm_lcm.n2vc.n2vc_conn import N2VCConnector
+from osm_lcm.n2vc.k8s_helm3_conn import K8sHelm3Connector
+from osm_lcm.n2vc.exceptions import (
     N2VCBadArgumentsException,
     N2VCException,
     N2VCExecutionException,
diff --git a/osm_lcm/n2vc/__init__.py b/osm_lcm/n2vc/__init__.py
new file mode 100644 (file)
index 0000000..d97c31c
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+version = "0.0.2"
diff --git a/osm_lcm/n2vc/config.py b/osm_lcm/n2vc/config.py
new file mode 100644 (file)
index 0000000..374ec73
--- /dev/null
@@ -0,0 +1,104 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+import os
+import typing
+
+
+class EnvironConfig(dict):
+    prefixes = ["OSMLCM_VCA_", "OSMMON_VCA_"]
+
+    def __init__(self, prefixes: typing.List[str] = None):
+        if prefixes:
+            self.prefixes = prefixes
+        for key, value in os.environ.items():
+            if any(key.startswith(prefix) for prefix in self.prefixes):
+                self.__setitem__(self._get_renamed_key(key), value)
+
+    def _get_renamed_key(self, key: str) -> str:
+        for prefix in self.prefixes:
+            key = key.replace(prefix, "")
+        return key.lower()
+
+
+MODEL_CONFIG_KEYS = [
+    "agent-metadata-url",
+    "agent-stream",
+    "apt-ftp-proxy",
+    "apt-http-proxy",
+    "apt-https-proxy",
+    "apt-mirror",
+    "apt-no-proxy",
+    "automatically-retry-hooks",
+    "backup-dir",
+    "cloudinit-userdata",
+    "container-image-metadata-url",
+    "container-image-stream",
+    "container-inherit-properties",
+    "container-networking-method",
+    "default-series",
+    "default-space",
+    "development",
+    "disable-network-management",
+    "egress-subnets",
+    "enable-os-refresh-update",
+    "enable-os-upgrade",
+    "fan-config",
+    "firewall-mode",
+    "ftp-proxy",
+    "http-proxy",
+    "https-proxy",
+    "ignore-machine-addresses",
+    "image-metadata-url",
+    "image-stream",
+    "juju-ftp-proxy",
+    "juju-http-proxy",
+    "juju-https-proxy",
+    "juju-no-proxy",
+    "logforward-enabled",
+    "logging-config",
+    "lxd-snap-channel",
+    "max-action-results-age",
+    "max-action-results-size",
+    "max-status-history-age",
+    "max-status-history-size",
+    "net-bond-reconfigure-delay",
+    "no-proxy",
+    "provisioner-harvest-mode",
+    "proxy-ssh",
+    "snap-http-proxy",
+    "snap-https-proxy",
+    "snap-store-assertions",
+    "snap-store-proxy",
+    "snap-store-proxy-url",
+    "ssl-hostname-verification",
+    "test-mode",
+    "transmit-vendor-metrics",
+    "update-status-hook-interval",
+]
+
+
+class ModelConfig(dict):
+    prefix = "model_config_"
+
+    def __init__(self, config: dict):
+        for key, value in config.items():
+            if (
+                key.startswith(self.prefix)
+                and self._get_renamed_key(key) in MODEL_CONFIG_KEYS
+            ):
+                self.__setitem__(self._get_renamed_key(key), value)
+
+    def _get_renamed_key(self, key):
+        return key.replace(self.prefix, "").replace("_", "-")
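
A minimal usage sketch for the two helpers above (the environment variables and their values are hypothetical, set here purely for illustration). EnvironConfig collects VCA-prefixed environment variables under lowercased keys; ModelConfig then keeps only the "model_config_"-prefixed entries that map to known Juju model options:

    import os

    # Hypothetical values, set only for this illustration.
    os.environ["OSMLCM_VCA_HOST"] = "10.0.0.1"
    os.environ["OSMLCM_VCA_MODEL_CONFIG_AGENT_STREAM"] = "released"

    from osm_lcm.n2vc.config import EnvironConfig, ModelConfig

    env_config = EnvironConfig()
    print(env_config["host"])  # -> "10.0.0.1"

    # "model_config_" is stripped and underscores become dashes;
    # keys not listed in MODEL_CONFIG_KEYS are dropped.
    model_config = ModelConfig(env_config)
    print(model_config["agent-stream"])  # -> "released"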
diff --git a/osm_lcm/n2vc/definitions.py b/osm_lcm/n2vc/definitions.py
new file mode 100644 (file)
index 0000000..caa40ef
--- /dev/null
@@ -0,0 +1,108 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+
+from osm_lcm.n2vc.utils import get_ee_id_components
+
+
+class RelationEndpoint:
+    """Represents an endpoint of an application"""
+
+    def __init__(self, ee_id: str, vca_id: str, endpoint_name: str) -> None:
+        """
+        Args:
+            ee_id: Execution environment id.
+                   Format: "<model>.<application_name>.<machine_id>".
+            vca_id: Id of the VCA. Identifies the Juju Controller
+                    where the application is deployed
+            endpoint_name: Name of the endpoint for the relation
+        """
+        ee_components = get_ee_id_components(ee_id)
+        self._model_name = ee_components[0]
+        self._application_name = ee_components[1]
+        self._vca_id = vca_id
+        self._endpoint_name = endpoint_name
+
+    @property
+    def application_name(self) -> str:
+        """Returns the application name"""
+        return self._application_name
+
+    @property
+    def endpoint(self) -> str:
+        """Returns the application name and the endpoint. Format: <application>:<endpoint>"""
+        return f"{self.application_name}:{self._endpoint_name}"
+
+    @property
+    def endpoint_name(self) -> str:
+        """Returns the endpoint name"""
+        return self._endpoint_name
+
+    @property
+    def model_name(self) -> str:
+        """Returns the model name"""
+        return self._model_name
+
+    @property
+    def vca_id(self) -> str:
+        """Returns the vca id"""
+        return self._vca_id
+
+    def __str__(self) -> str:
+        app = self.application_name
+        endpoint = self.endpoint_name
+        model = self.model_name
+        vca = self.vca_id
+        return f"{app}:{endpoint} (model: {model}, vca: {vca})"
+
+
+class Offer:
+    """Represents a juju offer"""
+
+    def __init__(self, url: str, vca_id: str = None) -> None:
+        """
+        Args:
+            url: Offer url. Format: <user>/<model>.<offer-name>.
+        """
+        self._url = url
+        self._username = url.split(".")[0].split("/")[0]
+        self._model_name = url.split(".")[0].split("/")[1]
+        self._name = url.split(".")[1]
+        self._vca_id = vca_id
+
+    @property
+    def model_name(self) -> str:
+        """Returns the model name"""
+        return self._model_name
+
+    @property
+    def name(self) -> str:
+        """Returns the offer name"""
+        return self._name
+
+    @property
+    def username(self) -> str:
+        """Returns the username"""
+        return self._username
+
+    @property
+    def url(self) -> str:
+        """Returns the offer url"""
+        return self._url
+
+    @property
+    def vca_id(self) -> str:
+        """Returns the vca id"""
+        return self._vca_id
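
A short sketch of the two value objects above; all identifiers are hypothetical, and the string formats follow the docstrings (ee_id as "<model>.<application_name>.<machine_id>", offer url as "<user>/<model>.<offer-name>"):

    from osm_lcm.n2vc.definitions import Offer, RelationEndpoint

    endpoint = RelationEndpoint("ns-model.app-vnf.0", "vca-1", "db")
    print(endpoint.endpoint)  # -> "app-vnf:db"
    print(endpoint)           # -> "app-vnf:db (model: ns-model, vca: vca-1)"

    offer = Offer("admin/ns-model.mysql-offer", vca_id="vca-1")
    print(offer.username, offer.model_name, offer.name)
    # -> admin ns-model mysql-offer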
diff --git a/osm_lcm/n2vc/exceptions.py b/osm_lcm/n2vc/exceptions.py
new file mode 100644 (file)
index 0000000..9867cb9
--- /dev/null
@@ -0,0 +1,219 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+
+class N2VCException(Exception):
+    """
+    N2VC exception base class
+    """
+
+    def __init__(self, message: str = ""):
+        Exception.__init__(self, message)
+        self.message = message
+
+    def __str__(self):
+        return self.message
+
+    def __repr__(self):
+        return "{}({})".format(type(self), self.message)
+
+
+class N2VCBadArgumentsException(N2VCException):
+    """
+    Bad argument values exception
+    """
+
+    def __init__(self, message: str = "", bad_args: list = None):
+        N2VCException.__init__(self, message=message)
+        self.bad_args = bad_args
+
+    def __str__(self):
+        return "<{}> Bad arguments: {} -> {}".format(
+            type(self), super().__str__(), self.bad_args
+        )
+
+
+class N2VCConnectionException(N2VCException):
+    """
+    Error connecting to VCA
+    """
+
+    def __init__(self, message: str = "", url: str = None):
+        N2VCException.__init__(self, message=message)
+        self.url = url
+
+    def __str__(self):
+        return "<{}> Connection to {} failed: {}".format(
+            type(self), self.url, super().__str__()
+        )
+
+
+class N2VCTimeoutException(N2VCException):
+    """
+    Timeout
+    """
+
+    def __init__(self, message: str = "", timeout: str = ""):
+        N2VCException.__init__(self, message=message)
+        self.timeout = timeout
+
+    def __str__(self):
+        return "<{}> {} timeout: {}".format(type(self), self.timeout, super().__str__())
+
+
+class N2VCExecutionException(N2VCException):
+    """
+    Error executing primitive
+    """
+
+    def __init__(self, message: str = "", primitive_name: str = ""):
+        N2VCException.__init__(self, message=message)
+        self.primitive_name = primitive_name
+
+    def __str__(self):
+        return "<{}> Error executing primitive {} failed: {}".format(
+            type(self), self.primitive_name, super().__str__()
+        )
+
+
+class N2VCInvalidCertificate(N2VCException):
+    """
+    Invalid certificate
+    """
+
+    def __init__(self, message: str = ""):
+        N2VCException.__init__(self, message=message)
+
+    def __str__(self):
+        return "<{}> Invalid certificate: {}".format(type(self), super().__str__())
+
+
+class N2VCNotFound(N2VCException):
+    """
+    Not found
+    """
+
+    def __init__(self, message: str = ""):
+        N2VCException.__init__(self, message=message)
+
+    def __str__(self):
+        return "<{}> Not found: {}".format(type(self), super().__str__())
+
+
+class N2VCApplicationExists(N2VCException):
+    """
+    Application Exists
+    """
+
+    def __init__(self, message: str = ""):
+        N2VCException.__init__(self, message=message)
+
+    def __str__(self):
+        return "<{}> Application Exists: {}".format(type(self), super().__str__())
+
+
+class JujuError(N2VCException):
+    """
+    Juju Error
+    """
+
+    def __init__(self, message: str = ""):
+        N2VCException.__init__(self, message=message)
+
+    def __str__(self):
+        return "<{}> Juju Error: {}".format(type(self), super().__str__())
+
+
+class K8sException(Exception):
+    """
+    K8s exception
+    """
+
+    def __init__(self, message: str):
+        Exception.__init__(self, message)
+        self._message = message
+
+    def __str__(self):
+        return self._message
+
+    def __repr__(self):
+        return self._message
+
+
+class EntityInvalidException(Exception):
+    """Entity is not valid, the type does not match any EntityType."""
+
+
+class JujuInvalidK8sConfiguration(N2VCException):
+    """Invalid K8s configuration."""
+
+
+class JujuCharmNotFound(N2VCException):
+    """The Charm can't be found or is not readable."""
+
+
+class JujuControllerFailedConnecting(N2VCException):
+    """Failed connecting to juju controller."""
+
+
+class JujuModelAlreadyExists(N2VCException):
+    """The model already exists."""
+
+
+class JujuApplicationExists(N2VCException):
+    """The Application already exists."""
+
+
+class JujuApplicationNotFound(N2VCException):
+    """The Application cannot be found."""
+
+
+class JujuLeaderUnitNotFound(N2VCException):
+    """The Application cannot be found."""
+
+
+class JujuActionNotFound(N2VCException):
+    """The Action cannot be found."""
+
+
+class JujuMachineNotFound(N2VCException):
+    """The machine cannot be found."""
+
+
+class JujuK8sProxycharmNotSupported(N2VCException):
+    """K8s Proxy Charms not supported in this installation."""
+
+
+class N2VCPrimitiveExecutionFailed(N2VCException):
+    """Something failed while attempting to execute a primitive."""
+
+
+class NetworkServiceDoesNotExist(N2VCException):
+    """The Network Service being acted against does not exist."""
+
+
+class PrimitiveDoesNotExist(N2VCException):
+    """The Primitive being executed does not exist."""
+
+
+class NoRouteToHost(N2VCException):
+    """There was no route to the specified host."""
+
+
+class AuthenticationFailed(N2VCException):
+    """The authentication for the specified user failed."""
+
+
+class MethodNotImplemented(N2VCException):
+    """The method is not implemented."""
diff --git a/osm_lcm/n2vc/juju_watcher.py b/osm_lcm/n2vc/juju_watcher.py
new file mode 100644 (file)
index 0000000..f1181f1
--- /dev/null
@@ -0,0 +1,419 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+import asyncio
+import time
+
+from juju.client import client
+from osm_lcm.n2vc.exceptions import EntityInvalidException
+from osm_lcm.n2vc.n2vc_conn import N2VCConnector
+from juju.model import ModelEntity, Model
+from juju.client.overrides import Delta
+from juju.status import derive_status
+from juju.application import Application
+from websockets.exceptions import ConnectionClosed
+import logging
+
+logger = logging.getLogger("__main__")
+
+
+def status(application: Application) -> str:
+    unit_status = []
+    for unit in application.units:
+        unit_status.append(unit.workload_status)
+    return derive_status(unit_status)
+
+
+def entity_ready(entity: ModelEntity) -> bool:
+    """
+    Check if the entity is ready
+
+    :param: entity: Model entity. It can be a machine, action, or application.
+
+    :returns: boolean saying if the entity is ready or not
+    """
+
+    entity_type = entity.entity_type
+    if entity_type == "machine":
+        return entity.agent_status in ["started"]
+    elif entity_type == "action":
+        return entity.status in ["completed", "failed", "cancelled"]
+    elif entity_type == "application":
+        # Workaround for bug: https://github.com/juju/python-libjuju/issues/441
+        return entity.status in ["active", "blocked"]
+    elif entity_type == "unit":
+        return entity.agent_status in ["idle"]
+    else:
+        raise EntityInvalidException("Unknown entity type: {}".format(entity_type))
+
+
+def application_ready(application: Application) -> bool:
+    """
+    Check if an application has a leader
+
+    :param: application: Application entity.
+
+    :returns: boolean saying if the application has a unit that is a leader.
+    """
+    ready_status_list = ["active", "blocked"]
+    application_ready = application.status in ready_status_list
+    units_ready = all(
+        unit.workload_status in ready_status_list for unit in application.units
+    )
+    return application_ready and units_ready
+
+
+class JujuModelWatcher:
+    @staticmethod
+    async def wait_for_model(model: Model, timeout: float = 3600):
+        """
+        Wait for all entities in model to reach its final state.
+
+        :param: model:              Model to observe
+        :param: timeout:            Timeout for the model applications to be active
+
+        :raises: asyncio.TimeoutError when the timeout is reached
+        """
+
+        if timeout is None:
+            timeout = 3600.0
+
+        # Coroutine to wait until the entity reaches the final state
+        async def wait_until_model_ready():
+            wait_for_entity = asyncio.ensure_future(
+                asyncio.wait_for(
+                    model.block_until(
+                        lambda: all(
+                            application_ready(application)
+                            for application in model.applications.values()
+                        ),
+                    ),
+                    timeout=timeout,
+                )
+            )
+
+            tasks = [wait_for_entity]
+            try:
+                await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
+            finally:
+                # Cancel tasks
+                for task in tasks:
+                    task.cancel()
+
+        await wait_until_model_ready()
+
+        # Check the model is still ready after 10 seconds
+        await asyncio.sleep(10)
+        await wait_until_model_ready()
+
+    @staticmethod
+    async def wait_for(
+        model: Model,
+        entity: ModelEntity,
+        progress_timeout: float = 3600,
+        total_timeout: float = 3600,
+        db_dict: dict = None,
+        n2vc: N2VCConnector = None,
+        vca_id: str = None,
+    ):
+        """
+        Wait for entity to reach its final state.
+
+        :param: model:              Model to observe
+        :param: entity:             Entity object
+        :param: progress_timeout:   Maximum time between two updates in the model
+        :param: total_timeout:      Timeout for the entity to be active
+        :param: db_dict:            Dictionary with data of the DB to write the updates
+        :param: n2vc:               N2VC Connector object
+        :param: vca_id:             VCA ID
+
+        :raises: asyncio.TimeoutError when the timeout is reached
+        """
+
+        if progress_timeout is None:
+            progress_timeout = 3600.0
+        if total_timeout is None:
+            total_timeout = 3600.0
+
+        entity_type = entity.entity_type
+        if entity_type not in ["application", "action", "machine", "unit"]:
+            raise EntityInvalidException("Unknown entity type: {}".format(entity_type))
+
+        # Coroutine to wait until the entity reaches the final state
+        wait_for_entity = asyncio.ensure_future(
+            asyncio.wait_for(
+                model.block_until(lambda: entity_ready(entity)),
+                timeout=total_timeout,
+            )
+        )
+
+        # Coroutine to watch the model for changes (and write them to DB)
+        watcher = asyncio.ensure_future(
+            JujuModelWatcher.model_watcher(
+                model,
+                entity_id=entity.entity_id,
+                entity_type=entity_type,
+                timeout=progress_timeout,
+                db_dict=db_dict,
+                n2vc=n2vc,
+                vca_id=vca_id,
+            )
+        )
+
+        tasks = [wait_for_entity, watcher]
+        try:
+            # Execute tasks, and stop when the first is finished
+            # The watcher task will never finish (unless it times out)
+            await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
+        finally:
+            # Cancel tasks
+            for task in tasks:
+                task.cancel()
+
+    @staticmethod
+    async def wait_for_units_idle(
+        model: Model, application: Application, timeout: float = 60
+    ):
+        """
+        Waits for the application and all its units to transition back to idle
+
+        :param: model:          Model to observe
+        :param: application:    The application to be observed
+        :param: timeout:        Maximum time between two updates in the model
+
+        :raises: asyncio.TimeoutError when the timeout is reached
+        """
+
+        ensure_units_idle = asyncio.ensure_future(
+            asyncio.wait_for(
+                JujuModelWatcher.ensure_units_idle(model, application), timeout
+            )
+        )
+        tasks = [
+            ensure_units_idle,
+        ]
+        (done, pending) = await asyncio.wait(
+            tasks, timeout=timeout, return_when=asyncio.FIRST_COMPLETED
+        )
+
+        if ensure_units_idle in pending:
+            ensure_units_idle.cancel()
+            raise TimeoutError(
+                "Application's units failed to return to idle after {} seconds".format(
+                    timeout
+                )
+            )
+        if ensure_units_idle.result():
+            pass
+
+    @staticmethod
+    async def ensure_units_idle(model: Model, application: Application):
+        """
+        Waits indefinitely until the application's units transition back to idle
+
+        :param: model:          Model to observe
+        :param: application:    The application to be observed
+        """
+
+        try:
+            allwatcher = client.AllWatcherFacade.from_connection(model.connection())
+            unit_wanted_state = "executing"
+            final_state_reached = False
+
+            units = application.units
+            final_state_seen = {unit.entity_id: False for unit in units}
+            agent_state_seen = {unit.entity_id: False for unit in units}
+            workload_state = {unit.entity_id: False for unit in units}
+
+            try:
+                while not final_state_reached:
+                    change = await allwatcher.Next()
+
+                    # Keep checking to see if new units were added during the change
+                    for unit in units:
+                        if unit.entity_id not in final_state_seen:
+                            final_state_seen[unit.entity_id] = False
+                            agent_state_seen[unit.entity_id] = False
+                            workload_state[unit.entity_id] = False
+
+                    for delta in change.deltas:
+                        await asyncio.sleep(0)
+                        if delta.entity != units[0].entity_type:
+                            continue
+
+                        final_state_reached = True
+                        for unit in units:
+                            if delta.data["name"] == unit.entity_id:
+                                status = delta.data["agent-status"]["current"]
+                                workload_state[unit.entity_id] = delta.data[
+                                    "workload-status"
+                                ]["current"]
+
+                                if status == unit_wanted_state:
+                                    agent_state_seen[unit.entity_id] = True
+                                    final_state_seen[unit.entity_id] = False
+
+                                if (
+                                    status == "idle"
+                                    and agent_state_seen[unit.entity_id]
+                                ):
+                                    final_state_seen[unit.entity_id] = True
+
+                            final_state_reached = (
+                                final_state_reached
+                                and final_state_seen[unit.entity_id]
+                                and workload_state[unit.entity_id]
+                                in [
+                                    "active",
+                                    "error",
+                                ]
+                            )
+
+            except ConnectionClosed:
+                pass
+                # This is expected to happen when the
+                # entity reaches its final state, because
+                # the model connection is closed afterwards
+        except Exception as e:
+            raise e
+
+    @staticmethod
+    async def model_watcher(
+        model: Model,
+        entity_id: str,
+        entity_type: str,
+        timeout: float,
+        db_dict: dict = None,
+        n2vc: N2VCConnector = None,
+        vca_id: str = None,
+    ):
+        """
+        Observes the changes related to a specific entity in a model
+
+        :param: model:          Model to observe
+        :param: entity_id:      ID of the entity to be observed
+        :param: entity_type:    Entity Type (e.g. "application", "machine", and "action")
+        :param: timeout:        Maximum time between two updates in the model
+        :param: db_dict:        Dictionary with data of the DB to write the updates
+        :param: n2vc:           N2VC Connector object
+        :param: vca_id:         VCA ID
+
+        :raises: asyncio.TimeoutError when the timeout is reached
+        """
+
+        try:
+            allwatcher = client.AllWatcherFacade.from_connection(model.connection())
+
+            # Generate the list of entity types to listen for
+            entity_types = (
+                [entity_type, "unit"]
+                if entity_type == "application"  # TODO: Add "action" too
+                else [entity_type]
+            )
+
+            # Get time when it should timeout
+            timeout_end = time.time() + timeout
+
+            try:
+                while True:
+                    change = await allwatcher.Next()
+                    for delta in change.deltas:
+                        write = False
+                        delta_entity = None
+
+                        # Get delta EntityType
+                        delta_entity = delta.entity
+
+                        if delta_entity in entity_types:
+                            # Get entity id
+                            id = None
+                            if entity_type == "application":
+                                id = (
+                                    delta.data["application"]
+                                    if delta_entity == "unit"
+                                    else delta.data["name"]
+                                )
+                            else:
+                                if "id" in delta.data:
+                                    id = delta.data["id"]
+                                else:
+                                    print("No id {}".format(delta.data))
+
+                            # Write if the entity id matches
+                            write = id == entity_id
+
+                            # Update timeout
+                            timeout_end = time.time() + timeout
+                            (
+                                status,
+                                status_message,
+                                vca_status,
+                            ) = JujuModelWatcher.get_status(delta)
+
+                            if write and n2vc is not None and db_dict:
+                                # Write status to DB
+                                status = n2vc.osm_status(delta_entity, status)
+                                await n2vc.write_app_status_to_db(
+                                    db_dict=db_dict,
+                                    status=status,
+                                    detailed_status=status_message,
+                                    vca_status=vca_status,
+                                    entity_type=delta_entity,
+                                    vca_id=vca_id,
+                                )
+                    # Check if timeout
+                    if time.time() > timeout_end:
+                        raise asyncio.TimeoutError()
+            except ConnectionClosed:
+                pass
+                # This is expected to happen when the
+                # entity reaches its final state, because
+                # the model connection is closed afterwards
+        except Exception as e:
+            raise e
+
+    @staticmethod
+    def get_status(delta: Delta) -> (str, str, str):
+        """
+        Get status from delta
+
+        :param: delta:          Delta generated by the allwatcher
+
+        :return: (status, message, vca_status)
+        """
+        if delta.entity == "machine":
+            return (
+                delta.data["agent-status"]["current"],
+                delta.data["instance-status"]["message"],
+                delta.data["instance-status"]["current"],
+            )
+        elif delta.entity == "action":
+            return (
+                delta.data["status"],
+                delta.data["status"],
+                delta.data["status"],
+            )
+        elif delta.entity == "application":
+            return (
+                delta.data["status"]["current"],
+                delta.data["status"]["message"],
+                delta.data["status"]["current"],
+            )
+        elif delta.entity == "unit":
+            return (
+                delta.data["workload-status"]["current"],
+                delta.data["workload-status"]["message"],
+                delta.data["workload-status"]["current"],
+            )
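
A hedged sketch of driving the watcher above from python-libjuju (the model name is hypothetical, and the connection is assumed to come from the local Juju configuration; N2VC itself builds the connection from the VCA config instead):

    import asyncio

    from juju.model import Model

    from osm_lcm.n2vc.juju_watcher import JujuModelWatcher

    async def main():
        model = Model()
        await model.connect(model_name="ns-model")  # hypothetical model
        try:
            # Waits until every application (and its units) is
            # active/blocked, re-checking once after 10 seconds;
            # raises asyncio.TimeoutError otherwise.
            await JujuModelWatcher.wait_for_model(model, timeout=600)
        finally:
            await model.disconnect()

    asyncio.run(main())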
diff --git a/osm_lcm/n2vc/k8s_conn.py b/osm_lcm/n2vc/k8s_conn.py
new file mode 100644 (file)
index 0000000..33e868a
--- /dev/null
@@ -0,0 +1,525 @@
+##
+# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+import abc
+import asyncio
+from typing import Union
+import time
+
+from osm_lcm.n2vc.loggable import Loggable
+
+
+class K8sConnector(abc.ABC, Loggable):
+    """
+    ####################################################################################
+    ################################### P U B L I C ####################################
+    ####################################################################################
+    """
+
+    @staticmethod
+    def generate_kdu_instance_name(**kwargs):
+        raise NotImplementedError("Method not implemented")
+
+    def __init__(self, db: object, log: object = None, on_update_db=None):
+        """
+
+        :param db: database object to write current operation status
+        :param log: logger for tracing
+        :param on_update_db: callback called when k8s connector updates database
+        """
+
+        # parent class
+        Loggable.__init__(self, log=log, log_to_console=True, prefix="\nK8S")
+
+        # self.log.info('Initializing generic K8S connector')
+
+        # the database and update callback
+        self.db = db
+        self.on_update_db = on_update_db
+
+        # self.log.info('K8S generic connector initialized')
+
+    @abc.abstractmethod
+    async def init_env(
+        self, k8s_creds: str, namespace: str = "kube-system", reuse_cluster_uuid=None
+    ) -> (str, bool):
+        """
+        It prepares a given K8s cluster environment to run Charts or juju Bundles on
+        both sides:
+            client (OSM)
+            server (Tiller/Charm)
+
+        :param k8s_creds: credentials to access a given K8s cluster, i.e. a valid
+        '.kube/config'
+        :param namespace: optional namespace to be used for the K8s engine (helm
+        tiller, juju). By default, 'kube-system' will be used
+        :param reuse_cluster_uuid: existing cluster uuid for reuse
+        :return: uuid of the K8s cluster and True if connector has installed some
+        software in the cluster (on error, an exception will be raised)
+        """
+
+    @abc.abstractmethod
+    async def repo_add(
+        self,
+        cluster_uuid: str,
+        name: str,
+        url: str,
+        repo_type: str = "chart",
+        cert: str = None,
+        user: str = None,
+        password: str = None,
+    ):
+        """
+        Add a new repository to OSM database
+
+        :param cluster_uuid: the cluster
+        :param name: name for the repo in OSM
+        :param url: URL of the repo
+        :param repo_type: either "chart" or "bundle"
+        :return: True if successful
+        """
+
+    @abc.abstractmethod
+    async def repo_list(self, cluster_uuid: str):
+        """
+        Get the list of registered repositories
+
+        :param cluster_uuid: the cluster
+        :return: list of registered repositories: [ (name, url) .... ]
+        """
+
+    @abc.abstractmethod
+    async def repo_remove(self, cluster_uuid: str, name: str):
+        """
+        Remove a repository from OSM
+
+        :param name: repo name in OSM
+        :param cluster_uuid: the cluster
+        :return: True if successful
+        """
+
+    @abc.abstractmethod
+    async def synchronize_repos(self, cluster_uuid: str, name: str):
+        """
+        Synchronizes the list of repositories created in the cluster with
+        the repositories added by the NBI
+
+        :param cluster_uuid: the cluster
+        :return: List of repositories deleted from the cluster and dictionary with
+        repos added
+        """
+
+    @abc.abstractmethod
+    async def reset(
+        self, cluster_uuid: str, force: bool = False, uninstall_sw: bool = False
+    ) -> bool:
+        """
+        Uninstalls Tiller/Charm from a known K8s cluster and removes it from the list
+        of known K8s clusters. Intended to be used e.g. when the NS instance is deleted.
+
+        :param cluster_uuid: UUID of a K8s cluster known by OSM.
+        :param force: force deletion, even in case there are deployed releases
+        :param uninstall_sw: flag to indicate that software uninstallation from the
+        cluster is needed
+        :return: True if successful
+        """
+
+    @abc.abstractmethod
+    async def install(
+        self,
+        cluster_uuid: str,
+        kdu_model: str,
+        kdu_instance: str,
+        atomic: bool = True,
+        timeout: float = 300,
+        params: dict = None,
+        db_dict: dict = None,
+        kdu_name: str = None,
+        namespace: str = None,
+    ):
+        """
+        Deploys a new KDU instance. It would implicitly rely on the `install` call
+        to deploy the Chart/Bundle properly parametrized (in practice, this call would
+        happen before any _initial-config-primitive_ of the VNF is called).
+
+        :param cluster_uuid: UUID of a K8s cluster known by OSM
+        :param kdu_model: chart/bundle:version reference (string), which can be either
+            of these options:
+            - a name of chart/bundle available via the repos known by OSM
+            - a path to a packaged chart/bundle
+            - a path to an unpacked chart/bundle directory or a URL
+        :param kdu_instance: Kdu instance name
+        :param atomic: If set, the installation process purges the chart/bundle on
+            failure and also waits until all the K8s objects are active
+        :param timeout: Time in seconds to wait for the install of the chart/bundle
+            (defaults to Helm default timeout: 300s)
+        :param params: dictionary of key-value pairs for instantiation parameters
+            (overriding default values)
+        :param dict db_dict: where to write into database when the status changes.
+                        It contains a dict with {collection: <str>, filter: {},
+                        path: <str>},
+                            e.g. {collection: "nsrs", filter:
+                            {_id: <nsd-id>}, path: "_admin.deployed.K8S.3"}
+        :param kdu_name: Name of the KDU instance to be installed
+        :param namespace: K8s namespace to use for the KDU instance
+        :return: True if successful
+        """
+
+    @abc.abstractmethod
+    async def upgrade(
+        self,
+        cluster_uuid: str,
+        kdu_instance: str,
+        kdu_model: str = None,
+        atomic: bool = True,
+        timeout: float = 300,
+        params: dict = None,
+        db_dict: dict = None,
+        reset_values: bool = False,
+        reuse_values: bool = True,
+        reset_then_reuse_values: bool = False,
+        force: bool = False,
+    ):
+        """
+        Upgrades an existing KDU instance. It would implicitly use the `upgrade` call
+        over an existing Chart/Bundle. It can be used both to upgrade the chart or to
+        reconfigure it. This would be exposed as Day-2 primitive.
+
+        :param cluster_uuid: UUID of a K8s cluster known by OSM
+        :param kdu_instance: unique name for the KDU instance to be updated
+        :param kdu_model: new chart/bundle:version reference
+        :param atomic: roll back in case of failure and wait until pods and services
+            are available
+        :param timeout: Time in seconds to wait for the install of the chart/bundle
+            (defaults to Helm default timeout: 300s)
+        :param params: new dictionary of key-value pairs for instantiation parameters
+        :param dict db_dict: where to write into database when the status changes.
+                        It contains a dict with {collection: <str>, filter: {},
+                        path: <str>},
+                            e.g. {collection: "nsrs", filter:
+                            {_id: <nsd-id>}, path: "_admin.deployed.K8S.3"}
+        :param reset_values: force resetting values
+        :param reuse_values: force reusing values (default)
+        :param reset_then_reuse_values: force resetting values, then apply the last release's values
+        :param force: force recreation of resources if necessary
+        :return: reference to the new revision number of the KDU instance
+        """
+
+    @abc.abstractmethod
+    async def scale(
+        self,
+        kdu_instance: str,
+        scale: int,
+        resource_name: str,
+        total_timeout: float = 1800,
+        cluster_uuid: str = None,
+        kdu_model: str = None,
+        atomic: bool = True,
+        db_dict: dict = None,
+        **kwargs,
+    ) -> bool:
+        """Scale a resource in a KDU instance.
+
+        Args:
+            kdu_instance: KDU instance name
+            scale: Scale to which to set the resource
+            resource_name: Resource name
+            total_timeout: The time, in seconds, to wait for the scale operation
+                to finish
+            cluster_uuid: The UUID of the cluster
+            kdu_model: The chart/bundle reference
+            atomic: if set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            db_dict: Dictionary for any additional data
+            kwargs: Additional parameters
+                vca_id (str): VCA ID
+
+        Returns:
+            True if successful, False otherwise
+        """
+
+    @abc.abstractmethod
+    async def get_scale_count(
+        self,
+        resource_name: str,
+        kdu_instance: str,
+        cluster_uuid: str,
+        kdu_model: str,
+        timeout: float = 300,
+        **kwargs,
+    ) -> int:
+        """Get a resource scale count in a KDU instance.
+
+        Args:
+            resource_name: Resource name
+            kdu_instance: KDU instance name
+            cluster_uuid: The UUID of the cluster
+            kdu_model:    chart/bundle reference
+            timeout:  The time, in seconds, to wait
+            kwargs: Additional parameters
+
+        Returns:
+            Resource instance count
+        """
+
+    @abc.abstractmethod
+    async def rollback(
+        self, cluster_uuid: str, kdu_instance: str, revision=0, db_dict: dict = None
+    ):
+        """
+        Rolls back a previous update of a KDU instance. It would implicitly use the
+        `rollback` call. It can be used both to rollback from a Chart/Bundle version
+        update or from a reconfiguration. This would be exposed as Day-2 primitive.
+
+        :param cluster_uuid: UUID of a K8s cluster known by OSM
+        :param kdu_instance: unique name for the KDU instance
+        :param revision: revision to which changes are reverted. If omitted, only the
+            last update will be reverted
+        :param dict db_dict: where to write into database when the status changes.
+                        It contains a dict with {collection: <str>, filter: {},
+                        path: <str>},
+                            e.g. {collection: "nsrs", filter:
+                            {_id: <nsd-id>}, path: "_admin.deployed.K8S.3"}
+        :return: If successful, reference to the current active revision of the KDU
+            instance after the rollback
+        """
+
+    @abc.abstractmethod
+    async def uninstall(self, cluster_uuid: str, kdu_instance: str):
+        """
+        Removes an existing KDU instance. It would implicitly use the `delete` call
+        (this call would happen after all _terminate-config-primitive_ of the VNF are
+        invoked).
+
+        :param cluster_uuid: UUID of a K8s cluster known by OSM
+        :param kdu_instance: unique name for the KDU instance to be deleted
+        :return: True if successful
+        """
+
+    @abc.abstractmethod
+    async def exec_primitive(
+        self,
+        cluster_uuid: str = None,
+        kdu_instance: str = None,
+        primitive_name: str = None,
+        timeout: float = 300,
+        params: dict = None,
+        db_dict: dict = None,
+    ) -> str:
+        """Exec primitive (Juju action)
+
+        :param cluster_uuid str: The UUID of the cluster
+        :param kdu_instance str: The unique name of the KDU instance
+        :param primitive_name: Name of action that will be executed
+        :param timeout: Timeout for action execution
+        :param params: Dictionary of all the parameters needed for the action
+        :param db_dict: Dictionary for any additional data
+
+        :return: Returns the output of the action
+        """
+
+    @abc.abstractmethod
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrade charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if the status equals "completed"
+        """
+
+    @abc.abstractmethod
+    async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:
+        """
+        These calls will retrieve from the Chart/Bundle:
+
+            - The list of configurable values and their defaults (e.g. in Charts,
+                it would retrieve the contents of `values.yaml`).
+            - If available, any embedded help file (e.g. `readme.md`) embedded in the
+                Chart/Bundle.
+
+        :param kdu_model: chart/bundle reference
+        :param repo_url: optional, repository URL (None if tar.gz, URL in other cases,
+            even stable URL)
+        :return:
+
+        If successful, it will return the available parameters and their default values
+        as provided by the backend.
+        """
+
+    @abc.abstractmethod
+    async def help_kdu(self, kdu_model: str, repo_url: str = None) -> str:
+        """
+
+        :param kdu_model: chart/bundle reference
+        :param repo_url: optional, repository URL (None if tar.gz, URL in other cases,
+            even stable URL)
+        :return: If successful, it will return the contents of the 'readme.md'
+        """
+
+    @abc.abstractmethod
+    async def status_kdu(
+        self, cluster_uuid: str, kdu_instance: str, yaml_format: str
+    ) -> Union[str, dict]:
+        """
+        This call would retrieve the current state of a given KDU instance. It would
+        allow retrieving the _composition_ (i.e. K8s objects) and _specific
+        values_ of the configuration parameters applied to a given instance. This call
+        would be based on the `status` call.
+
+        :param cluster_uuid: UUID of a K8s cluster known by OSM
+        :param kdu_instance: unique name for the KDU instance
+        :param yaml_format: whether the result shall be returned as a YAML string or
+                                as a dictionary
+        :return: If successful, it will return the following vector of arguments:
+        - K8s `namespace` in the cluster where the KDU lives
+        - `state` of the KDU instance. It can be:
+              - UNKNOWN
+              - DEPLOYED
+              - DELETED
+              - SUPERSEDED
+              - FAILED or
+              - DELETING
+        - List of `resources` (objects) that this release consists of, sorted by kind,
+          and the status of those resources
+        - Last `deployment_time`.
+
+        """
+
+    @abc.abstractmethod
+    async def get_services(
+        self, cluster_uuid: str, kdu_instance: str, namespace: str
+    ) -> list:
+        """
+        Returns a list of services defined for the specified kdu instance.
+
+        :param cluster_uuid: UUID of a K8s cluster known by OSM
+        :param kdu_instance: unique name for the KDU instance
+        :param namespace: K8s namespace used by the KDU instance
+        :return: If successful, it will return a list of services. Each service
+        can have the following data:
+        - `name` of the service
+        - `type` type of service in the k8 cluster
+        - `ports` List of ports offered by the service, for each port includes at least
+        name, port, protocol
+        - `cluster_ip` Internal ip to be used inside k8s cluster
+        - `external_ip` List of external ips (in case they are available)
+        """
+
+    @abc.abstractmethod
+    async def get_service(
+        self, cluster_uuid: str, service_name: str, namespace: str = None
+    ) -> object:
+        """
+        Obtains the data of the specified service in the K8s cluster.
+
+        :param cluster_uuid: UUID of a K8s cluster known by OSM
+        :param service_name: name of the K8s service in the specified namespace
+        :param namespace: K8s namespace used by the KDU instance
+        :return: If successful, it will return the data of the specified service,
+        which can have the following fields:
+        - `name` of the service
+        - `type` type of service in the k8 cluster
+        - `ports` List of ports offered by the service, for each port includes at least
+        name, port, protocol
+        - `cluster_ip` Internal ip to be used inside k8s cluster
+        - `external_ip` List of external ips (in case they are available)
+        """
+
+    """
+    ####################################################################################
+    ################################### P R I V A T E ##################################
+    ####################################################################################
+    """
+
+    async def write_app_status_to_db(
+        self, db_dict: dict, status: str, detailed_status: str, operation: str
+    ) -> bool:
+        """
+        This method will write the status of the application to the database.
+
+        :param db_dict: A dictionary with the necessary database information. It shall contain values for the keys:
+            - "collection": The Mongo DB collection to write to
+            - "filter": The query filter to use in the update process
+            - "path": The dot-separated keys which target the object to be updated
+        :param status: Status of the application
+        :param detailed_status: Detailed status of the application
+        :param operation: Operation that is being performed on the application
+        :return: True if successful
+        """
+
+        if not self.db:
+            self.warning("No db => No database write")
+            return False
+
+        if not db_dict:
+            self.warning("No db_dict => No database write")
+            return False
+
+        self.log.debug("status={}".format(status))
+
+        try:
+            the_table = db_dict["collection"]
+            the_filter = db_dict["filter"]
+            the_path = db_dict["path"]
+            if the_path[-1] != ".":
+                the_path = the_path + "."
+            update_dict = {
+                the_path + "operation": operation,
+                the_path + "status": status,
+                the_path + "detailed-status": detailed_status,
+                the_path + "status-time": str(time.time()),
+            }
+
+            self.db.set_one(
+                table=the_table,
+                q_filter=the_filter,
+                update_dict=update_dict,
+                fail_on_empty=True,
+            )
+
+            # database callback
+            if self.on_update_db:
+                if asyncio.iscoroutinefunction(self.on_update_db):
+                    await self.on_update_db(
+                        the_table, the_filter, the_path, update_dict
+                    )
+                else:
+                    self.on_update_db(the_table, the_filter, the_path, update_dict)
+
+            return True
+
+        except Exception as e:
+            self.log.info("Exception writing status to database: {}".format(e))
+            return False
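
For reference, a sketch of the db_dict contract consumed by write_app_status_to_db() above; the collection, filter and path values are hypothetical:

    # Hypothetical target: the 4th K8s deployment entry of an NS record.
    db_dict = {
        "collection": "nsrs",
        "filter": {"_id": "0123456789abcdef"},
        "path": "_admin.deployed.K8S.3",
    }

    # With status="deployed", detailed_status="install complete" and
    # operation="install", the method issues a db.set_one() on "nsrs"
    # with an update_dict equivalent to:
    #   {
    #       "_admin.deployed.K8S.3.operation": "install",
    #       "_admin.deployed.K8S.3.status": "deployed",
    #       "_admin.deployed.K8S.3.detailed-status": "install complete",
    #       "_admin.deployed.K8S.3.status-time": "<unix timestamp>",
    #   }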
diff --git a/osm_lcm/n2vc/k8s_helm3_conn.py b/osm_lcm/n2vc/k8s_helm3_conn.py
new file mode 100644 (file)
index 0000000..98f4beb
--- /dev/null
@@ -0,0 +1,762 @@
+##
+# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+from typing import Union
+from shlex import quote
+import os
+import yaml
+
+from osm_lcm.n2vc.k8s_helm_base_conn import K8sHelmBaseConnector
+from osm_lcm.n2vc.exceptions import K8sException
+
+
+class K8sHelm3Connector(K8sHelmBaseConnector):
+
+    """
+    ####################################################################################
+    ################################### P U B L I C ####################################
+    ####################################################################################
+    """
+
+    def __init__(
+        self,
+        fs: object,
+        db: object,
+        kubectl_command: str = "/usr/bin/kubectl",
+        helm_command: str = "/usr/bin/helm3",
+        log: object = None,
+        on_update_db=None,
+    ):
+        """
+        Initializes helm connector for helm v3
+
+        :param fs: file system for kubernetes and helm configuration
+        :param db: database object to write current operation status
+        :param kubectl_command: path to kubectl executable
+        :param helm_command: path to helm executable
+        :param log: logger
+        :param on_update_db: callback called when k8s connector updates database
+        """
+
+        # parent class
+        K8sHelmBaseConnector.__init__(
+            self,
+            db=db,
+            log=log,
+            fs=fs,
+            kubectl_command=kubectl_command,
+            helm_command=helm_command,
+            on_update_db=on_update_db,
+        )
+
+        self.log.info("K8S Helm3 connector initialized")
+
+    async def install(
+        self,
+        cluster_uuid: str,
+        kdu_model: str,
+        kdu_instance: str,
+        atomic: bool = True,
+        timeout: float = 300,
+        params: dict = None,
+        db_dict: dict = None,
+        kdu_name: str = None,
+        namespace: str = None,
+        **kwargs,
+    ):
+        """Install a helm chart
+
+        :param cluster_uuid str: The UUID of the cluster to install to
+        :param kdu_model str: chart/reference (string), which can be either
+            of these options:
+            - a name of chart available via the repos known by OSM
+              (e.g. stable/openldap, stable/openldap:1.2.4)
+            - a path to a packaged chart (e.g. mychart.tgz)
+            - a path to an unpacked chart directory or a URL (e.g. mychart)
+        :param kdu_instance: Kdu instance name
+        :param atomic bool: If set, the installation waits until the release is
+                            active and rolls back the changes on failure
+        :param timeout float: The time, in seconds, to wait for the install
+                            to finish
+        :param params dict: Key-value pairs of instantiation parameters
+        :param db_dict dict: Database info used to write the operation status. It
+            shall contain the keys "collection", "filter" and "path"
+        :param kdu_name: Name of the KDU instance to be installed
+        :param namespace: K8s namespace to use for the KDU instance
+
+        :param kwargs: Additional parameters (None yet)
+
+        :return: True if successful
+        """
+
+        self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid))
+
+        labels_dict = None
+        if db_dict:
+            labels_dict = await self._labels_dict(db_dict, kdu_instance)
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        # init env, paths
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        # for helm3, if the namespace does not exist, it must be created
+        if namespace and namespace != "kube-system":
+            if not await self._namespace_exists(cluster_uuid, namespace):
+                try:
+                    # TODO: refactor to use kubernetes API client
+                    await self._create_namespace(cluster_uuid, namespace)
+                except Exception as e:
+                    if not await self._namespace_exists(cluster_uuid, namespace):
+                        err_msg = (
+                            "namespace {} does not exist in cluster_id {}, "
+                            "error message: {}".format(namespace, cluster_uuid, e)
+                        )
+                        self.log.error(err_msg)
+                        raise K8sException(err_msg)
+
+        await self._install_impl(
+            cluster_uuid,
+            kdu_model,
+            paths,
+            env,
+            kdu_instance,
+            atomic=atomic,
+            timeout=timeout,
+            params=params,
+            db_dict=db_dict,
+            labels=labels_dict,
+            kdu_name=kdu_name,
+            namespace=namespace,
+        )
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+        self.log.debug("Returning kdu_instance {}".format(kdu_instance))
+        return True
+
+    async def migrate(self, nsr_id, target):
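+        """Migrates a KDU instance to a different K8s host/node.
+
+        Illustrative shape of the expected ``target`` argument (the field names are
+        taken from the accesses below; the concrete values are hypothetical):
+
+            target = {
+                "vdu": {"vduId": "<kdu-instance-name>"},
+                "targetHostK8sLabels": {"kubernetes.io/hostname": "worker-2"},
+            }
+        """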
+        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+
+        # check if it has k8s deployed kdus
+        if len(db_nsr["_admin"]["deployed"]["K8s"]) < 1:
+            err_msg = "INFO: No deployed KDUs"
+            self.log.error(err_msg)
+            raise K8sException(err_msg)
+
+        kdu_id = target["vdu"]["vduId"]
+        for index, kdu in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
+            if kdu["kdu-instance"] == kdu_id:
+                namespace = kdu["namespace"]
+                cluster_uuid = kdu["k8scluster-uuid"]
+                kdu_model = kdu["kdu-model"]
+                db_dict = {
+                    "collection": "nsrs",
+                    "filter": {"_id": nsr_id},
+                    "path": "_admin.deployed.K8s.{}".format(index),
+                }
+
+                await self.upgrade(
+                    cluster_uuid,
+                    kdu_instance=kdu_id,
+                    kdu_model=kdu_model,
+                    namespace=namespace,
+                    targetHostK8sLabels=target["targetHostK8sLabels"],
+                    atomic=True,
+                    db_dict=db_dict,
+                    force=True,
+                )
+
+                return True
+
+        self.log.debug("ERROR: Unable to retrieve kdu from the database")
+
+    async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:
+        self.log.debug(
+            "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url)
+        )
+
+        return await self._exec_inspect_command(
+            inspect_command="all", kdu_model=kdu_model, repo_url=repo_url
+        )
+
+    """
+    ####################################################################################
+    ################################### P R I V A T E ##################################
+    ####################################################################################
+    """
+
+    def _init_paths_env(self, cluster_name: str, create_if_not_exist: bool = True):
+        """
+        Creates and returns base cluster and kube dirs and returns them.
+        Also created helm3 dirs according to new directory specification, paths are
+        returned and also environment variables that must be provided to execute commands
+
+        Helm 3 directory specification uses XDG categories for variable support:
+        - Cache: $XDG_CACHE_HOME, for example, ${HOME}/.cache/helm/
+        - Configuration: $XDG_CONFIG_HOME, for example, ${HOME}/.config/helm/
+        - Data: $XDG_DATA_HOME, for example ${HOME}/.local/share/helm
+
+        The variables assigned for this paths are:
+        (In the documentation the variables names are $HELM_PATH_CACHE, $HELM_PATH_CONFIG,
+        $HELM_PATH_DATA but looking and helm env the variable names are different)
+        - Cache: $HELM_CACHE_HOME
+        - Config: $HELM_CONFIG_HOME
+        - Data: $HELM_DATA_HOME
+        - helm kubeconfig: $KUBECONFIG
+
+        :param cluster_name: name of the cluster
+        :return: Tuple with a dictionary of config paths and a dictionary of helm
+            environment variables
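+
+        Illustrative return values for a cluster named "c1" (the base path comes
+        from self.fs.path; "/app/storage" is a hypothetical example):
+            paths = {
+                "kube_dir": "/app/storage/c1/.kube",
+                "kube_config": "/app/storage/c1/.kube/config",
+                "cluster_dir": "/app/storage/c1",
+            }
+            env = {
+                "HELM_CACHE_HOME": "/app/storage/c1/.cache/helm",
+                "HELM_CONFIG_HOME": "/app/storage/c1/.config/helm",
+                "HELM_DATA_HOME": "/app/storage/c1/.local/share/helm",
+                "KUBECONFIG": "/app/storage/c1/.kube/config",
+            }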
+        """
+
+        base = self.fs.path
+        if base.endswith("/") or base.endswith("\\"):
+            base = base[:-1]
+
+        # base dir for cluster
+        cluster_dir = base + "/" + cluster_name
+
+        # kube dir
+        kube_dir = cluster_dir + "/" + ".kube"
+        if create_if_not_exist and not os.path.exists(kube_dir):
+            self.log.debug("Creating dir {}".format(kube_dir))
+            os.makedirs(kube_dir)
+
+        helm_path_cache = cluster_dir + "/.cache/helm"
+        if create_if_not_exist and not os.path.exists(helm_path_cache):
+            self.log.debug("Creating dir {}".format(helm_path_cache))
+            os.makedirs(helm_path_cache)
+
+        helm_path_config = cluster_dir + "/.config/helm"
+        if create_if_not_exist and not os.path.exists(helm_path_config):
+            self.log.debug("Creating dir {}".format(helm_path_config))
+            os.makedirs(helm_path_config)
+
+        helm_path_data = cluster_dir + "/.local/share/helm"
+        if create_if_not_exist and not os.path.exists(helm_path_data):
+            self.log.debug("Creating dir {}".format(helm_path_data))
+            os.makedirs(helm_path_data)
+
+        config_filename = kube_dir + "/config"
+
+        # 2 - Prepare dictionary with paths
+        paths = {
+            "kube_dir": kube_dir,
+            "kube_config": config_filename,
+            "cluster_dir": cluster_dir,
+        }
+
+        # 3 - Prepare environment variables
+        env = {
+            "HELM_CACHE_HOME": helm_path_cache,
+            "HELM_CONFIG_HOME": helm_path_config,
+            "HELM_DATA_HOME": helm_path_data,
+            "KUBECONFIG": config_filename,
+        }
+
+        for file_name, file in paths.items():
+            if "dir" in file_name and not os.path.exists(file):
+                err_msg = "{} dir does not exist".format(file)
+                self.log.error(err_msg)
+                raise K8sException(err_msg)
+
+        return paths, env
+
+    async def _namespace_exists(self, cluster_id, namespace) -> bool:
+        self.log.debug(
+            "checking if namespace {} exists cluster_id {}".format(
+                namespace, cluster_id
+            )
+        )
+        namespaces = await self._get_namespaces(cluster_id)
+        return namespace in namespaces if namespaces else False
+
+    async def _get_namespaces(self, cluster_id: str):
+        self.log.debug("get namespaces cluster_id {}".format(cluster_id))
+
+        # init config, env
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_id, create_if_not_exist=True
+        )
+
+        command = "{} --kubeconfig={} get namespaces -o=yaml".format(
+            self.kubectl_command, quote(paths["kube_config"])
+        )
+        output, _rc = await self._local_async_exec(
+            command=command, raise_exception_on_error=True, env=env
+        )
+
+        data = yaml.load(output, Loader=yaml.SafeLoader)
+        namespaces = [item["metadata"]["name"] for item in data["items"]]
+        self.log.debug(f"namespaces {namespaces}")
+
+        return namespaces
+
+    async def _create_namespace(self, cluster_id: str, namespace: str):
+        self.log.debug(f"create namespace: {cluster_id} for cluster_id: {namespace}")
+
+        # init config, env
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_id, create_if_not_exist=True
+        )
+
+        command = "{} --kubeconfig={} create namespace {}".format(
+            self.kubectl_command, quote(paths["kube_config"]), quote(namespace)
+        )
+        _, _rc = await self._local_async_exec(
+            command=command, raise_exception_on_error=True, env=env
+        )
+        self.log.debug(f"namespace {namespace} created")
+
+        return _rc
+
+    async def _get_services(
+        self, cluster_id: str, kdu_instance: str, namespace: str, kubeconfig: str
+    ):
+        # init config, env
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_id, create_if_not_exist=True
+        )
+
+        command1 = "env KUBECONFIG={} {} get manifest {} --namespace={}".format(
+            kubeconfig, self._helm_command, quote(kdu_instance), quote(namespace)
+        )
+        command2 = "{} get --namespace={} -f -".format(
+            self.kubectl_command, quote(namespace)
+        )
+        output, _rc = await self._local_async_exec_pipe(
+            command1, command2, env=env, raise_exception_on_error=True
+        )
+        services = self._parse_services(output)
+
+        return services
+
+    async def _cluster_init(self, cluster_id, namespace, paths, env):
+        """
+        Implements the helm version dependent cluster initialization:
+        For helm3 it creates the namespace if it is not created
+        """
+        if namespace != "kube-system":
+            namespaces = await self._get_namespaces(cluster_id)
+            if namespace not in namespaces:
+                # TODO: refactor to use kubernetes API client
+                await self._create_namespace(cluster_id, namespace)
+
+        repo_list = await self.repo_list(cluster_id)
+        stable_repo = [repo for repo in repo_list if repo["name"] == "stable"]
+        if not stable_repo and self._stable_repo_url:
+            await self.repo_add(cluster_id, "stable", self._stable_repo_url)
+
+        # Returns False as no software needs to be uninstalled
+        return False
+
+    async def _uninstall_sw(self, cluster_id: str, namespace: str):
+        # nothing to do to uninstall sw
+        pass
+
+    async def _instances_list(self, cluster_id: str):
+        # init paths, env
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_id, create_if_not_exist=True
+        )
+
+        command = "{} list --all-namespaces  --output yaml".format(self._helm_command)
+        output, _rc = await self._local_async_exec(
+            command=command, raise_exception_on_error=True, env=env
+        )
+
+        if output and len(output) > 0:
+            self.log.debug("instances list output: {}".format(output))
+            return yaml.load(output, Loader=yaml.SafeLoader)
+        else:
+            return []
+
+    def _get_inspect_command(
+        self, show_command: str, kdu_model: str, repo_str: str, version: str
+    ):
+        """Generates the command to obtain the information about an Helm Chart package
+            (´helm show ...´ command)
+
+        Args:
+            show_command: the second part of the command (`helm show <show_command>`)
+            kdu_model: The name or path of a Helm Chart
+            repo_str: Helm Chart repository url
+            version: constraint with specific version of the Chart to use
+
+        Returns:
+            str: the generated Helm Chart command
+        """
+
+        inspect_command = "{} show {} {}{} {}".format(
+            self._helm_command, show_command, quote(kdu_model), repo_str, version
+        )
+        return inspect_command
+
+    def _get_get_command(
+        self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
+    ):
+        get_command = (
+            "env KUBECONFIG={} {} get {} {} --namespace={} --output yaml".format(
+                kubeconfig,
+                self._helm_command,
+                get_command,
+                quote(kdu_instance),
+                quote(namespace),
+            )
+        )
+        return get_command
+
+    async def _status_kdu(
+        self,
+        cluster_id: str,
+        kdu_instance: str,
+        namespace: str = None,
+        yaml_format: bool = False,
+        show_error_log: bool = False,
+    ) -> Union[str, dict]:
+        self.log.debug(
+            "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace)
+        )
+
+        if not namespace:
+            namespace = "kube-system"
+
+        # init config, env
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_id, create_if_not_exist=True
+        )
+        command = "env KUBECONFIG={} {} status {} --namespace={} --output yaml".format(
+            paths["kube_config"],
+            self._helm_command,
+            quote(kdu_instance),
+            quote(namespace),
+        )
+
+        output, rc = await self._local_async_exec(
+            command=command,
+            raise_exception_on_error=True,
+            show_error_log=show_error_log,
+            env=env,
+        )
+
+        if yaml_format:
+            return str(output)
+
+        if rc != 0:
+            return None
+
+        data = yaml.load(output, Loader=yaml.SafeLoader)
+
+        # remove the 'notes' field from the info section
+        try:
+            del data.get("info")["notes"]
+        except KeyError:
+            pass
+
+        # parse the manifest to a list of dictionaries
+        if "manifest" in data:
+            manifest_str = data.get("manifest")
+            manifest_docs = yaml.load_all(manifest_str, Loader=yaml.SafeLoader)
+
+            data["manifest"] = []
+            for doc in manifest_docs:
+                data["manifest"].append(doc)
+
+        return data
+
+    def _get_install_command(
+        self,
+        kdu_model: str,
+        kdu_instance: str,
+        namespace: str,
+        labels: dict,
+        params_str: str,
+        version: str,
+        atomic: bool,
+        timeout: float,
+        kubeconfig: str,
+    ) -> str:
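+        """Generates the helm install command line.
+
+        Illustrative shape of a generated command (the paths, names and version are
+        hypothetical; the params_str and post-renderer fragments are omitted):
+            env KUBECONFIG=/app/storage/c1/.kube/config /usr/bin/helm3 install
+            myrelease --atomic --output yaml --timeout 300s --namespace myns
+            mychart --version 1.2.3
+        """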
+        timeout_str = ""
+        if timeout:
+            timeout_str = "--timeout {}s".format(timeout)
+
+        # atomic
+        atomic_str = ""
+        if atomic:
+            atomic_str = "--atomic"
+        # namespace
+        namespace_str = ""
+        if namespace:
+            namespace_str = "--namespace {}".format(quote(namespace))
+
+        # version
+        version_str = ""
+        if version:
+            version_str = "--version {}".format(version)
+
+        # labels
+        post_renderer_args = []
+        post_renderer_str = post_renderer_args_str = ""
+        if labels and self.podLabels_post_renderer_path:
+            post_renderer_args.append(
+                "{}={}".format(
+                    self.podLabels_post_renderer_path,
+                    " ".join(
+                        ["{}:{}".format(key, value) for key, value in labels.items()]
+                    ),
+                )
+            )
+
+        if len(post_renderer_args) > 0 and self.main_post_renderer_path:
+            post_renderer_str = "--post-renderer {}".format(
+                self.main_post_renderer_path,
+            )
+            post_renderer_args_str += (
+                "--post-renderer-args '" + ",".join(post_renderer_args) + "'"
+            )
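+        # Illustrative expansion (hypothetical paths and label): with
+        # labels={"ns_id": "1234"} the fragments above become
+        #   --post-renderer /path/to/mainPostRenderer
+        #   --post-renderer-args '/path/to/podLabels=ns_id:1234'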
+
+        command = (
+            "env KUBECONFIG={kubeconfig} {helm} install {name} {atomic} --output yaml  "
+            "{params} {timeout} {ns} {post_renderer} {post_renderer_args} {model} {ver}".format(
+                kubeconfig=kubeconfig,
+                helm=self._helm_command,
+                name=quote(kdu_instance),
+                atomic=atomic_str,
+                params=params_str,
+                timeout=timeout_str,
+                ns=namespace_str,
+                post_renderer=post_renderer_str,
+                post_renderer_args=post_renderer_args_str,
+                model=quote(kdu_model),
+                ver=version_str,
+            )
+        )
+        return command
+
+    def _get_upgrade_scale_command(
+        self,
+        kdu_model: str,
+        kdu_instance: str,
+        namespace: str,
+        scale: int,
+        labels: dict,
+        version: str,
+        atomic: bool,
+        replica_str: str,
+        timeout: float,
+        resource_name: str,
+        kubeconfig: str,
+    ) -> str:
+        """Generates the command to scale a Helm Chart release
+
+        Args:
+            kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
+            kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
+            namespace (str): Namespace where this KDU instance is deployed
+            scale (int): Scale count
+            version (str): Constraint with specific version of the Chart to use
+            atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            replica_str (str): The key under resource_name key where the scale count is stored
+            timeout (float): The time, in seconds, to wait
+            resource_name (str): The KDU's resource to scale
+            kubeconfig (str): Kubeconfig file path
+
+        Returns:
+            str: command to scale a Helm Chart release
+        """
+
+        # scale
+        if resource_name:
+            scale_dict = {"{}.{}".format(resource_name, replica_str): scale}
+        else:
+            scale_dict = {replica_str: scale}
+
+        scale_str = self._params_to_set_option(scale_dict)
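+        # Illustrative example (hypothetical values): resource_name="mysql",
+        # replica_str="replicaCount" and scale=3 produce
+        # scale_dict = {"mysql.replicaCount": 3}, which _params_to_set_option is
+        # expected to render as a "--set mysql.replicaCount=3" fragment.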
+
+        return self._get_upgrade_command(
+            kdu_model=kdu_model,
+            kdu_instance=kdu_instance,
+            namespace=namespace,
+            params_str=scale_str,
+            labels=labels,
+            version=version,
+            atomic=atomic,
+            timeout=timeout,
+            kubeconfig=kubeconfig,
+        )
+
+    def _get_upgrade_command(
+        self,
+        kdu_model: str,
+        kdu_instance: str,
+        namespace: str,
+        params_str: str,
+        labels: dict,
+        version: str,
+        atomic: bool,
+        timeout: float,
+        kubeconfig: str,
+        targetHostK8sLabels: dict = None,
+        reset_values: bool = False,
+        reuse_values: bool = True,
+        reset_then_reuse_values: bool = False,
+        force: bool = False,
+    ) -> str:
+        """Generates the command to upgrade a Helm Chart release
+
+        Args:
+            kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
+            kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
+            namespace (str): Namespace where this KDU instance is deployed
+            params_str (str): Params used to upgrade the Helm Chart release
+            version (str): Constraint with specific version of the Chart to use
+            atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            timeout (float): The time, in seconds, to wait
+            kubeconfig (str): Kubeconfig file path
+            reset_values(bool): If set, helm resets values instead of reusing previous values.
+            reuse_values(bool): If set, helm reuses previous values.
+            reset_then_reuse_values(bool): If set, helm resets values, then apply the last release's values
+            force (bool): If set, helm forces resource updates through a replacement strategy. This may recreate pods.
+        Returns:
+            str: command to upgrade a Helm Chart release
+        """
+
+        timeout_str = ""
+        if timeout:
+            timeout_str = "--timeout {}s".format(timeout)
+
+        # atomic
+        atomic_str = ""
+        if atomic:
+            atomic_str = "--atomic"
+
+        # force
+        force_str = ""
+        if force:
+            force_str = "--force "
+
+        # version
+        version_str = ""
+        if version:
+            version_str = "--version {}".format(quote(version))
+
+        # namespace
+        namespace_str = ""
+        if namespace:
+            namespace_str = "--namespace {}".format(quote(namespace))
+
+        # reset, reuse or reset_then_reuse values
+        on_values_str = "--reuse-values"
+        if reset_values:
+            on_values_str = "--reset-values"
+        elif reuse_values:
+            on_values_str = "--reuse-values"
+        elif reset_then_reuse_values:
+            on_values_str = "--reset-then-reuse-values"
+
+        # labels
+        post_renderer_args = []
+        post_renderer_str = post_renderer_args_str = ""
+        if labels and self.podLabels_post_renderer_path:
+            post_renderer_args.append(
+                "{}={}".format(
+                    self.podLabels_post_renderer_path,
+                    " ".join(
+                        ["{}:{}".format(key, value) for key, value in labels.items()]
+                    ),
+                )
+            )
+
+        # migration
+        if targetHostK8sLabels and self.nodeSelector_post_renderer_path:
+            post_renderer_args.append(
+                "{}={}".format(
+                    self.nodeSelector_post_renderer_path,
+                    " ".join(
+                        [
+                            "{}:{}".format(key, value)
+                            for key, value in targetHostK8sLabels.items()
+                        ]
+                    ),
+                )
+            )
+
+        if len(post_renderer_args) > 0 and self.main_post_renderer_path:
+            post_renderer_str = "--post-renderer {}".format(
+                self.main_post_renderer_path,
+            )
+            post_renderer_args_str += (
+                "--post-renderer-args '" + ",".join(post_renderer_args) + "'"
+            )
+
+        command = (
+            "env KUBECONFIG={kubeconfig} {helm} upgrade {name} {model} {namespace} {atomic} {force}"
+            "--output yaml {params} {timeout} {post_renderer} {post_renderer_args} {on_values} {ver}"
+        ).format(
+            kubeconfig=kubeconfig,
+            helm=self._helm_command,
+            name=quote(kdu_instance),
+            namespace=namespace_str,
+            atomic=atomic_str,
+            force=force_str,
+            params=params_str,
+            timeout=timeout_str,
+            post_renderer=post_renderer_str,
+            post_renderer_args=post_renderer_args_str,
+            model=quote(kdu_model),
+            on_values=on_values_str,
+            ver=version_str,
+        )
+        return command
+
+    def _get_rollback_command(
+        self, kdu_instance: str, namespace: str, revision: float, kubeconfig: str
+    ) -> str:
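+        # Illustrative generated command (hypothetical values):
+        #   env KUBECONFIG=/app/storage/c1/.kube/config /usr/bin/helm3 rollback
+        #   myrelease 2 --namespace=myns --wait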
+        return "env KUBECONFIG={} {} rollback {} {} --namespace={} --wait".format(
+            kubeconfig,
+            self._helm_command,
+            quote(kdu_instance),
+            revision,
+            quote(namespace),
+        )
+
+    def _get_uninstall_command(
+        self, kdu_instance: str, namespace: str, kubeconfig: str
+    ) -> str:
+        return "env KUBECONFIG={} {} uninstall {} --namespace={}".format(
+            kubeconfig, self._helm_command, quote(kdu_instance), quote(namespace)
+        )
+
+    def _get_helm_chart_repos_ids(self, cluster_uuid) -> list:
+        repo_ids = []
+        cluster_filter = {"_admin.helm-chart-v3.id": cluster_uuid}
+        cluster = self.db.get_one("k8sclusters", cluster_filter)
+        if cluster:
+            repo_ids = cluster.get("_admin").get("helm_chart_repos") or []
+            return repo_ids
+        else:
+            raise K8sException(
+                "k8scluster with helm-id {} not found".format(cluster_uuid)
+            )
diff --git a/osm_lcm/n2vc/k8s_helm_base_conn.py b/osm_lcm/n2vc/k8s_helm_base_conn.py
new file mode 100644 (file)
index 0000000..d069046
--- /dev/null
@@ -0,0 +1,2403 @@
+##
+# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+import abc
+import asyncio
+from typing import Union
+from shlex import quote
+import random
+import time
+import shlex
+import shutil
+import stat
+import os
+import yaml
+from uuid import uuid4
+from urllib.parse import urlparse
+
+from osm_lcm.n2vc.config import EnvironConfig
+from osm_lcm.n2vc.exceptions import K8sException
+from osm_lcm.n2vc.k8s_conn import K8sConnector
+from osm_lcm.n2vc.kubectl import Kubectl
+
+
+class K8sHelmBaseConnector(K8sConnector):
+
+    """
+    ####################################################################################
+    ################################### P U B L I C ####################################
+    ####################################################################################
+    """
+
+    service_account = "osm"
+
+    def __init__(
+        self,
+        fs: object,
+        db: object,
+        kubectl_command: str = "/usr/bin/kubectl",
+        helm_command: str = "/usr/bin/helm",
+        log: object = None,
+        on_update_db=None,
+    ):
+        """
+
+        :param fs: file system for kubernetes and helm configuration
+        :param db: database object to write current operation status
+        :param kubectl_command: path to kubectl executable
+        :param helm_command: path to helm executable
+        :param log: logger
+        :param on_update_db: callback called when k8s connector updates database
+        """
+
+        # parent class
+        K8sConnector.__init__(self, db=db, log=log, on_update_db=on_update_db)
+
+        self.log.info("Initializing K8S Helm connector")
+
+        self.config = EnvironConfig()
+        # random numbers for release name generation
+        random.seed(time.time())
+
+        # the file system
+        self.fs = fs
+
+        # exception if kubectl is not installed
+        self.kubectl_command = kubectl_command
+        self._check_file_exists(filename=kubectl_command, exception_if_not_exists=True)
+
+        # exception if helm is not installed
+        self._helm_command = helm_command
+        self._check_file_exists(filename=helm_command, exception_if_not_exists=True)
+
+        # exception if main post renderer executable is not present
+        self.main_post_renderer_path = EnvironConfig(prefixes=["OSMLCM_"]).get(
+            "mainpostrendererpath"
+        )
+        if self.main_post_renderer_path:
+            self._check_file_exists(
+                filename=self.main_post_renderer_path, exception_if_not_exists=True
+            )
+
+        # exception if podLabels post renderer executable is not present
+        self.podLabels_post_renderer_path = EnvironConfig(prefixes=["OSMLCM_"]).get(
+            "podlabelspostrendererpath"
+        )
+        if self.podLabels_post_renderer_path:
+            self._check_file_exists(
+                filename=self.podLabels_post_renderer_path, exception_if_not_exists=True
+            )
+
+        # exception if nodeSelector post renderer executable is not present
+        self.nodeSelector_post_renderer_path = EnvironConfig(prefixes=["OSMLCM_"]).get(
+            "nodeselectorpostrendererpath"
+        )
+        if self.nodeSelector_post_renderer_path:
+            self._check_file_exists(
+                filename=self.nodeSelector_post_renderer_path,
+                exception_if_not_exists=True,
+            )
+
+        # obtain stable repo url from config or apply default
+        self._stable_repo_url = self.config.get("stablerepourl")
+        if self._stable_repo_url == "None":
+            self._stable_repo_url = None
+
+        # Lock to avoid concurrent execution of helm commands
+        self.cmd_lock = asyncio.Lock()
+
+    def _get_namespace(self, cluster_uuid: str) -> str:
+        """
+        Obtains the namespace used by the cluster with the uuid passed as argument
+
+        :param cluster_uuid: cluster's uuid
+        """
+
+        # first, obtain the cluster corresponding to the uuid passed by argument
+        k8scluster = self.db.get_one(
+            "k8sclusters", q_filter={"_id": cluster_uuid}, fail_on_empty=False
+        )
+        return k8scluster.get("namespace")
+
+    async def init_env(
+        self,
+        k8s_creds: str,
+        namespace: str = "kube-system",
+        reuse_cluster_uuid=None,
+        **kwargs,
+    ) -> tuple[str, bool]:
+        """
+        It prepares a given K8s cluster environment to run Charts
+
+        :param k8s_creds: credentials to access a given K8s cluster, i.e. a valid
+            '.kube/config'
+        :param namespace: optional namespace to be used for helm. By default,
+            'kube-system' will be used
+        :param reuse_cluster_uuid: existing cluster uuid for reuse
+        :param kwargs: Additional parameters (None yet)
+        :return: uuid of the K8s cluster and True if connector has installed some
+            software in the cluster
+        (on error, an exception will be raised)
+        """
+
+        if reuse_cluster_uuid:
+            cluster_id = reuse_cluster_uuid
+        else:
+            cluster_id = str(uuid4())
+
+        self.log.debug(
+            "Initializing K8S Cluster {}. namespace: {}".format(cluster_id, namespace)
+        )
+
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_id, create_if_not_exist=True
+        )
+        # write the kubeconfig and restrict its permissions to the owner
+        # (the third positional argument of open() is the buffer size, not the
+        # file mode, so the permissions are set explicitly with os.chmod)
+        with open(paths["kube_config"], "w") as f:
+            f.write(k8s_creds)
+        os.chmod(paths["kube_config"], stat.S_IRUSR | stat.S_IWUSR)
+
+        # Code with initialization specific of helm version
+        n2vc_installed_sw = await self._cluster_init(cluster_id, namespace, paths, env)
+
+        # sync fs with local data
+        self.fs.reverse_sync(from_path=cluster_id)
+
+        self.log.info("Cluster {} initialized".format(cluster_id))
+
+        return cluster_id, n2vc_installed_sw
+
+    async def repo_add(
+        self,
+        cluster_uuid: str,
+        name: str,
+        url: str,
+        repo_type: str = "chart",
+        cert: str = None,
+        user: str = None,
+        password: str = None,
+        oci: bool = False,
+    ):
+        self.log.debug(
+            "Cluster {}, adding {} repository {}. URL: {}".format(
+                cluster_uuid, repo_type, name, url
+            )
+        )
+
+        # init_env
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        if oci:
+            if user and password:
+                host_port = urlparse(url).netloc if url.startswith("oci://") else url
+                # helm registry login url
+                command = "env KUBECONFIG={} {} registry login {}".format(
+                    paths["kube_config"], self._helm_command, quote(host_port)
+                )
+            else:
+                self.log.debug(
+                    "OCI registry login is not needed for repo: {}".format(name)
+                )
+                return
+        else:
+            # helm repo add name url
+            command = "env KUBECONFIG={} {} repo add {} {}".format(
+                paths["kube_config"], self._helm_command, quote(name), quote(url)
+            )
+
+        if cert:
+            temp_cert_file = os.path.join(
+                self.fs.path, "{}/helmcerts/".format(cluster_uuid), "temp.crt"
+            )
+            os.makedirs(os.path.dirname(temp_cert_file), exist_ok=True)
+            with open(temp_cert_file, "w") as the_cert:
+                the_cert.write(cert)
+            command += " --ca-file {}".format(quote(temp_cert_file))
+
+        if user:
+            command += " --username={}".format(quote(user))
+
+        if password:
+            command += " --password={}".format(quote(password))
+
+        self.log.debug("adding repo: {}".format(command))
+        await self._local_async_exec(
+            command=command, raise_exception_on_error=True, env=env
+        )
+
+        if not oci:
+            # helm repo update
+            command = "env KUBECONFIG={} {} repo update {}".format(
+                paths["kube_config"], self._helm_command, quote(name)
+            )
+            self.log.debug("updating repo: {}".format(command))
+            await self._local_async_exec(
+                command=command, raise_exception_on_error=False, env=env
+            )
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+    async def repo_update(self, cluster_uuid: str, name: str, repo_type: str = "chart"):
+        self.log.debug(
+            "Cluster {}, updating {} repository {}".format(
+                cluster_uuid, repo_type, name
+            )
+        )
+
+        # init_env
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        # helm repo update
+        command = "{} repo update {}".format(self._helm_command, quote(name))
+        self.log.debug("updating repo: {}".format(command))
+        await self._local_async_exec(
+            command=command, raise_exception_on_error=False, env=env
+        )
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+    async def repo_list(self, cluster_uuid: str) -> list:
+        """
+        Get the list of registered repositories
+
+        :return: list of registered repositories: [ {"name": ..., "url": ...}, ... ]
+        """
+
+        self.log.debug("list repositories for cluster {}".format(cluster_uuid))
+
+        # config filename
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        command = "env KUBECONFIG={} {} repo list --output yaml".format(
+            paths["kube_config"], self._helm_command
+        )
+
+        # Set raise_exception_on_error to False: if there are no repos we just want an empty list
+        output, _rc = await self._local_async_exec(
+            command=command, raise_exception_on_error=False, env=env
+        )
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+        if _rc == 0 and output and len(output) > 0:
+            repos = yaml.load(output, Loader=yaml.SafeLoader)
+            # unify format between helm2 and helm3 setting all keys lowercase
+            return self._lower_keys_list(repos)
+        return []
+
+    async def repo_remove(self, cluster_uuid: str, name: str):
+        self.log.debug(
+            "remove {} repositories for cluster {}".format(name, cluster_uuid)
+        )
+
+        # init env, paths
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        command = "env KUBECONFIG={} {} repo remove {}".format(
+            paths["kube_config"], self._helm_command, quote(name)
+        )
+        await self._local_async_exec(
+            command=command, raise_exception_on_error=True, env=env
+        )
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+    async def reset(
+        self,
+        cluster_uuid: str,
+        force: bool = False,
+        uninstall_sw: bool = False,
+        **kwargs,
+    ) -> bool:
+        """Reset a cluster
+
+        Resets the Kubernetes cluster by removing the helm deployment that represents it.
+
+        :param cluster_uuid: The UUID of the cluster to reset
+        :param force: Boolean to force the reset
+        :param uninstall_sw: Boolean to uninstall the software installed by the connector
+        :param kwargs: Additional parameters (None yet)
+        :return: Returns True if successful or raises an exception.
+        """
+        namespace = self._get_namespace(cluster_uuid=cluster_uuid)
+        self.log.debug(
+            "Resetting K8s environment. cluster uuid: {} uninstall={}".format(
+                cluster_uuid, uninstall_sw
+            )
+        )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        # uninstall releases if needed.
+        if uninstall_sw:
+            releases = await self.instances_list(cluster_uuid=cluster_uuid)
+            if len(releases) > 0:
+                if force:
+                    for r in releases:
+                        try:
+                            kdu_instance = r.get("name")
+                            chart = r.get("chart")
+                            self.log.debug(
+                                "Uninstalling {} -> {}".format(chart, kdu_instance)
+                            )
+                            await self.uninstall(
+                                cluster_uuid=cluster_uuid, kdu_instance=kdu_instance
+                            )
+                        except Exception as e:
+                            # do not raise: uninstalling previously installed
+                            # releases has been seen to fail in some cases
+                            self.log.warning(
+                                "Error uninstalling release {}: {}".format(
+                                    kdu_instance, e
+                                )
+                            )
+                else:
+                    msg = (
+                        "Cluster uuid: {} has releases and force is not set. "
+                        "Leaving the K8s helm environment in place"
+                    ).format(cluster_uuid)
+                    self.log.warning(msg)
+                    # allow removing the k8s cluster without uninstalling the software
+                    uninstall_sw = False
+
+        if uninstall_sw:
+            await self._uninstall_sw(cluster_id=cluster_uuid, namespace=namespace)
+
+        # delete cluster directory
+        self.log.debug("Removing directory {}".format(cluster_uuid))
+        self.fs.file_delete(cluster_uuid, ignore_non_exist=True)
+        # Also remove the local directory if it still exists
+        direct = self.fs.path + "/" + cluster_uuid
+        shutil.rmtree(direct, ignore_errors=True)
+
+        return True
+
+    def _is_helm_chart_a_file(self, chart_name: str):
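+        # heuristic: repo references look like "repo/chart" (a single "/"), while
+        # local chart paths such as "./charts/mychart" contain more than one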
+        return chart_name.count("/") > 1
+
+    @staticmethod
+    def _is_helm_chart_a_url(chart_name: str):
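+        # e.g. "https://example.com/charts/app.tgz" -> True, "stable/openldap" -> False
+        # (urlparse of a bare chart reference yields no scheme/netloc)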
+        result = urlparse(chart_name)
+        return all([result.scheme, result.netloc])
+
+    async def _install_impl(
+        self,
+        cluster_id: str,
+        kdu_model: str,
+        paths: dict,
+        env: dict,
+        kdu_instance: str,
+        atomic: bool = True,
+        timeout: float = 300,
+        params: dict = None,
+        db_dict: dict = None,
+        labels: dict = None,
+        kdu_name: str = None,
+        namespace: str = None,
+    ):
+        # init env, paths
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_id, create_if_not_exist=True
+        )
+
+        # params to str
+        params_str, file_to_delete = self._params_to_file_option(
+            cluster_id=cluster_id, params=params
+        )
+
+        kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_id)
+
+        command = self._get_install_command(
+            kdu_model,
+            kdu_instance,
+            namespace,
+            labels,
+            params_str,
+            version,
+            atomic,
+            timeout,
+            paths["kube_config"],
+        )
+
+        self.log.debug("installing: {}".format(command))
+
+        if atomic:
+            # exec helm in a task
+            exec_task = asyncio.ensure_future(
+                coro_or_future=self._local_async_exec(
+                    command=command, raise_exception_on_error=False, env=env
+                )
+            )
+
+            # write status in another task
+            status_task = asyncio.ensure_future(
+                coro_or_future=self._store_status(
+                    cluster_id=cluster_id,
+                    kdu_instance=kdu_instance,
+                    namespace=namespace,
+                    db_dict=db_dict,
+                    operation="install",
+                )
+            )
+
+            # wait for execution task
+            await asyncio.wait([exec_task])
+
+            # cancel status task
+            status_task.cancel()
+
+            output, rc = exec_task.result()
+
+        else:
+            output, rc = await self._local_async_exec(
+                command=command, raise_exception_on_error=False, env=env
+            )
+
+        # remove temporal values yaml file
+        if file_to_delete:
+            os.remove(file_to_delete)
+
+        # write final status
+        await self._store_status(
+            cluster_id=cluster_id,
+            kdu_instance=kdu_instance,
+            namespace=namespace,
+            db_dict=db_dict,
+            operation="install",
+        )
+
+        if rc != 0:
+            msg = "Error executing command: {}\nOutput: {}".format(command, output)
+            self.log.error(msg)
+            raise K8sException(msg)
+
+    async def upgrade(
+        self,
+        cluster_uuid: str,
+        kdu_instance: str,
+        kdu_model: str = None,
+        atomic: bool = True,
+        timeout: float = 300,
+        params: dict = None,
+        db_dict: dict = None,
+        namespace: str = None,
+        targetHostK8sLabels: dict = None,
+        reset_values: bool = False,
+        reuse_values: bool = True,
+        reset_then_reuse_values: bool = False,
+        force: bool = False,
+    ):
+        self.log.debug("upgrading {} in cluster {}".format(kdu_model, cluster_uuid))
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        # if no namespace was given, look up the instance to obtain it
+        if not namespace:
+            instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
+            if not instance_info:
+                raise K8sException("kdu_instance {} not found".format(kdu_instance))
+            namespace = instance_info["namespace"]
+
+        # init env, paths
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        # params to str
+        params_str, file_to_delete = self._params_to_file_option(
+            cluster_id=cluster_uuid, params=params
+        )
+
+        kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_uuid)
+
+        labels_dict = None
+        if db_dict and await self._contains_labels(
+            kdu_instance, namespace, paths["kube_config"], env
+        ):
+            labels_dict = await self._labels_dict(db_dict, kdu_instance)
+
+        command = self._get_upgrade_command(
+            kdu_model,
+            kdu_instance,
+            namespace,
+            params_str,
+            labels_dict,
+            version,
+            atomic,
+            timeout,
+            paths["kube_config"],
+            targetHostK8sLabels,
+            reset_values,
+            reuse_values,
+            reset_then_reuse_values,
+            force,
+        )
+
+        self.log.debug("upgrading: {}".format(command))
+
+        if atomic:
+            # exec helm in a task
+            exec_task = asyncio.ensure_future(
+                coro_or_future=self._local_async_exec(
+                    command=command, raise_exception_on_error=False, env=env
+                )
+            )
+            # write status in another task
+            status_task = asyncio.ensure_future(
+                coro_or_future=self._store_status(
+                    cluster_id=cluster_uuid,
+                    kdu_instance=kdu_instance,
+                    namespace=namespace,
+                    db_dict=db_dict,
+                    operation="upgrade",
+                )
+            )
+
+            # wait for execution task
+            await asyncio.wait([exec_task])
+
+            # cancel status task
+            status_task.cancel()
+            output, rc = exec_task.result()
+
+        else:
+            output, rc = await self._local_async_exec(
+                command=command, raise_exception_on_error=False, env=env
+            )
+
+        # remove temporal values yaml file
+        if file_to_delete:
+            os.remove(file_to_delete)
+
+        # write final status
+        await self._store_status(
+            cluster_id=cluster_uuid,
+            kdu_instance=kdu_instance,
+            namespace=namespace,
+            db_dict=db_dict,
+            operation="upgrade",
+        )
+
+        if rc != 0:
+            msg = "Error executing command: {}\nOutput: {}".format(command, output)
+            self.log.error(msg)
+            raise K8sException(msg)
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+        # return new revision number
+        instance = await self.get_instance_info(
+            cluster_uuid=cluster_uuid, kdu_instance=kdu_instance
+        )
+        if instance:
+            revision = int(instance.get("revision"))
+            self.log.debug("New revision: {}".format(revision))
+            return revision
+        else:
+            return 0
+
+    async def scale(
+        self,
+        kdu_instance: str,
+        scale: int,
+        resource_name: str,
+        total_timeout: float = 1800,
+        cluster_uuid: str = None,
+        kdu_model: str = None,
+        atomic: bool = True,
+        db_dict: dict = None,
+        **kwargs,
+    ):
+        """Scale a resource in a Helm Chart.
+
+        Args:
+            kdu_instance: KDU instance name
+            scale: Scale to which to set the resource
+            resource_name: Resource name
+            total_timeout: The time, in seconds, to wait
+            cluster_uuid: The UUID of the cluster
+            kdu_model: The chart reference
+            atomic: if set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            db_dict: Dictionary for any additional data
+            kwargs: Additional parameters
+
+        Returns:
+            True if successful, False otherwise
+        """
+
+        debug_mgs = "scaling {} in cluster {}".format(kdu_model, cluster_uuid)
+        if resource_name:
+            debug_mgs = "scaling resource {} in model {} (cluster {})".format(
+                resource_name, kdu_model, cluster_uuid
+            )
+
+        self.log.debug(debug_mgs)
+
+        # look for instance to obtain namespace
+        # get_instance_info function calls the sync command
+        instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
+        if not instance_info:
+            raise K8sException("kdu_instance {} not found".format(kdu_instance))
+
+        # init env, paths
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        # version
+        kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_uuid)
+
+        repo_url = await self._find_repo(kdu_model, cluster_uuid)
+
+        _, replica_str = await self._get_replica_count_url(
+            kdu_model, repo_url, resource_name
+        )
+
+        labels_dict = None
+        if db_dict and await self._contains_labels(
+            kdu_instance, instance_info["namespace"], paths["kube_config"], env
+        ):
+            labels_dict = await self._labels_dict(db_dict, kdu_instance)
+
+        command = self._get_upgrade_scale_command(
+            kdu_model,
+            kdu_instance,
+            instance_info["namespace"],
+            scale,
+            labels_dict,
+            version,
+            atomic,
+            replica_str,
+            total_timeout,
+            resource_name,
+            paths["kube_config"],
+        )
+
+        self.log.debug("scaling: {}".format(command))
+
+        if atomic:
+            # exec helm in a task
+            exec_task = asyncio.ensure_future(
+                coro_or_future=self._local_async_exec(
+                    command=command, raise_exception_on_error=False, env=env
+                )
+            )
+            # write status in another task
+            status_task = asyncio.ensure_future(
+                coro_or_future=self._store_status(
+                    cluster_id=cluster_uuid,
+                    kdu_instance=kdu_instance,
+                    namespace=instance_info["namespace"],
+                    db_dict=db_dict,
+                    operation="scale",
+                )
+            )
+
+            # wait for execution task
+            await asyncio.wait([exec_task])
+
+            # cancel status task
+            status_task.cancel()
+            output, rc = exec_task.result()
+
+        else:
+            output, rc = await self._local_async_exec(
+                command=command, raise_exception_on_error=False, env=env
+            )
+
+        # write final status
+        await self._store_status(
+            cluster_id=cluster_uuid,
+            kdu_instance=kdu_instance,
+            namespace=instance_info["namespace"],
+            db_dict=db_dict,
+            operation="scale",
+        )
+
+        if rc != 0:
+            msg = "Error executing command: {}\nOutput: {}".format(command, output)
+            self.log.error(msg)
+            raise K8sException(msg)
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+        return True
+
+    async def get_scale_count(
+        self,
+        resource_name: str,
+        kdu_instance: str,
+        cluster_uuid: str,
+        kdu_model: str,
+        **kwargs,
+    ) -> int:
+        """Get a resource scale count.
+
+        Args:
+            cluster_uuid: The UUID of the cluster
+            resource_name: Resource name
+            kdu_instance: KDU instance name
+            kdu_model: The name or path of an Helm Chart
+            kwargs: Additional parameters
+
+        Returns:
+            Resource instance count
+        """
+
+        self.log.debug(
+            "getting scale count for {} in cluster {}".format(kdu_model, cluster_uuid)
+        )
+
+        # look for instance to obtain namespace
+        instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
+        if not instance_info:
+            raise K8sException("kdu_instance {} not found".format(kdu_instance))
+
+        # init env, paths
+        paths, _ = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        replicas = await self._get_replica_count_instance(
+            kdu_instance=kdu_instance,
+            namespace=instance_info["namespace"],
+            kubeconfig=paths["kube_config"],
+            resource_name=resource_name,
+        )
+
+        self.log.debug(
+            f"Number of replicas of the KDU instance {kdu_instance} and resource {resource_name} obtained: {replicas}"
+        )
+
+        # Get default value if scale count is not found from provided values
+        # Important note: this piece of code shall only be executed in the first scaling operation,
+        # since it is expected that the _get_replica_count_instance is able to obtain the number of
+        # replicas when a scale operation was already conducted previously for this KDU/resource!
+        if replicas is None:
+            repo_url = await self._find_repo(
+                kdu_model=kdu_model, cluster_uuid=cluster_uuid
+            )
+            replicas, _ = await self._get_replica_count_url(
+                kdu_model=kdu_model, repo_url=repo_url, resource_name=resource_name
+            )
+
+            self.log.debug(
+                f"Number of replicas of the Helm Chart package for KDU instance {kdu_instance} and resource "
+                f"{resource_name} obtained: {replicas}"
+            )
+
+            if replicas is None:
+                msg = "Replica count not found. Cannot be scaled"
+                self.log.error(msg)
+                raise K8sException(msg)
+
+        return int(replicas)
+
+    async def rollback(
+        self, cluster_uuid: str, kdu_instance: str, revision=0, db_dict: dict = None
+    ):
+        self.log.debug(
+            "rollback kdu_instance {} to revision {} from cluster {}".format(
+                kdu_instance, revision, cluster_uuid
+            )
+        )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        # look for instance to obtain namespace
+        instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
+        if not instance_info:
+            raise K8sException("kdu_instance {} not found".format(kdu_instance))
+
+        # init env, paths
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        command = self._get_rollback_command(
+            kdu_instance, instance_info["namespace"], revision, paths["kube_config"]
+        )
+
+        self.log.debug("rolling_back: {}".format(command))
+
+        # exec helm in a task
+        exec_task = asyncio.ensure_future(
+            coro_or_future=self._local_async_exec(
+                command=command, raise_exception_on_error=False, env=env
+            )
+        )
+        # write status in another task
+        status_task = asyncio.ensure_future(
+            coro_or_future=self._store_status(
+                cluster_id=cluster_uuid,
+                kdu_instance=kdu_instance,
+                namespace=instance_info["namespace"],
+                db_dict=db_dict,
+                operation="rollback",
+            )
+        )
+
+        # wait for execution task
+        await asyncio.wait([exec_task])
+
+        # cancel status task
+        status_task.cancel()
+
+        output, rc = exec_task.result()
+
+        # write final status
+        await self._store_status(
+            cluster_id=cluster_uuid,
+            kdu_instance=kdu_instance,
+            namespace=instance_info["namespace"],
+            db_dict=db_dict,
+            operation="rollback",
+        )
+
+        if rc != 0:
+            msg = "Error executing command: {}\nOutput: {}".format(command, output)
+            self.log.error(msg)
+            raise K8sException(msg)
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+        # return new revision number
+        instance = await self.get_instance_info(
+            cluster_uuid=cluster_uuid, kdu_instance=kdu_instance
+        )
+        if instance:
+            revision = int(instance.get("revision"))
+            self.log.debug("New revision: {}".format(revision))
+            return revision
+        else:
+            return 0
+
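+    # Sketch (mirrors the code above, not a separate implementation): helm runs
+    # in one task while a status reporter runs in another; once helm finishes,
+    # the reporter is cancelled and a final status is written:
+    #
+    #     exec_task = asyncio.ensure_future(self._local_async_exec(...))
+    #     status_task = asyncio.ensure_future(self._store_status(...))
+    #     await asyncio.wait([exec_task])
+    #     status_task.cancel()
+    #     output, rc = exec_task.result()
+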
+    async def uninstall(self, cluster_uuid: str, kdu_instance: str, **kwargs):
+        """
+        Removes an existing KDU instance. It implicitly uses the `delete` or `uninstall` call
+        (this call should happen after all _terminate-config-primitive_ of the VNF
+        are invoked).
+
+        :param cluster_uuid: UUID of a K8s cluster known by OSM, or namespace:cluster_id
+        :param kdu_instance: unique name for the KDU instance to be deleted
+        :param kwargs: Additional parameters (None yet)
+        :return: True if successful
+        """
+
+        self.log.debug(
+            "uninstall kdu_instance {} from cluster {}".format(
+                kdu_instance, cluster_uuid
+            )
+        )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        # look for instance to obtain namespace
+        instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
+        if not instance_info:
+            self.log.warning("kdu_instance {} not found".format(kdu_instance))
+            return True
+        # init env, paths
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        command = self._get_uninstall_command(
+            kdu_instance, instance_info["namespace"], paths["kube_config"]
+        )
+        output, _rc = await self._local_async_exec(
+            command=command, raise_exception_on_error=True, env=env
+        )
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+        return self._output_to_table(output)
+
+    async def instances_list(self, cluster_uuid: str) -> list:
+        """
+        returns a list of deployed releases in a cluster
+
+        :param cluster_uuid: the 'cluster' or 'namespace:cluster'
+        :return:
+        """
+
+        self.log.debug("list releases for cluster {}".format(cluster_uuid))
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        # execute internal command
+        result = await self._instances_list(cluster_uuid)
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+        return result
+
+    async def get_instance_info(self, cluster_uuid: str, kdu_instance: str):
+        instances = await self.instances_list(cluster_uuid=cluster_uuid)
+        for instance in instances:
+            if instance.get("name") == kdu_instance:
+                return instance
+        self.log.debug("Instance {} not found".format(kdu_instance))
+        return None
+
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrade charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if status equals to "completed"
+        """
+        raise K8sException("KDUs deployed with Helm do not support charm upgrade")
+
+    async def exec_primitive(
+        self,
+        cluster_uuid: str = None,
+        kdu_instance: str = None,
+        primitive_name: str = None,
+        timeout: float = 300,
+        params: dict = None,
+        db_dict: dict = None,
+        **kwargs,
+    ) -> str:
+        """Exec primitive (Juju action)
+
+        :param cluster_uuid: The UUID of the cluster or namespace:cluster
+        :param kdu_instance: The unique name of the KDU instance
+        :param primitive_name: Name of action that will be executed
+        :param timeout: Timeout for action execution
+        :param params: Dictionary of all the parameters needed for the action
+        :param db_dict: Dictionary for any additional data
+        :param kwargs: Additional parameters (None yet)
+
+        :return: Returns the output of the action
+        """
+        raise K8sException(
+            "KDUs deployed with Helm don't support actions "
+            "different from rollback, upgrade and status"
+        )
+
+    async def get_services(
+        self, cluster_uuid: str, kdu_instance: str, namespace: str
+    ) -> list:
+        """
+        Returns a list of services defined for the specified kdu instance.
+
+        :param cluster_uuid: UUID of a K8s cluster known by OSM
+        :param kdu_instance: unique name for the KDU instance
+        :param namespace: K8s namespace used by the KDU instance
+        :return: If successful, it will return a list of services. Each service
+        can have the following data:
+        - `name` of the service
+        - `type` type of service in the k8s cluster
+        - `ports` List of ports offered by the service, for each port includes at least
+        name, port, protocol
+        - `cluster_ip` Internal ip to be used inside k8s cluster
+        - `external_ip` List of external ips (in case they are available)
+        """
+
+        self.log.debug(
+            "get_services: cluster_uuid: {}, kdu_instance: {}".format(
+                cluster_uuid, kdu_instance
+            )
+        )
+
+        # init env, paths
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        # get list of services names for kdu
+        service_names = await self._get_services(
+            cluster_uuid, kdu_instance, namespace, paths["kube_config"]
+        )
+
+        service_list = []
+        for service in service_names:
+            service = await self._get_service(cluster_uuid, service, namespace)
+            service_list.append(service)
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+        return service_list
+
+    async def get_service(
+        self, cluster_uuid: str, service_name: str, namespace: str
+    ) -> object:
+        self.log.debug(
+            "get service, service_name: {}, namespace: {}, cluster_uuid: {}".format(
+                service_name, namespace, cluster_uuid
+            )
+        )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        service = await self._get_service(cluster_uuid, service_name, namespace)
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+        return service
+
+    async def status_kdu(
+        self, cluster_uuid: str, kdu_instance: str, yaml_format: bool = False, **kwargs
+    ) -> Union[str, dict]:
+        """
+        This call retrieves the current state of a given KDU instance. It allows
+        retrieving the _composition_ (i.e. K8s objects) and _specific values_ of
+        the configuration parameters applied to a given instance. This call is
+        based on the `status` call.
+
+        :param cluster_uuid: UUID of a K8s cluster known by OSM
+        :param kdu_instance: unique name for the KDU instance
+        :param kwargs: Additional parameters (None yet)
+        :param yaml_format: whether the result shall be returned as a YAML string or
+                                as a dictionary
+        :return: If successful, it will return the following vector of arguments:
+        - K8s `namespace` in the cluster where the KDU lives
+        - `state` of the KDU instance. It can be:
+              - UNKNOWN
+              - DEPLOYED
+              - DELETED
+              - SUPERSEDED
+              - FAILED or
+              - DELETING
+        - List of `resources` (objects) that this release consists of, sorted by kind,
+          and the status of those resources
+        - Last `deployment_time`.
+
+        """
+        self.log.debug(
+            "status_kdu: cluster_uuid: {}, kdu_instance: {}".format(
+                cluster_uuid, kdu_instance
+            )
+        )
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        # get instance: needed to obtain namespace
+        instances = await self._instances_list(cluster_id=cluster_uuid)
+        for instance in instances:
+            if instance.get("name") == kdu_instance:
+                break
+        else:
+            # instance does not exist
+            raise K8sException(
+                "Instance name: {} not found in cluster: {}".format(
+                    kdu_instance, cluster_uuid
+                )
+            )
+
+        status = await self._status_kdu(
+            cluster_id=cluster_uuid,
+            kdu_instance=kdu_instance,
+            namespace=instance["namespace"],
+            yaml_format=yaml_format,
+            show_error_log=True,
+        )
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+        return status
+
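+    # Illustrative example (hypothetical values): with yaml_format=False the
+    # returned status is the dict produced by `helm status`, roughly:
+    #
+    #     {
+    #         "name": "openldap-0012345678",
+    #         "namespace": "osm-ns",
+    #         "info": {"status": "deployed", "description": "Install complete"},
+    #         ...
+    #     }
+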
+    async def get_values_kdu(
+        self, kdu_instance: str, namespace: str, kubeconfig: str
+    ) -> str:
+        self.log.debug("get kdu_instance values {}".format(kdu_instance))
+
+        return await self._exec_get_command(
+            get_command="values",
+            kdu_instance=kdu_instance,
+            namespace=namespace,
+            kubeconfig=kubeconfig,
+        )
+
+    async def values_kdu(self, kdu_model: str, repo_url: str = None) -> str:
+        """Method to obtain the Helm Chart package's values
+
+        Args:
+            kdu_model: The name or path of a Helm Chart
+            repo_url: Helm Chart repository url
+
+        Returns:
+            str: the values of the Helm Chart package
+        """
+
+        self.log.debug(
+            "inspect kdu_model values {} from (optional) repo: {}".format(
+                kdu_model, repo_url
+            )
+        )
+
+        return await self._exec_inspect_command(
+            inspect_command="values", kdu_model=kdu_model, repo_url=repo_url
+        )
+
+    async def help_kdu(self, kdu_model: str, repo_url: str = None) -> str:
+        self.log.debug(
+            "inspect kdu_model {} readme.md from repo: {}".format(kdu_model, repo_url)
+        )
+
+        return await self._exec_inspect_command(
+            inspect_command="readme", kdu_model=kdu_model, repo_url=repo_url
+        )
+
+    async def synchronize_repos(self, cluster_uuid: str):
+        self.log.debug("synchronize repos for cluster helm-id: {}".format(cluster_uuid))
+        try:
+            db_repo_ids = self._get_helm_chart_repos_ids(cluster_uuid)
+            db_repo_dict = self._get_db_repos_dict(db_repo_ids)
+
+            local_repo_list = await self.repo_list(cluster_uuid)
+            local_repo_dict = {repo["name"]: repo["url"] for repo in local_repo_list}
+
+            deleted_repo_list = []
+            added_repo_dict = {}
+
+            # iterate over the list of repos in the database that should be
+            # added if not present
+            for repo_name, db_repo in db_repo_dict.items():
+                try:
+                    # check if it is already present
+                    curr_repo_url = local_repo_dict.get(db_repo["name"])
+                    repo_id = db_repo.get("_id")
+                    if curr_repo_url != db_repo["url"]:
+                        if curr_repo_url:
+                            self.log.debug(
+                                "repo {} url changed, delete and and again".format(
+                                    db_repo["url"]
+                                )
+                            )
+                            await self.repo_remove(cluster_uuid, db_repo["name"])
+                            deleted_repo_list.append(repo_id)
+
+                        # add repo
+                        self.log.debug("add repo {}".format(db_repo["name"]))
+                        await self.repo_add(
+                            cluster_uuid,
+                            db_repo["name"],
+                            db_repo["url"],
+                            cert=db_repo.get("ca_cert"),
+                            user=db_repo.get("user"),
+                            password=db_repo.get("password"),
+                            oci=db_repo.get("oci", False),
+                        )
+                        added_repo_dict[repo_id] = db_repo["name"]
+                except Exception as e:
+                    raise K8sException(
+                        "Error adding repo id: {}, err_msg: {} ".format(
+                            repo_id, repr(e)
+                        )
+                    )
+
+            # Delete repos that are present locally but not in the database
+            for repo_name in local_repo_dict:
+                if not db_repo_dict.get(repo_name) and repo_name != "stable":
+                    self.log.debug("delete repo {}".format(repo_name))
+                    try:
+                        await self.repo_remove(cluster_uuid, repo_name)
+                        deleted_repo_list.append(repo_name)
+                    except Exception as e:
+                        self.log.warning(
+                            "Error deleting repo, name: {}, err_msg: {}".format(
+                                repo_name, str(e)
+                            )
+                        )
+
+            return deleted_repo_list, added_repo_dict
+
+        except K8sException:
+            raise
+        except Exception as e:
+            # Do not raise errors synchronizing repos
+            self.log.error("Error synchronizing repos: {}".format(e))
+            raise Exception("Error synchronizing repos: {}".format(e))
+
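+    # Illustrative note (assumption): synchronize_repos reconciles the cluster's
+    # helm repo list with the "k8srepos" DB collection. If the database holds a
+    # repo named "bitnami" that the cluster lacks, it is added; local repos not
+    # in the database are removed, except "stable":
+    #
+    #     deleted, added = await conn.synchronize_repos(cluster_uuid)
+    #     # e.g. ([], {"<repo-db-id>": "bitnami"})  (hypothetical values)
+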
+    def _get_db_repos_dict(self, repo_ids: list):
+        db_repos_dict = {}
+        for repo_id in repo_ids:
+            db_repo = self.db.get_one("k8srepos", {"_id": repo_id})
+            db_repos_dict[db_repo["name"]] = db_repo
+        return db_repos_dict
+
+    """
+    ####################################################################################
+    ################################# TO BE IMPLEMENTED BY SUBCLASSES #################
+    ####################################################################################
+    """
+
+    @abc.abstractmethod
+    def _init_paths_env(self, cluster_name: str, create_if_not_exist: bool = True):
+        """
+        Creates base cluster and kube dirs and returns them.
+        Also creates helm3 dirs according to the new directory specification; these
+        paths are not returned but assigned to helm environment variables.
+
+        :param cluster_name:  cluster_name
+        :return: Dictionary with config_paths and dictionary with helm environment variables
+        """
+
+    @abc.abstractmethod
+    async def _cluster_init(self, cluster_id, namespace, paths, env):
+        """
+        Implements the helm version dependent cluster initialization
+        """
+
+    @abc.abstractmethod
+    async def _instances_list(self, cluster_id):
+        """
+        Implements the helm version dependent helm instances list
+        """
+
+    @abc.abstractmethod
+    async def _get_services(self, cluster_id, kdu_instance, namespace, kubeconfig):
+        """
+        Implements the helm version dependent method to obtain services from a helm instance
+        """
+
+    @abc.abstractmethod
+    async def _status_kdu(
+        self,
+        cluster_id: str,
+        kdu_instance: str,
+        namespace: str = None,
+        yaml_format: bool = False,
+        show_error_log: bool = False,
+    ) -> Union[str, dict]:
+        """
+        Implements the helm version dependent method to obtain status of a helm instance
+        """
+
+    @abc.abstractmethod
+    def _get_install_command(
+        self,
+        kdu_model,
+        kdu_instance,
+        namespace,
+        labels,
+        params_str,
+        version,
+        atomic,
+        timeout,
+        kubeconfig,
+    ) -> str:
+        """
+        Obtain command to be executed to install the indicated instance
+        """
+
+    @abc.abstractmethod
+    def _get_upgrade_scale_command(
+        self,
+        kdu_model,
+        kdu_instance,
+        namespace,
+        count,
+        labels,
+        version,
+        atomic,
+        replicas,
+        timeout,
+        resource_name,
+        kubeconfig,
+    ) -> str:
+        """Generates the command to scale a Helm Chart release
+
+        Args:
+            kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
+            kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
+            namespace (str): Namespace where this KDU instance is deployed
+            count (int): Scale count
+            version (str): Constraint with specific version of the Chart to use
+            atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            replicas (str): The key under the resource_name key where the scale count is stored
+            timeout (float): The time, in seconds, to wait
+            resource_name (str): The KDU's resource to scale
+            kubeconfig (str): Kubeconfig file path
+
+        Returns:
+            str: command to scale a Helm Chart release
+        """
+
+    @abc.abstractmethod
+    def _get_upgrade_command(
+        self,
+        kdu_model,
+        kdu_instance,
+        namespace,
+        params_str,
+        labels,
+        version,
+        atomic,
+        timeout,
+        kubeconfig,
+        targetHostK8sLabels,
+        reset_values,
+        reuse_values,
+        reset_then_reuse_values,
+        force,
+    ) -> str:
+        """Generates the command to upgrade a Helm Chart release
+
+        Args:
+            kdu_model (str): Kdu model name, corresponding to the Helm local location or repository
+            kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question
+            namespace (str): Namespace where this KDU instance is deployed
+            params_str (str): Params used to upgrade the Helm Chart release
+            version (str): Constraint with specific version of the Chart to use
+            atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade.
+                The --wait flag will be set automatically if --atomic is used
+            timeout (float): The time, in seconds, to wait
+            kubeconfig (str): Kubeconfig file path
+            reset_values(bool): If set, helm resets values instead of reusing previous values.
+            reuse_values(bool): If set, helm reuses previous values.
+            reset_then_reuse_values(bool): If set, helm resets values, then apply the last release's values
+            force (bool): If set, helm forces resource updates through a replacement strategy. This may recreate pods.
+        Returns:
+            str: command to upgrade a Helm Chart release
+        """
+
+    @abc.abstractmethod
+    def _get_rollback_command(
+        self, kdu_instance, namespace, revision, kubeconfig
+    ) -> str:
+        """
+        Obtain command to be executed to rollback the indicated instance
+        """
+
+    @abc.abstractmethod
+    def _get_uninstall_command(
+        self, kdu_instance: str, namespace: str, kubeconfig: str
+    ) -> str:
+        """
+        Obtain command to be executed to delete the indicated instance
+        """
+
+    @abc.abstractmethod
+    def _get_inspect_command(
+        self, show_command: str, kdu_model: str, repo_str: str, version: str
+    ):
+        """Generates the command to obtain the information about an Helm Chart package
+            (´helm show ...´ command)
+
+        Args:
+            show_command: the second part of the command (`helm show <show_command>`)
+            kdu_model: The name or path of a Helm Chart
+            repo_str: Helm Chart repository option string (e.g. " --repo <url>")
+            version: constraint with specific version of the Chart to use
+
+        Returns:
+            str: the generated Helm Chart command
+        """
+
+    @abc.abstractmethod
+    def _get_get_command(
+        self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
+    ):
+        """Obtain command to be executed to get information about the kdu instance."""
+
+    @abc.abstractmethod
+    async def _uninstall_sw(self, cluster_id: str, namespace: str):
+        """
+        Method call to uninstall cluster software for helm. This method depends on
+        the helm version:
+        for Helm v2 it will be called when Tiller must be uninstalled;
+        for Helm v3 it does nothing and does not need to be called.
+        """
+
+    @abc.abstractmethod
+    def _get_helm_chart_repos_ids(self, cluster_uuid) -> list:
+        """
+        Obtains the cluster repos identifiers
+        """
+
+    """
+    ####################################################################################
+    ################################### P R I V A T E ##################################
+    ####################################################################################
+    """
+
+    @staticmethod
+    def _check_file_exists(filename: str, exception_if_not_exists: bool = False):
+        if os.path.exists(filename):
+            return True
+        else:
+            msg = "File {} does not exist".format(filename)
+            if exception_if_not_exists:
+                raise K8sException(msg)
+
+    @staticmethod
+    def _remove_multiple_spaces(strobj):
+        strobj = strobj.strip()
+        while "  " in strobj:
+            strobj = strobj.replace("  ", " ")
+        return strobj
+
+    @staticmethod
+    def _output_to_lines(output: str) -> list:
+        output_lines = list()
+        lines = output.splitlines(keepends=False)
+        for line in lines:
+            line = line.strip()
+            if len(line) > 0:
+                output_lines.append(line)
+        return output_lines
+
+    @staticmethod
+    def _output_to_table(output: str) -> list:
+        output_table = list()
+        lines = output.splitlines(keepends=False)
+        for line in lines:
+            line = line.replace("\t", " ")
+            line_list = list()
+            output_table.append(line_list)
+            cells = line.split(sep=" ")
+            for cell in cells:
+                cell = cell.strip()
+                if len(cell) > 0:
+                    line_list.append(cell)
+        return output_table
+
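+    # Illustrative example (hypothetical input): _output_to_table replaces tabs
+    # with spaces, splits each line and drops empty cells, so
+    #
+    #     "NAME\tNAMESPACE\tREVISION\nldap\tosm\t1"
+    #
+    # becomes [["NAME", "NAMESPACE", "REVISION"], ["ldap", "osm", "1"]].
+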
+    @staticmethod
+    def _parse_services(output: str) -> list:
+        lines = output.splitlines(keepends=False)
+        services = []
+        for line in lines:
+            line = line.replace("\t", " ")
+            cells = line.split(sep=" ")
+            if len(cells) > 0 and cells[0].startswith("service/"):
+                elems = cells[0].split(sep="/")
+                if len(elems) > 1:
+                    services.append(elems[1])
+        return services
+
+    @staticmethod
+    def _get_deep(dictionary: dict, members: tuple):
+        target = dictionary
+        value = None
+        try:
+            for m in members:
+                value = target.get(m)
+                if not value:
+                    return None
+                else:
+                    target = value
+        except Exception:
+            pass
+        return value
+
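+    # Illustrative example: _get_deep walks nested dictionaries and returns None
+    # for any missing member, e.g. on a parsed Service manifest:
+    #
+    #     _get_deep(data, ("spec", "type"))          # -> "LoadBalancer"
+    #     _get_deep(data, ("spec", "missing", "x"))  # -> None
+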
+    # find key:value in several lines
+    @staticmethod
+    def _find_in_lines(p_lines: list, p_key: str) -> str:
+        for line in p_lines:
+            try:
+                if line.startswith(p_key + ":"):
+                    # split only on the first colon so values containing ":" survive
+                    parts = line.split(":", 1)
+                    the_value = parts[1].strip()
+                    return the_value
+            except Exception:
+                # ignore it
+                pass
+        return None
+
+    @staticmethod
+    def _lower_keys_list(input_list: list):
+        """
+        Transform the keys in a list of dictionaries to lower case and returns a new list
+        of dictionaries
+        """
+        new_list = []
+        if input_list:
+            for dictionary in input_list:
+                new_dict = dict((k.lower(), v) for k, v in dictionary.items())
+                new_list.append(new_dict)
+        return new_list
+
+    async def _local_async_exec(
+        self,
+        command: str,
+        raise_exception_on_error: bool = False,
+        show_error_log: bool = True,
+        encode_utf8: bool = False,
+        env: dict = None,
+    ) -> tuple[str, int]:
+        command = K8sHelmBaseConnector._remove_multiple_spaces(command)
+        self.log.debug(
+            "Executing async local command: {}, env: {}".format(command, env)
+        )
+
+        # split command
+        command = shlex.split(command)
+
+        environ = os.environ.copy()
+        if env:
+            environ.update(env)
+
+        process = None
+        try:
+            async with self.cmd_lock:
+                process = await asyncio.create_subprocess_exec(
+                    *command,
+                    stdout=asyncio.subprocess.PIPE,
+                    stderr=asyncio.subprocess.PIPE,
+                    env=environ,
+                )
+
+                # wait for command terminate
+                stdout, stderr = await process.communicate()
+
+                return_code = process.returncode
+
+            output = ""
+            if stdout:
+                output = stdout.decode("utf-8").strip()
+                # output = stdout.decode()
+            if stderr:
+                output = stderr.decode("utf-8").strip()
+                # output = stderr.decode()
+
+            if return_code != 0 and show_error_log:
+                self.log.debug(
+                    "Return code (FAIL): {}\nOutput:\n{}".format(return_code, output)
+                )
+            else:
+                self.log.debug("Return code: {}".format(return_code))
+
+            if raise_exception_on_error and return_code != 0:
+                raise K8sException(output)
+
+            if encode_utf8:
+                output = output.encode("utf-8").strip()
+                output = str(output).replace("\\n", "\n")
+
+            return output, return_code
+
+        except asyncio.CancelledError:
+            # first, kill the process if it is still running
+            if process is not None and process.returncode is None:
+                process.kill()
+            raise
+        except K8sException:
+            raise
+        except Exception as e:
+            msg = "Exception executing command: {} -> {}".format(command, e)
+            self.log.error(msg)
+            if raise_exception_on_error:
+                raise K8sException(e) from e
+            else:
+                return "", -1
+
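+    # Illustrative note: the command is shlex-split and executed without a
+    # shell, so redirection and globbing are unavailable; pipelines must use
+    # _local_async_exec_pipe below. A hypothetical call:
+    #
+    #     output, rc = await self._local_async_exec(
+    #         command="helm list --all-namespaces", env=env
+    #     )
+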
+    async def _local_async_exec_pipe(
+        self,
+        command1: str,
+        command2: str,
+        raise_exception_on_error: bool = True,
+        show_error_log: bool = True,
+        encode_utf8: bool = False,
+        env: dict = None,
+    ):
+        command1 = K8sHelmBaseConnector._remove_multiple_spaces(command1)
+        command2 = K8sHelmBaseConnector._remove_multiple_spaces(command2)
+        command = "{} | {}".format(command1, command2)
+        self.log.debug(
+            "Executing async local command: {}, env: {}".format(command, env)
+        )
+
+        # split command
+        command1 = shlex.split(command1)
+        command2 = shlex.split(command2)
+
+        environ = os.environ.copy()
+        if env:
+            environ.update(env)
+
+        process_1 = None
+        process_2 = None
+        try:
+            async with self.cmd_lock:
+                read, write = os.pipe()
+                process_1 = await asyncio.create_subprocess_exec(
+                    *command1, stdout=write, env=environ
+                )
+                os.close(write)
+                process_2 = await asyncio.create_subprocess_exec(
+                    *command2, stdin=read, stdout=asyncio.subprocess.PIPE, env=environ
+                )
+                os.close(read)
+                stdout, stderr = await process_2.communicate()
+
+                return_code = process_2.returncode
+
+            output = ""
+            if stdout:
+                output = stdout.decode("utf-8").strip()
+                # output = stdout.decode()
+            if stderr:
+                output = stderr.decode("utf-8").strip()
+                # output = stderr.decode()
+
+            if return_code != 0 and show_error_log:
+                self.log.debug(
+                    "Return code (FAIL): {}\nOutput:\n{}".format(return_code, output)
+                )
+            else:
+                self.log.debug("Return code: {}".format(return_code))
+
+            if raise_exception_on_error and return_code != 0:
+                raise K8sException(output)
+
+            if encode_utf8:
+                output = output.encode("utf-8").strip()
+                output = str(output).replace("\\n", "\n")
+
+            return output, return_code
+        except asyncio.CancelledError:
+            # first, kill the processes if they are still running
+            for process in (process_1, process_2):
+                if process is not None and process.returncode is None:
+                    process.kill()
+            raise
+        except K8sException:
+            raise
+        except Exception as e:
+            msg = "Exception executing command: {} -> {}".format(command, e)
+            self.log.error(msg)
+            if raise_exception_on_error:
+                raise K8sException(e) from e
+            else:
+                return "", -1
+
+    async def _get_service(self, cluster_id, service_name, namespace):
+        """
+        Obtains the data of the specified service in the k8s cluster.
+
+        :param cluster_id: id of a K8s cluster known by OSM
+        :param service_name: name of the K8s service in the specified namespace
+        :param namespace: K8s namespace used by the KDU instance
+        :return: If successful, it will return a service with the following data:
+        - `name` of the service
+        - `type` type of service in the k8s cluster
+        - `ports` List of ports offered by the service, for each port includes at least
+        name, port, protocol
+        - `cluster_ip` Internal ip to be used inside k8s cluster
+        - `external_ip` List of external ips (in case they are available)
+        """
+
+        # init config, env
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_id, create_if_not_exist=True
+        )
+
+        command = "{} --kubeconfig={} --namespace={} get service {} -o=yaml".format(
+            self.kubectl_command,
+            paths["kube_config"],
+            quote(namespace),
+            quote(service_name),
+        )
+
+        output, _rc = await self._local_async_exec(
+            command=command, raise_exception_on_error=True, env=env
+        )
+
+        data = yaml.load(output, Loader=yaml.SafeLoader)
+
+        service = {
+            "name": service_name,
+            "type": self._get_deep(data, ("spec", "type")),
+            "ports": self._get_deep(data, ("spec", "ports")),
+            "cluster_ip": self._get_deep(data, ("spec", "clusterIP")),
+        }
+        if service["type"] == "LoadBalancer":
+            ip_map_list = self._get_deep(data, ("status", "loadBalancer", "ingress"))
+            ip_list = [elem["ip"] for elem in ip_map_list]
+            service["external_ip"] = ip_list
+
+        return service
+
+    async def _exec_get_command(
+        self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
+    ):
+        """Obtains information about the kdu instance."""
+
+        full_command = self._get_get_command(
+            get_command, kdu_instance, namespace, kubeconfig
+        )
+
+        output, _rc = await self._local_async_exec(command=full_command)
+
+        return output
+
+    async def _exec_inspect_command(
+        self, inspect_command: str, kdu_model: str, repo_url: str = None
+    ):
+        """Obtains information about an Helm Chart package (´helm show´ command)
+
+        Args:
+            inspect_command: the Helm sub command (`helm show <inspect_command> ...`)
+            kdu_model: The name or path of a Helm Chart
+            repo_url: Helm Chart repository url
+
+        Returns:
+            str: the requested info about the Helm Chart package
+        """
+
+        repo_str = ""
+        if repo_url:
+            repo_str = " --repo {}".format(quote(repo_url))
+
+            # Obtain the Chart's name and store it in the var kdu_model
+            kdu_model, _ = self._split_repo(kdu_model=kdu_model)
+
+        kdu_model, version = self._split_version(kdu_model)
+        if version:
+            version_str = "--version {}".format(quote(version))
+        else:
+            version_str = ""
+
+        full_command = self._get_inspect_command(
+            show_command=inspect_command,
+            kdu_model=quote(kdu_model),
+            repo_str=repo_str,
+            version=version_str,
+        )
+
+        output, _ = await self._local_async_exec(command=full_command)
+
+        return output
+
+    async def _get_replica_count_url(
+        self,
+        kdu_model: str,
+        repo_url: str = None,
+        resource_name: str = None,
+    ) -> tuple[int, str]:
+        """Get the replica count value in the Helm Chart Values.
+
+        Args:
+            kdu_model: The name or path of a Helm Chart
+            repo_url: Helm Chart repository url
+            resource_name: Resource name
+
+        Returns:
+            A tuple with:
+            - The number of replicas of the specific instance; if not found, returns None; and
+            - The string corresponding to the replica count key in the Helm values
+        """
+
+        kdu_values = yaml.load(
+            await self.values_kdu(kdu_model=kdu_model, repo_url=repo_url),
+            Loader=yaml.SafeLoader,
+        )
+
+        self.log.debug(f"Obtained the Helm package values for the KDU: {kdu_values}")
+
+        if not kdu_values:
+            raise K8sException(
+                "kdu_values not found for kdu_model {}".format(kdu_model)
+            )
+
+        if resource_name:
+            kdu_values = kdu_values.get(resource_name, None)
+
+        if not kdu_values:
+            msg = "resource {} not found in the values in model {}".format(
+                resource_name, kdu_model
+            )
+            self.log.error(msg)
+            raise K8sException(msg)
+
+        duplicate_check = False
+
+        replica_str = ""
+        replicas = None
+
+        if kdu_values.get("replicaCount") is not None:
+            replicas = kdu_values["replicaCount"]
+            replica_str = "replicaCount"
+        elif kdu_values.get("replicas") is not None:
+            duplicate_check = True
+            replicas = kdu_values["replicas"]
+            replica_str = "replicas"
+        else:
+            if resource_name:
+                msg = (
+                    "replicaCount or replicas not found in the resource"
+                    "{} values in model {}. Cannot be scaled".format(
+                        resource_name, kdu_model
+                    )
+                )
+            else:
+                msg = (
+                    "replicaCount or replicas not found in the values"
+                    "in model {}. Cannot be scaled".format(kdu_model)
+                )
+            self.log.error(msg)
+            raise K8sException(msg)
+
+        # Check whether replicas and replicaCount exist at the same time
+        msg = "replicaCount and replicas exist at the same time"
+        if duplicate_check:
+            if "replicaCount" in kdu_values:
+                self.log.error(msg)
+                raise K8sException(msg)
+        else:
+            if "replicas" in kdu_values:
+                self.log.error(msg)
+                raise K8sException(msg)
+
+        return replicas, replica_str
+
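+    # Illustrative example (hypothetical chart values): for a values.yaml with
+    # "replicaCount: 3" this returns (3, "replicaCount"); for "replicas: 3" it
+    # returns (3, "replicas"); if both keys are present it raises K8sException,
+    # since the scale target would be ambiguous.
+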
+    async def _get_replica_count_instance(
+        self,
+        kdu_instance: str,
+        namespace: str,
+        kubeconfig: str,
+        resource_name: str = None,
+    ) -> int:
+        """Get the replica count value in the instance.
+
+        Args:
+            kdu_instance: The name of the KDU instance
+            namespace: KDU instance namespace
+            kubeconfig:
+            resource_name: Resource name
+
+        Returns:
+            The number of replicas of the specific instance; if not found, returns None
+        """
+
+        kdu_values = yaml.load(
+            await self.get_values_kdu(kdu_instance, namespace, kubeconfig),
+            Loader=yaml.SafeLoader,
+        )
+
+        self.log.debug(f"Obtained the Helm values for the KDU instance: {kdu_values}")
+
+        replicas = None
+
+        if kdu_values:
+            resource_values = (
+                kdu_values.get(resource_name, None) if resource_name else None
+            )
+
+            for replica_str in ("replicaCount", "replicas"):
+                if resource_values:
+                    replicas = resource_values.get(replica_str)
+                else:
+                    replicas = kdu_values.get(replica_str)
+
+                if replicas is not None:
+                    break
+
+        return replicas
+
+    async def _labels_dict(self, db_dict, kdu_instance):
+        # get the network service registry
+        ns_id = db_dict["filter"]["_id"]
+        try:
+            db_nsr = self.db.get_one("nsrs", {"_id": ns_id})
+        except Exception as e:
+            print("nsr {} not found: {}".format(ns_id, e))
+        nsd_id = db_nsr["nsd"]["_id"]
+
+        # get the kdu registry
+        for index, kdu in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
+            if kdu["kdu-instance"] == kdu_instance:
+                db_kdur = kdu
+                break
+        else:
+            # No kdur found, could be the case of an EE chart
+            return {}
+
+        kdu_name = db_kdur["kdu-name"]
+        member_vnf_index = db_kdur["member-vnf-index"]
+        # get the vnf registry
+        try:
+            db_vnfr = self.db.get_one(
+                "vnfrs",
+                {"nsr-id-ref": ns_id, "member-vnf-index-ref": member_vnf_index},
+            )
+        except Exception as e:
+            print("vnfr {} not found: {}".format(member_vnf_index, e))
+
+        vnf_id = db_vnfr["_id"]
+        vnfd_id = db_vnfr["vnfd-id"]
+
+        return {
+            "managed-by": "osm.etsi.org",
+            "osm.etsi.org/ns-id": ns_id,
+            "osm.etsi.org/nsd-id": nsd_id,
+            "osm.etsi.org/vnf-id": vnf_id,
+            "osm.etsi.org/vnfd-id": vnfd_id,
+            "osm.etsi.org/kdu-id": kdu_instance,
+            "osm.etsi.org/kdu-name": kdu_name,
+        }
+
+    async def _contains_labels(self, kdu_instance, namespace, kube_config, env):
+        command = "env KUBECONFIG={} {} get manifest {} --namespace={}".format(
+            kube_config,
+            self._helm_command,
+            quote(kdu_instance),
+            quote(namespace),
+        )
+        output, rc = await self._local_async_exec(
+            command=command, raise_exception_on_error=False, env=env
+        )
+        manifests = yaml.safe_load_all(output)
+        for manifest in manifests:
+            # Check if the manifest has metadata and labels
+            if (
+                manifest is not None
+                and "metadata" in manifest
+                and "labels" in manifest["metadata"]
+            ):
+                labels = {
+                    "managed-by",
+                    "osm.etsi.org/kdu-id",
+                    "osm.etsi.org/kdu-name",
+                    "osm.etsi.org/ns-id",
+                    "osm.etsi.org/nsd-id",
+                    "osm.etsi.org/vnf-id",
+                    "osm.etsi.org/vnfd-id",
+                }
+                if labels.issubset(manifest["metadata"]["labels"].keys()):
+                    return True
+        return False
+
+    async def _store_status(
+        self,
+        cluster_id: str,
+        operation: str,
+        kdu_instance: str,
+        namespace: str = None,
+        db_dict: dict = None,
+    ) -> None:
+        """
+        Obtains the status of the KDU instance based on Helm Charts, and stores it in the database.
+
+        :param cluster_id (str): the cluster where the KDU instance is deployed
+        :param operation (str): The operation related to the status to be updated (for instance, "install" or "upgrade")
+        :param kdu_instance (str): The KDU instance in relation to which the status is obtained
+        :param namespace (str): The Kubernetes namespace where the KDU instance was deployed. Defaults to None
+        :param db_dict (dict): A dictionary with the database necessary information. It shall contain the
+        values for the keys:
+            - "collection": The Mongo DB collection to write to
+            - "filter": The query filter to use in the update process
+            - "path": The dot separated keys which targets the object to be updated
+        Defaults to None.
+        """
+
+        try:
+            detailed_status = await self._status_kdu(
+                cluster_id=cluster_id,
+                kdu_instance=kdu_instance,
+                yaml_format=False,
+                namespace=namespace,
+            )
+
+            status = detailed_status.get("info").get("description")
+            self.log.debug(f"Status for KDU {kdu_instance} obtained: {status}.")
+
+            # write status to db
+            result = await self.write_app_status_to_db(
+                db_dict=db_dict,
+                status=str(status),
+                detailed_status=str(detailed_status),
+                operation=operation,
+            )
+
+            if not result:
+                self.log.info("Error writing in database. Task exiting...")
+
+        except asyncio.CancelledError as e:
+            self.log.warning(
+                f"Exception in method {self._store_status.__name__} (task cancelled): {e}"
+            )
+        except Exception as e:
+            self.log.warning(f"Exception in method {self._store_status.__name__}: {e}")
+
+    # params for use in -f file
+    # returns values file option and filename (in order to delete it at the end)
+    def _params_to_file_option(self, cluster_id: str, params: dict) -> tuple[str, str]:
+        if params and len(params) > 0:
+            self._init_paths_env(cluster_name=cluster_id, create_if_not_exist=True)
+
+            def get_random_number():
+                r = random.SystemRandom().randint(1, 99999999)
+                s = str(r)
+                while len(s) < 10:
+                    s = "0" + s
+                return s
+
+            params2 = dict()
+            for key in params:
+                value = params.get(key)
+                if "!!yaml" in str(value):
+                    value = yaml.safe_load(value[7:])
+                params2[key] = value
+
+            values_file = get_random_number() + ".yaml"
+            with open(values_file, "w") as stream:
+                yaml.dump(params2, stream, indent=4, default_flow_style=False)
+
+            return "-f {}".format(values_file), values_file
+
+        return "", None
+
+    # params for use in --set option
+    @staticmethod
+    def _params_to_set_option(params: dict) -> str:
+        pairs = [
+            f"{quote(str(key))}={quote(str(value))}"
+            for key, value in params.items()
+            if value is not None
+        ]
+        if not pairs:
+            return ""
+        return "--set " + ",".join(pairs)
+
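+    # Illustrative example: keys and values are shell-quoted and joined for
+    # helm's --set flag:
+    #
+    #     _params_to_set_option({"replicaCount": 2, "service.type": "NodePort"})
+    #     # -> "--set replicaCount=2,service.type=NodePort"
+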
+    @staticmethod
+    def generate_kdu_instance_name(**kwargs):
+        chart_name = kwargs["kdu_model"]
+        # check embedded chart (file or dir)
+        if chart_name.startswith("/"):
+            # extract file or directory name
+            chart_name = chart_name[chart_name.rfind("/") + 1 :]
+        # check URL
+        elif "://" in chart_name:
+            # extract last portion of URL
+            chart_name = chart_name[chart_name.rfind("/") + 1 :]
+
+        name = ""
+        for c in chart_name:
+            if c.isalpha() or c.isnumeric():
+                name += c
+            else:
+                name += "-"
+        if len(name) > 35:
+            name = name[0:35]
+
+        # if it does not start with an alpha character, prefix 'a'
+        if not name[0].isalpha():
+            name = "a" + name
+
+        name += "-"
+
+        def get_random_number():
+            r = random.SystemRandom().randint(1, 99999999)
+            s = str(r)
+            s = s.rjust(10, "0")
+            return s
+
+        name = name + get_random_number()
+        return name.lower()
+
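+    # Illustrative example: non-alphanumeric characters in the chart reference
+    # become "-", the result is truncated to 35 chars, prefixed with "a" when it
+    # does not start with a letter, and a zero-padded random suffix is appended:
+    #
+    #     generate_kdu_instance_name(kdu_model="stable/openldap")
+    #     # -> e.g. "stable-openldap-0012345678" (suffix is random)
+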
+    def _split_version(self, kdu_model: str) -> tuple[str, str]:
+        version = None
+        if (
+            not (
+                self._is_helm_chart_a_file(kdu_model)
+                or self._is_helm_chart_a_url(kdu_model)
+            )
+            and ":" in kdu_model
+        ):
+            parts = kdu_model.split(sep=":")
+            if len(parts) == 2:
+                version = str(parts[1])
+                kdu_model = parts[0]
+        return kdu_model, version
+
+    def _split_repo(self, kdu_model: str) -> tuple[str, str]:
+        """Obtain the Helm Chart's repository and Chart's names from the KDU model
+
+        Args:
+            kdu_model (str): Associated KDU model
+
+        Returns:
+            (str, str): Tuple with the Chart name in index 0, and the repo name
+                        in index 1; if there was a problem finding them, return None
+                        for both
+        """
+
+        chart_name = None
+        repo_name = None
+
+        idx = kdu_model.find("/")
+        if not self._is_helm_chart_a_url(kdu_model) and idx >= 0:
+            chart_name = kdu_model[idx + 1 :]
+            repo_name = kdu_model[:idx]
+
+        return chart_name, repo_name
+
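+    # Illustrative examples: the two splitters decompose a KDU model such as
+    # "stable/openldap:1.2.3" into its parts:
+    #
+    #     _split_version("stable/openldap:1.2.3")  # -> ("stable/openldap", "1.2.3")
+    #     _split_repo("stable/openldap")           # -> ("openldap", "stable")
+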
+    async def _find_repo(self, kdu_model: str, cluster_uuid: str) -> str:
+        """Obtain the Helm repository for an Helm Chart
+
+        Args:
+            kdu_model (str): the KDU model associated with the Helm Chart instantiation
+            cluster_uuid (str): The cluster UUID associated with the Helm Chart instantiation
+
+        Returns:
+            str: the repository URL; if Helm Chart is a local one, the function returns None
+        """
+
+        _, repo_name = self._split_repo(kdu_model=kdu_model)
+
+        repo_url = None
+        if repo_name:
+            # Find repository link
+            local_repo_list = await self.repo_list(cluster_uuid)
+            for repo in local_repo_list:
+                if repo["name"] == repo_name:
+                    repo_url = repo["url"]
+                    break  # it is not necessary to continue the loop if the repo link was found...
+
+        return repo_url
+
+    def _repo_to_oci_url(self, repo):
+        db_repo = self.db.get_one("k8srepos", {"name": repo}, fail_on_empty=False)
+        if db_repo and "oci" in db_repo:
+            return db_repo.get("url")
+
+    async def _prepare_helm_chart(self, kdu_model, cluster_id):
+        # e.g.: "stable/openldap", "1.0"
+        kdu_model, version = self._split_version(kdu_model)
+        # e.g.: "openldap, stable"
+        chart_name, repo = self._split_repo(kdu_model)
+        if repo and chart_name:  # repo/chart case
+            oci_url = self._repo_to_oci_url(repo)
+            if oci_url:  # oci does not require helm repo update
+                kdu_model = f"{oci_url.rstrip('/')}/{chart_name.lstrip('/')}"  # urljoin doesn't work for oci schema
+            else:
+                await self.repo_update(cluster_id, repo)
+        return kdu_model, version
+
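+    # Illustrative note (hypothetical names): for an OCI-backed repo no index
+    # refresh is needed; the model becomes a direct reference instead. A repo
+    # "myoci" stored with url "oci://registry.example.com/charts" and the model
+    # "myoci/openldap:1.2.3" would yield
+    # ("oci://registry.example.com/charts/openldap", "1.2.3").
+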
+    async def create_certificate(
+        self, cluster_uuid, namespace, dns_prefix, name, secret_name, usage
+    ):
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.create_certificate(
+            namespace=namespace,
+            name=name,
+            dns_prefix=dns_prefix,
+            secret_name=secret_name,
+            usages=[usage],
+            issuer_name="ca-issuer",
+        )
+
+    async def delete_certificate(self, cluster_uuid, namespace, certificate_name):
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.delete_certificate(namespace, certificate_name)
+
+    async def create_namespace(
+        self,
+        namespace,
+        cluster_uuid,
+        labels,
+    ):
+        """
+        Create a namespace in a specific cluster
+
+        :param namespace:    Namespace to be created
+        :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig
+        :param labels:       Dictionary with labels for the new namespace
+        :returns: None
+        """
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.create_namespace(
+            name=namespace,
+            labels=labels,
+        )
+
+    async def delete_namespace(
+        self,
+        namespace,
+        cluster_uuid,
+    ):
+        """
+        Delete a namespace in a specific cluster
+
+        :param namespace: namespace to be deleted
+        :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig
+        :returns: None
+        """
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.delete_namespace(
+            name=namespace,
+        )
+
+    async def copy_secret_data(
+        self,
+        src_secret: str,
+        dst_secret: str,
+        cluster_uuid: str,
+        data_key: str,
+        src_namespace: str = "osm",
+        dst_namespace: str = "osm",
+    ):
+        """
+        Copy a single key and value from an existing secret to a new one
+
+        :param src_secret: name of the existing secret
+        :param dst_secret: name of the new secret
+        :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig
+        :param data_key: key of the existing secret to be copied
+        :param src_namespace: Namespace of the existing secret
+        :param dst_namespace: Namespace of the new secret
+        :returns: None
+        """
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        secret_data = await kubectl.get_secret_content(
+            name=src_secret,
+            namespace=src_namespace,
+        )
+        # Only the corresponding data_key value needs to be copied
+        data = {data_key: secret_data.get(data_key)}
+        await kubectl.create_secret(
+            name=dst_secret,
+            data=data,
+            namespace=dst_namespace,
+            secret_type="Opaque",
+        )
+
+    async def setup_default_rbac(
+        self,
+        name,
+        namespace,
+        cluster_uuid,
+        api_groups,
+        resources,
+        verbs,
+        service_account,
+    ):
+        """
+        Create a basic RBAC for a new namespace.
+
+        :param name: name of both Role and Role Binding
+        :param namespace: K8s namespace
+        :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig
+        :param api_groups: Api groups to be allowed in Policy Rule
+        :param resources: Resources to be allowed in Policy Rule
+        :param verbs: Verbs to be allowed in Policy Rule
+        :param service_account: Service Account name used to bind the Role
+        :returns: None
+        """
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+        kubectl = Kubectl(config_file=paths["kube_config"])
+        await kubectl.create_role(
+            name=name,
+            labels={},
+            namespace=namespace,
+            api_groups=api_groups,
+            resources=resources,
+            verbs=verbs,
+        )
+        await kubectl.create_role_binding(
+            name=name,
+            labels={},
+            namespace=namespace,
+            role_name=name,
+            sa_name=service_account,
+        )
diff --git a/osm_lcm/n2vc/k8s_juju_conn.py b/osm_lcm/n2vc/k8s_juju_conn.py
new file mode 100644 (file)
index 0000000..db78519
--- /dev/null
@@ -0,0 +1,961 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+import asyncio
+from typing import Union
+import os
+import uuid
+import yaml
+import tempfile
+import binascii
+
+from osm_lcm.n2vc.config import EnvironConfig
+from osm_lcm.n2vc.definitions import RelationEndpoint
+from osm_lcm.n2vc.exceptions import K8sException
+from osm_lcm.n2vc.k8s_conn import K8sConnector
+from osm_lcm.n2vc.kubectl import Kubectl
+from osm_lcm.n2vc.exceptions import MethodNotImplemented
+from osm_lcm.n2vc.libjuju import Libjuju
+from osm_lcm.n2vc.utils import obj_to_dict, obj_to_yaml
+from osm_lcm.n2vc.store import MotorStore
+from osm_lcm.n2vc.vca.cloud import Cloud
+from osm_lcm.n2vc.vca.connection import get_connection
+
+
+RBAC_LABEL_KEY_NAME = "rbac-id"
+RBAC_STACK_PREFIX = "juju-credential"
+
+
+def generate_rbac_id():
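+    # 4 random bytes, hex-encoded, e.g. "9f1c2b3a"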
+    return binascii.hexlify(os.urandom(4)).decode()
+
+
+class K8sJujuConnector(K8sConnector):
+    libjuju = None
+
+    def __init__(
+        self,
+        fs: object,
+        db: object,
+        kubectl_command: str = "/usr/bin/kubectl",
+        juju_command: str = "/usr/bin/juju",
+        log: object = None,
+        on_update_db=None,
+    ):
+        """
+        :param fs: file system for kubernetes and helm configuration
+        :param db: Database object
+        :param helm_command: path to helm executable
+        :param log: logger
+        """
+
+        # parent class
+        K8sConnector.__init__(self, db, log=log, on_update_db=on_update_db)
+
+        self.fs = fs
+        self.log.debug("Initializing K8S Juju connector")
+
+        db_uri = EnvironConfig(prefixes=["OSMLCM_", "OSMMON_"]).get("database_uri")
+        self._store = MotorStore(db_uri)
+        self.loading_libjuju = asyncio.Lock()
+        self.uninstall_locks = {}
+
+        self.log.debug("K8S Juju connector initialized")
+        # TODO: Remove these commented lines:
+        # self.authenticated = False
+        # self.models = {}
+        # self.juju_secret = ""
+
+    """Initialization"""
+
+    async def init_env(
+        self,
+        k8s_creds: str,
+        namespace: str = "kube-system",
+        reuse_cluster_uuid: str = None,
+        **kwargs,
+    ) -> (str, bool):
+        """
+        It prepares a given K8s cluster environment to run Juju bundles.
+
+        :param k8s_creds: credentials to access a given K8s cluster, i.e. a valid
+            '.kube/config'
+        :param namespace: optional namespace to be used for juju. By default,
+            'kube-system' will be used
+        :param reuse_cluster_uuid: existing cluster uuid for reuse
+        :param: kwargs: Additional parameters
+            vca_id (str): VCA ID
+
+        :return: uuid of the K8s cluster and True if connector has installed some
+            software in the cluster
+            (on error, an exception will be raised)
+        """
+        libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+
+        cluster_uuid = reuse_cluster_uuid or str(uuid.uuid4())
+        kubectl = self._get_kubectl(k8s_creds)
+
+        # CREATING RESOURCES IN K8S
+        rbac_id = generate_rbac_id()
+        metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id)
+        labels = {RBAC_STACK_PREFIX: rbac_id}
+
+        # Create cleanup dictionary to clean up created resources
+        # if it fails in the middle of the process
+        cleanup_data = []
+        try:
+            self.log.debug("Initializing K8s cluster for juju")
+            kubectl.create_cluster_role(name=metadata_name, labels=labels)
+            self.log.debug("Cluster role created")
+            cleanup_data.append(
+                {"delete": kubectl.delete_cluster_role, "args": (metadata_name,)}
+            )
+
+            kubectl.create_service_account(name=metadata_name, labels=labels)
+            self.log.debug("Service account created")
+            cleanup_data.append(
+                {"delete": kubectl.delete_service_account, "args": (metadata_name,)}
+            )
+
+            kubectl.create_cluster_role_binding(name=metadata_name, labels=labels)
+            self.log.debug("Role binding created")
+            cleanup_data.append(
+                {
+                    "delete": kubectl.delete_cluster_role_binding,
+                    "args": (metadata_name,),
+                }
+            )
+            token, client_cert_data = await kubectl.get_secret_data(metadata_name)
+
+            default_storage_class = kubectl.get_default_storage_class()
+            self.log.debug("Default storage class: {}".format(default_storage_class))
+            await libjuju.add_k8s(
+                name=cluster_uuid,
+                rbac_id=rbac_id,
+                token=token,
+                client_cert_data=client_cert_data,
+                configuration=kubectl.configuration,
+                storage_class=default_storage_class,
+                credential_name=self._get_credential_name(cluster_uuid),
+            )
+            self.log.debug("K8s cluster added to juju controller")
+            return cluster_uuid, True
+        except Exception as e:
+            self.log.error("Error initializing k8scluster: {}".format(e), exc_info=True)
+            if len(cleanup_data) > 0:
+                self.log.debug("Cleaning up created resources in k8s cluster...")
+                for item in cleanup_data:
+                    delete_function = item["delete"]
+                    delete_args = item["args"]
+                    delete_function(*delete_args)
+                self.log.debug("Cleanup finished")
+            raise e
+
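+    # Illustrative call (hypothetical variable names): register a cluster from
+    # its kubeconfig contents, keeping the uuid stored in the OSM database.
+    #
+    #     cluster_id, installed = await connector.init_env(
+    #         k8s_creds=kubeconfig_text, reuse_cluster_uuid=db_cluster["_id"]
+    #     )
+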
+    """Repo Management"""
+
+    async def repo_add(
+        self,
+        name: str,
+        url: str,
+        _type: str = "charm",
+        cert: str = None,
+        user: str = None,
+        password: str = None,
+    ):
+        raise MethodNotImplemented()
+
+    async def repo_list(self):
+        raise MethodNotImplemented()
+
+    async def repo_remove(self, name: str):
+        raise MethodNotImplemented()
+
+    async def synchronize_repos(self, cluster_uuid: str, name: str):
+        """
+        Returns None as currently add_repo is not implemented
+        """
+        return None
+
+    """Reset"""
+
+    async def reset(
+        self,
+        cluster_uuid: str,
+        force: bool = False,
+        uninstall_sw: bool = False,
+        **kwargs,
+    ) -> bool:
+        """Reset a cluster
+
+        Resets the Kubernetes cluster by removing the model that represents it.
+
+        :param cluster_uuid str: The UUID of the cluster to reset
+        :param force: Force reset
+        :param uninstall_sw: if True, uninstall the software installed in the cluster (currently unused here)
+        :param: kwargs: Additional parameters
+            vca_id (str): VCA ID
+
+        :return: Returns True if successful or raises an exception.
+        """
+
+        try:
+            self.log.debug("[reset] Removing k8s cloud")
+            libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+
+            cloud = Cloud(cluster_uuid, self._get_credential_name(cluster_uuid))
+
+            cloud_creds = await libjuju.get_cloud_credentials(cloud)
+
+            await libjuju.remove_cloud(cluster_uuid)
+
+            credentials = self.get_credentials(cluster_uuid=cluster_uuid)
+
+            kubectl = self._get_kubectl(credentials)
+
+            delete_functions = [
+                kubectl.delete_cluster_role_binding,
+                kubectl.delete_service_account,
+                kubectl.delete_cluster_role,
+            ]
+
+            credential_attrs = cloud_creds[0].result["attrs"]
+            if RBAC_LABEL_KEY_NAME in credential_attrs:
+                rbac_id = credential_attrs[RBAC_LABEL_KEY_NAME]
+                metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id)
+                for delete_func in delete_functions:
+                    try:
+                        delete_func(metadata_name)
+                    except Exception as e:
+                        self.log.warning("Cannot remove resource in K8s {}".format(e))
+
+        except Exception as e:
+            self.log.debug("Caught exception during reset: {}".format(e))
+            raise e
+        return True
+
+    """Deployment"""
+
+    async def install(
+        self,
+        cluster_uuid: str,
+        kdu_model: str,
+        kdu_instance: str,
+        atomic: bool = True,
+        timeout: float = 1800,
+        params: dict = None,
+        db_dict: dict = None,
+        kdu_name: str = None,
+        namespace: str = None,
+        **kwargs,
+    ) -> bool:
+        """Install a bundle
+
+        :param cluster_uuid str: The UUID of the cluster to install to
+        :param kdu_model str: The name or path of a bundle to install
+        :param kdu_instance: Kdu instance name
+        :param atomic bool: If set, waits until the model is active and resets
+                            the cluster on failure.
+        :param timeout int: The time, in seconds, to wait for the install
+                            to finish
+        :param params dict: Key-value pairs of instantiation parameters
+        :param kdu_name: Name of the KDU instance to be installed
+        :param namespace: K8s namespace to use for the KDU instance
+        :param kwargs: Additional parameters
+            vca_id (str): VCA ID
+
+        :return: True if successful; on error, an exception is raised
+        """
+        libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+        bundle = kdu_model
+
+        if not db_dict:
+            raise K8sException("db_dict must be set")
+        if not bundle:
+            raise K8sException("bundle must be set")
+
+        if bundle.startswith("cs:"):
+            # For Juju Bundles provided by the Charm Store
+            pass
+        elif bundle.startswith("ch:"):
+            # For Juju Bundles provided by the Charm Hub (this only works for juju version >= 2.9)
+            pass
+        elif bundle.startswith("http"):
+            # Download the file
+            pass
+        else:
+            # Deploy from the directory that contains the bundle
+            new_workdir = os.path.dirname(kdu_model)
+            os.chdir(new_workdir)
+            bundle = "local:{}".format(kdu_model)
+
+        # default namespace to kdu_instance
+        if not namespace:
+            namespace = kdu_instance
+
+        self.log.debug("Checking for model named {}".format(namespace))
+
+        # Create the new model
+        self.log.debug("Adding model: {}".format(namespace))
+        cloud = Cloud(cluster_uuid, self._get_credential_name(cluster_uuid))
+        await libjuju.add_model(namespace, cloud)
+
+        # if model:
+        # TODO: Instantiation parameters
+
+        """
+        "Juju bundle that models the KDU, in any of the following ways:
+            - <juju-repo>/<juju-bundle>
+            - <juju-bundle folder under k8s_models folder in the package>
+            - <juju-bundle tgz file (w/ or w/o extension) under k8s_models folder
+                in the package>
+            - <URL_where_to_fetch_juju_bundle>
+        """
+        try:
+            previous_workdir = os.getcwd()
+        except FileNotFoundError:
+            previous_workdir = "/app/storage"
+
+        self.log.debug("[install] deploying {}".format(bundle))
+        instantiation_params = params.get("overlay") if params else None
+        await libjuju.deploy(
+            bundle,
+            model_name=namespace,
+            wait=atomic,
+            timeout=timeout,
+            instantiation_params=instantiation_params,
+        )
+        os.chdir(previous_workdir)
+
+        # update information in the database (first, the VCA status, and then, the namespace)
+        if self.on_update_db:
+            await self.on_update_db(
+                cluster_uuid,
+                kdu_instance,
+                filter=db_dict["filter"],
+                vca_id=kwargs.get("vca_id"),
+            )
+
+        self.db.set_one(
+            table="nsrs",
+            q_filter={"_admin.deployed.K8s.kdu-instance": kdu_instance},
+            update_dict={"_admin.deployed.K8s.$.namespace": namespace},
+        )
+
+        return True
+
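+    # Illustrative call (hypothetical values): deploy a local bundle into a
+    # model named after the KDU instance; db_dict["filter"] must locate the NSR.
+    #
+    #     await connector.install(
+    #         cluster_uuid=cluster_id,
+    #         kdu_model="/app/storage/pkg/k8s_models/my-bundle",
+    #         kdu_instance="my-kdu-abc123",
+    #         db_dict={"filter": {"_id": nsr_id}},
+    #     )
+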
+    async def scale(
+        self,
+        kdu_instance: str,
+        scale: int,
+        resource_name: str,
+        total_timeout: float = 1800,
+        namespace: str = None,
+        **kwargs,
+    ) -> bool:
+        """Scale an application in a model
+
+        :param: kdu_instance str:        KDU instance name
+        :param: scale int:               Scale to which to set the application
+        :param: resource_name str:       The application name in the Juju Bundle
+        :param: total_timeout float:     The time, in seconds, to wait for the
+                                         scaling to finish
+        :param namespace str: The namespace (model) where the Bundle was deployed
+        :param kwargs:                   Additional parameters
+                                            vca_id (str): VCA ID
+
+        :return: If successful, returns True
+        """
+
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+        try:
+            libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+            await libjuju.scale_application(
+                model_name=model_name,
+                application_name=resource_name,
+                scale=scale,
+                total_timeout=total_timeout,
+            )
+        except Exception as e:
+            error_msg = "Error scaling application {} of the model {} of the kdu instance {}: {}".format(
+                resource_name, model_name, kdu_instance, e
+            )
+            self.log.error(error_msg)
+            raise K8sException(message=error_msg)
+        return True
+
+    async def get_scale_count(
+        self, resource_name: str, kdu_instance: str, namespace: str = None, **kwargs
+    ) -> int:
+        """Get an application scale count
+
+        :param: resource_name str:       The application name in the Juju Bundle
+        :param: kdu_instance str:        KDU instance name
+        :param namespace str: The namespace (model) where the Bundle was deployed
+        :param kwargs:                   Additional parameters
+                                            vca_id (str): VCA ID
+        :return: Return application instance count
+        """
+
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+        try:
+            libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+            status = await libjuju.get_model_status(model_name=model_name)
+            return len(status.applications[resource_name].units)
+        except Exception as e:
+            error_msg = (
+                f"Error getting scale count from application {resource_name} of the model {model_name} of "
+                f"the kdu instance {kdu_instance}: {e}"
+            )
+            self.log.error(error_msg)
+            raise K8sException(message=error_msg)
+
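+    # Illustrative pairing (hypothetical names): read the current unit count of
+    # an application and then grow it by one unit.
+    #
+    #     count = await connector.get_scale_count(
+    #         resource_name="mariadb-k8s", kdu_instance="my-kdu-abc123"
+    #     )
+    #     await connector.scale(
+    #         kdu_instance="my-kdu-abc123", scale=count + 1,
+    #         resource_name="mariadb-k8s",
+    #     )
+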
+    async def instances_list(self, cluster_uuid: str) -> list:
+        """
+        Returns a list of deployed releases in a cluster
+
+        :param cluster_uuid: the cluster UUID
+        :return: an empty list (releases are not tracked for Juju bundles)
+        """
+        return []
+
+    async def upgrade(
+        self,
+        cluster_uuid: str,
+        kdu_instance: str,
+        kdu_model: str = None,
+        params: dict = None,
+    ) -> str:
+        """Upgrade a model
+
+        :param cluster_uuid str: The UUID of the cluster to upgrade
+        :param kdu_instance str: The unique name of the KDU instance
+        :param kdu_model str: The name or path of the bundle to upgrade to
+        :param params dict: Key-value pairs of instantiation parameters
+
+        :return: If successful, reference to the new revision number of the
+                 KDU instance.
+        """
+
+        # TODO: Loop through the bundle and upgrade each charm individually
+
+        """
+        The API doesn't have a concept of bundle upgrades, because there are
+        many possible changes: charm revision, disk, number of units, etc.
+
+        As such, we are only supporting a limited subset of upgrades. We'll
+        upgrade the charm revision but leave storage and scale untouched.
+
+        Scale changes should happen through OSM constructs, and changes to
+        storage would require a redeployment of the service, at least in this
+        initial release.
+        """
+        raise MethodNotImplemented()
+
+    """Rollback"""
+
+    async def rollback(
+        self, cluster_uuid: str, kdu_instance: str, revision: int = 0
+    ) -> str:
+        """Rollback a model
+
+        :param cluster_uuid str: The UUID of the cluster to rollback
+        :param kdu_instance str: The unique name of the KDU instance
+        :param revision int: The revision to revert to. If omitted, rolls back
+                             the previous upgrade.
+
+        :return: If successful, returns the revision of active KDU instance,
+                 or raises an exception
+        """
+        raise MethodNotImplemented()
+
+    """Deletion"""
+
+    async def uninstall(
+        self, cluster_uuid: str, kdu_instance: str, namespace: str = None, **kwargs
+    ) -> bool:
+        """Uninstall a KDU instance
+
+        :param cluster_uuid str: The UUID of the cluster
+        :param kdu_instance str: The unique name of the KDU instance
+        :param namespace str: The namespace (model) where the Bundle was deployed
+        :param kwargs: Additional parameters
+            vca_id (str): VCA ID
+
+        :return: Returns True if successful, or raises an exception
+        """
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+
+        self.log.debug(f"[uninstall] Destroying model: {model_name}")
+
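+        # Serialize concurrent uninstalls of the same model: if another worker
+        # already holds this model's lock, wait until it is released and assume
+        # that worker destroyed the model.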
+        will_not_delete = False
+        if model_name not in self.uninstall_locks:
+            self.uninstall_locks[model_name] = asyncio.Lock()
+        delete_lock = self.uninstall_locks[model_name]
+
+        while delete_lock.locked():
+            will_not_delete = True
+            await asyncio.sleep(0.1)
+
+        if will_not_delete:
+            self.log.info("Model {} deleted by another worker.".format(model_name))
+            return True
+
+        try:
+            async with delete_lock:
+                libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+
+                await libjuju.destroy_model(model_name, total_timeout=3600)
+        finally:
+            self.uninstall_locks.pop(model_name)
+
+        self.log.debug(f"[uninstall] Model {model_name} destroyed")
+        return True
+
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrade charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if status equals to "completed"
+        """
+        raise K8sException(
+            "KDUs deployed with Juju Bundle do not support charm upgrade"
+        )
+
+    async def exec_primitive(
+        self,
+        cluster_uuid: str = None,
+        kdu_instance: str = None,
+        primitive_name: str = None,
+        timeout: float = 300,
+        params: dict = None,
+        db_dict: dict = None,
+        namespace: str = None,
+        **kwargs,
+    ) -> str:
+        """Exec primitive (Juju action)
+
+        :param cluster_uuid str: The UUID of the cluster
+        :param kdu_instance str: The unique name of the KDU instance
+        :param primitive_name: Name of action that will be executed
+        :param timeout: Timeout for action execution
+        :param params: Dictionary of all the parameters needed for the action
+        :param db_dict: Dictionary for any additional data
+        :param namespace str: The namespace (model) where the Bundle was deployed
+        :param kwargs: Additional parameters
+            vca_id (str): VCA ID
+
+        :return: Returns the output of the action
+        """
+        libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+
+        namespace = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+
+        if not params or "application-name" not in params:
+            raise K8sException(
+                "Missing application-name argument, \
+                                argument needed for K8s actions"
+            )
+        try:
+            self.log.debug(
+                "[exec_primitive] Getting model "
+                "{} for the kdu_instance: {}".format(namespace, kdu_instance)
+            )
+            application_name = params["application-name"]
+            actions = await libjuju.get_actions(
+                application_name=application_name, model_name=namespace
+            )
+            if primitive_name not in actions:
+                raise K8sException("Primitive {} not found".format(primitive_name))
+            output, status = await libjuju.execute_action(
+                application_name=application_name,
+                model_name=namespace,
+                action_name=primitive_name,
+                **params,
+            )
+
+            if status != "completed":
+                raise K8sException(
+                    "status is not completed: {} output: {}".format(status, output)
+                )
+            if self.on_update_db:
+                await self.on_update_db(
+                    cluster_uuid=cluster_uuid,
+                    kdu_instance=kdu_instance,
+                    filter=db_dict["filter"],
+                )
+
+            return output
+
+        except Exception as e:
+            error_msg = "Error executing primitive {}: {}".format(primitive_name, e)
+            self.log.error(error_msg)
+            raise K8sException(message=error_msg)
+
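+    # Illustrative call (hypothetical values): run a Juju action through the
+    # primitive interface; "application-name" is mandatory inside params.
+    #
+    #     output = await connector.exec_primitive(
+    #         cluster_uuid=cluster_id, kdu_instance="my-kdu-abc123",
+    #         primitive_name="backup", params={"application-name": "mariadb-k8s"},
+    #         db_dict={"filter": {"_id": nsr_id}},
+    #     )
+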
+    """Introspection"""
+
+    async def inspect_kdu(self, kdu_model: str) -> dict:
+        """Inspect a KDU
+
+        Inspects a bundle and returns a dictionary of config parameters and
+        their default values.
+
+        :param kdu_model str: The name or path of the bundle to inspect.
+
+        :return: If successful, returns a dictionary of available parameters
+                 and their default values.
+        """
+
+        kdu = {}
+        if not os.path.exists(kdu_model):
+            raise K8sException("file {} not found".format(kdu_model))
+
+        with open(kdu_model, "r") as f:
+            bundle = yaml.safe_load(f.read())
+
+            """
+            {
+                'description': 'Test bundle',
+                'bundle': 'kubernetes',
+                'applications': {
+                    'mariadb-k8s': {
+                        'charm': 'cs:~charmed-osm/mariadb-k8s-20',
+                        'scale': 1,
+                        'options': {
+                            'password': 'manopw',
+                            'root_password': 'osm4u',
+                            'user': 'mano'
+                        },
+                        'series': 'kubernetes'
+                    }
+                }
+            }
+            """
+            # TODO: This should be returned in an agreed-upon format
+            kdu = bundle["applications"]
+
+        return kdu
+
+    async def help_kdu(self, kdu_model: str) -> str:
+        """View the README
+
+                If available, returns the README of the bundle.
+
+                :param kdu_model str: The name or path of a bundle
+        f
+                :return: If found, returns the contents of the README.
+        """
+        readme = None
+
+        files = ["README", "README.txt", "README.md"]
+        path = os.path.dirname(kdu_model)
+        for file in os.listdir(path):
+            if file in files:
+                with open(file, "r") as f:
+                    readme = f.read()
+                    break
+
+        return readme
+
+    async def status_kdu(
+        self,
+        cluster_uuid: str,
+        kdu_instance: str,
+        complete_status: bool = False,
+        yaml_format: bool = False,
+        namespace: str = None,
+        **kwargs,
+    ) -> Union[str, dict]:
+        """Get the status of the KDU
+
+        Get the current status of the KDU instance.
+
+        :param cluster_uuid str: The UUID of the cluster
+        :param kdu_instance str: The unique id of the KDU instance
+        :param complete_status: To get the complete_status of the KDU
+        :param yaml_format: To get the status in proper format for NSR record
+        :param namespace str: The namespace (model) where the Bundle was deployed
+        :param: kwargs: Additional parameters
+            vca_id (str): VCA ID
+
+        :return: A dict mapping each application to its status; if complete_status
+                 is True, the full model status (as YAML if yaml_format is set)
+        """
+        libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+        status = {}
+
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+        model_status = await libjuju.get_model_status(model_name=model_name)
+
+        if not complete_status:
+            for name in model_status.applications:
+                application = model_status.applications[name]
+                status[name] = {"status": application["status"]["status"]}
+        else:
+            if yaml_format:
+                return obj_to_yaml(model_status)
+            else:
+                return obj_to_dict(model_status)
+
+        return status
+
+    async def add_relation(
+        self, provider: RelationEndpoint, requirer: RelationEndpoint
+    ):
+        """
+        Add relation between two charmed endpoints
+
+        :param: provider: Provider relation endpoint
+        :param: requirer: Requirer relation endpoint
+        """
+        self.log.debug(f"adding new relation between {provider} and {requirer}")
+        cross_model_relation = (
+            provider.model_name != requirer.model_name
+            or provider.vca_id != requirer.vca_id
+        )
+        try:
+            if cross_model_relation:
+                # Cross-model relation
+                provider_libjuju = await self._get_libjuju(provider.vca_id)
+                requirer_libjuju = await self._get_libjuju(requirer.vca_id)
+                offer = await provider_libjuju.offer(provider)
+                if offer:
+                    saas_name = await requirer_libjuju.consume(
+                        requirer.model_name, offer, provider_libjuju
+                    )
+                    await requirer_libjuju.add_relation(
+                        requirer.model_name, requirer.endpoint, saas_name
+                    )
+            else:
+                # Standard relation
+                vca_id = provider.vca_id
+                model = provider.model_name
+                libjuju = await self._get_libjuju(vca_id)
+                # add juju relations between two applications
+                await libjuju.add_relation(
+                    model_name=model,
+                    endpoint_1=provider.endpoint,
+                    endpoint_2=requirer.endpoint,
+                )
+        except Exception as e:
+            message = f"Error adding relation between {provider} and {requirer}: {e}"
+            self.log.error(message)
+            raise Exception(message)
+
+    async def update_vca_status(
+        self, vcastatus: dict, kdu_instance: str, namespace: str = None, **kwargs
+    ):
+        """
+        Add all configs, actions, executed actions of all applications in a model to vcastatus dict
+
+        :param vcastatus dict: dict containing vcastatus
+        :param kdu_instance str: The unique id of the KDU instance
+        :param namespace str: The namespace (model) where the Bundle was deployed
+        :param: kwargs: Additional parameters
+            vca_id (str): VCA ID
+
+        :return: None
+        """
+
+        model_name = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+
+        libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+        try:
+            for vca_model_name in vcastatus:
+                # Adding executed actions
+                vcastatus[vca_model_name][
+                    "executedActions"
+                ] = await libjuju.get_executed_actions(model_name=model_name)
+
+                for application in vcastatus[vca_model_name]["applications"]:
+                    # Adding application actions
+                    vcastatus[vca_model_name]["applications"][application][
+                        "actions"
+                    ] = {}
+                    # Adding application configs
+                    vcastatus[vca_model_name]["applications"][application][
+                        "configs"
+                    ] = await libjuju.get_application_configs(
+                        model_name=model_name, application_name=application
+                    )
+
+        except Exception as e:
+            self.log.debug("Error in updating vca status: {}".format(str(e)))
+
+    async def get_services(
+        self, cluster_uuid: str, kdu_instance: str, namespace: str
+    ) -> list:
+        """Return a list of services of a kdu_instance"""
+
+        namespace = self._obtain_namespace(
+            kdu_instance=kdu_instance, namespace=namespace
+        )
+
+        credentials = self.get_credentials(cluster_uuid=cluster_uuid)
+        kubectl = self._get_kubectl(credentials)
+        return kubectl.get_services(
+            field_selector="metadata.namespace={}".format(namespace)
+        )
+
+    async def get_service(
+        self, cluster_uuid: str, service_name: str, namespace: str
+    ) -> object:
+        """Return data for a specific service inside a namespace"""
+
+        credentials = self.get_credentials(cluster_uuid=cluster_uuid)
+        kubectl = self._get_kubectl(credentials)
+        return kubectl.get_services(
+            field_selector="metadata.name={},metadata.namespace={}".format(
+                service_name, namespace
+            )
+        )[0]
+
+    def get_credentials(self, cluster_uuid: str) -> str:
+        """
+        Get Cluster Kubeconfig
+        """
+        k8scluster = self.db.get_one(
+            "k8sclusters", q_filter={"_id": cluster_uuid}, fail_on_empty=False
+        )
+
+        self.db.encrypt_decrypt_fields(
+            k8scluster.get("credentials"),
+            "decrypt",
+            ["password", "secret"],
+            schema_version=k8scluster["schema_version"],
+            salt=k8scluster["_id"],
+        )
+
+        return yaml.safe_dump(k8scluster.get("credentials"))
+
+    def _get_credential_name(self, cluster_uuid: str) -> str:
+        """
+        Get credential name for a k8s cloud
+
+        We cannot use the cluster_uuid for the credential name directly,
+        because it cannot start with a number, it must start with a letter.
+        Therefore, the k8s cloud credential name will be "cred-" followed
+        by the cluster uuid.
+
+        :param: cluster_uuid:   Cluster UUID of the kubernetes cloud (=cloud_name)
+
+        :return:                Name to use for the credential name.
+        """
+        return "cred-{}".format(cluster_uuid)
+
+    def get_namespace(self, cluster_uuid: str) -> str:
+        """Get the namespace UUID
+        Gets the namespace's unique name
+
+        :param cluster_uuid str: The UUID of the cluster
+        :returns: The namespace UUID, or raises an exception
+        """
+        pass
+
+    @staticmethod
+    def generate_kdu_instance_name(**kwargs):
+        db_dict = kwargs.get("db_dict")
+        kdu_name = kwargs.get("kdu_name", None)
+        if kdu_name:
+            kdu_instance = "{}-{}".format(kdu_name, db_dict["filter"]["_id"])
+        else:
+            kdu_instance = db_dict["filter"]["_id"]
+        return kdu_instance
+
+    async def _get_libjuju(self, vca_id: str = None) -> Libjuju:
+        """
+        Get libjuju object
+
+        :param: vca_id: VCA ID
+                        If None, get a libjuju object with a Connection to the default VCA
+                        Else, get a libjuju object with a Connection to the specified VCA
+        """
+        if not vca_id:
+            while self.loading_libjuju.locked():
+                await asyncio.sleep(0.1)
+            if not self.libjuju:
+                async with self.loading_libjuju:
+                    vca_connection = await get_connection(self._store)
+                    self.libjuju = Libjuju(vca_connection, log=self.log)
+            return self.libjuju
+        else:
+            vca_connection = await get_connection(self._store, vca_id)
+            return Libjuju(vca_connection, log=self.log, n2vc=self)
+
+    def _get_kubectl(self, credentials: str) -> Kubectl:
+        """
+        Get Kubectl object
+
+        :param: credentials: Kubeconfig contents, as a string
+        """
+        kubecfg = tempfile.NamedTemporaryFile()
+        with open(kubecfg.name, "w") as kubecfg_file:
+            kubecfg_file.write(credentials)
+        return Kubectl(config_file=kubecfg.name)
+
+    def _obtain_namespace(self, kdu_instance: str, namespace: str = None) -> str:
+        """
+        Obtain the namespace/model name to use when instantiating a Juju Bundle in K8s.
+        By default it is the kdu_instance name; however, if the user passes the namespace
+        where the bundle should be deployed, that namespace is used instead.
+
+        :param kdu_instance: the default KDU instance name
+        :param namespace: the namespace passed by the User
+        """
+
+        # Default the namespace/model name to the kdu_instance name.
+        # TODO: "return namespace if namespace else kdu_instance" should be the real
+        #  implementation, but while most methods do not pass the namespace yet, fall
+        #  back to a database lookup; this is temporary, to avoid compatibility issues.
+        return (
+            namespace
+            if namespace
+            else self._obtain_namespace_from_db(kdu_instance=kdu_instance)
+        )
+
+    def _obtain_namespace_from_db(self, kdu_instance: str) -> str:
+        db_nsrs = self.db.get_one(
+            table="nsrs", q_filter={"_admin.deployed.K8s.kdu-instance": kdu_instance}
+        )
+        for k8s in db_nsrs["_admin"]["deployed"]["K8s"]:
+            if k8s.get("kdu-instance") == kdu_instance:
+                return k8s.get("namespace")
+        return ""
diff --git a/osm_lcm/n2vc/kubectl.py b/osm_lcm/n2vc/kubectl.py
new file mode 100644 (file)
index 0000000..4190740
--- /dev/null
@@ -0,0 +1,1048 @@
+#######################################################################################
+# Copyright 2020 Canonical Ltd.
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#######################################################################################
+
+import base64
+import logging
+from typing import Dict
+import typing
+import uuid
+import json
+import tarfile
+import io
+from time import sleep
+
+from distutils.version import LooseVersion
+
+from kubernetes import client, config
+from kubernetes.client.api import VersionApi
+from kubernetes.client.models import (
+    V1ClusterRole,
+    V1Role,
+    V1ObjectMeta,
+    V1PolicyRule,
+    V1ServiceAccount,
+    V1ClusterRoleBinding,
+    V1RoleBinding,
+    V1RoleRef,
+    RbacV1Subject,
+    V1Secret,
+    V1SecretReference,
+    V1Namespace,
+    V1PersistentVolumeClaim,
+    V1PersistentVolumeClaimSpec,
+    V1PersistentVolumeClaimVolumeSource,
+    V1ResourceRequirements,
+    V1Pod,
+    V1PodSpec,
+    V1Volume,
+    V1VolumeMount,
+    V1Container,
+)
+from kubernetes.client.rest import ApiException
+from kubernetes.stream import stream
+from osm_lcm.n2vc.libjuju import retry_callback
+from retrying_async import retry
+
+SERVICE_ACCOUNT_TOKEN_KEY = "token"
+SERVICE_ACCOUNT_ROOT_CA_KEY = "ca.crt"
+# clients
+CORE_CLIENT = "core_v1"
+RBAC_CLIENT = "rbac_v1"
+STORAGE_CLIENT = "storage_v1"
+CUSTOM_OBJECT_CLIENT = "custom_object"
+
+
+class Kubectl:
+    def __init__(self, config_file=None):
+        config.load_kube_config(config_file=config_file)
+        self._clients = {
+            CORE_CLIENT: client.CoreV1Api(),
+            RBAC_CLIENT: client.RbacAuthorizationV1Api(),
+            STORAGE_CLIENT: client.StorageV1Api(),
+            CUSTOM_OBJECT_CLIENT: client.CustomObjectsApi(),
+        }
+        self._configuration = config.kube_config.Configuration.get_default_copy()
+        self.logger = logging.getLogger("lcm.kubectl")
+
+    @property
+    def configuration(self):
+        return self._configuration
+
+    @property
+    def clients(self):
+        return self._clients
+
+    def get_services(
+        self,
+        field_selector: str = None,
+        label_selector: str = None,
+    ) -> typing.List[typing.Dict]:
+        """
+        Get Service list from a namespace
+
+        :param: field_selector:     Kubernetes field selector for the namespace
+        :param: label_selector:     Kubernetes label selector for the namespace
+
+        :return: List of the services matching the selectors specified
+        """
+        kwargs = {}
+        if field_selector:
+            kwargs["field_selector"] = field_selector
+        if label_selector:
+            kwargs["label_selector"] = label_selector
+        try:
+            result = self.clients[CORE_CLIENT].list_service_for_all_namespaces(**kwargs)
+            return [
+                {
+                    "name": i.metadata.name,
+                    "cluster_ip": i.spec.cluster_ip,
+                    "type": i.spec.type,
+                    "ports": (
+                        [
+                            {
+                                "name": p.name,
+                                "node_port": p.node_port,
+                                "port": p.port,
+                                "protocol": p.protocol,
+                                "target_port": p.target_port,
+                            }
+                            for p in i.spec.ports
+                        ]
+                        if i.spec.ports
+                        else []
+                    ),
+                    "external_ip": [i.ip for i in i.status.load_balancer.ingress]
+                    if i.status.load_balancer.ingress
+                    else None,
+                }
+                for i in result.items
+            ]
+        except ApiException as e:
+            self.logger.error("Error calling get services: {}".format(e))
+            raise e
+
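+    # Illustrative usage (hypothetical kubeconfig path): list the services of a
+    # single namespace through a field selector.
+    #
+    #     kubectl = Kubectl(config_file="/tmp/kubeconfig.yaml")
+    #     services = kubectl.get_services(field_selector="metadata.namespace=osm")
+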
+    def get_default_storage_class(self) -> str:
+        """
+        Default storage class
+
+        :return:    Returns the default storage class name, if exists.
+                    If not, it returns the first storage class.
+                    If there are no storage classes, returns None
+        """
+        storage_classes = self.clients[STORAGE_CLIENT].list_storage_class()
+        selected_sc = None
+        default_sc_annotations = {
+            "storageclass.kubernetes.io/is-default-class": "true",
+            # Older clusters still use the beta annotation.
+            "storageclass.beta.kubernetes.io/is-default-class": "true",
+        }
+        for sc in storage_classes.items:
+            if not selected_sc:
+                # Select the first storage class in case there is no default class
+                selected_sc = sc.metadata.name
+            annotations = sc.metadata.annotations or {}
+            if any(
+                k in annotations and annotations[k] == v
+                for k, v in default_sc_annotations.items()
+            ):
+                # Default storage
+                selected_sc = sc.metadata.name
+                break
+        return selected_sc
+
+    def create_cluster_role(
+        self,
+        name: str,
+        labels: Dict[str, str],
+        namespace: str = "kube-system",
+    ):
+        """
+        Create a cluster role
+
+        :param: name:       Name of the cluster role
+        :param: labels:     Labels for cluster role metadata
+        :param: namespace:  Kubernetes namespace for cluster role metadata
+                            Default: kube-system
+        """
+        cluster_roles = self.clients[RBAC_CLIENT].list_cluster_role(
+            field_selector="metadata.name={}".format(name)
+        )
+
+        if len(cluster_roles.items) > 0:
+            raise Exception("Role with metadata.name={} already exists".format(name))
+
+        metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace)
+        # Cluster role
+        cluster_role = V1ClusterRole(
+            metadata=metadata,
+            rules=[
+                V1PolicyRule(api_groups=["*"], resources=["*"], verbs=["*"]),
+                V1PolicyRule(non_resource_ur_ls=["*"], verbs=["*"]),
+            ],
+        )
+
+        self.clients[RBAC_CLIENT].create_cluster_role(cluster_role)
+
+    async def create_role(
+        self,
+        name: str,
+        labels: Dict[str, str],
+        api_groups: list,
+        resources: list,
+        verbs: list,
+        namespace: str,
+    ):
+        """
+        Create a role with one PolicyRule
+
+        :param: name:       Name of the namespaced Role
+        :param: labels:     Labels for namespaced Role metadata
+        :param: api_groups: List with api-groups allowed in the policy rule
+        :param: resources:  List with resources allowed in the policy rule
+        :param: verbs:      List with verbs allowed in the policy rule
+        :param: namespace:  Kubernetes namespace for Role metadata
+
+        :return: None
+        """
+
+        roles = self.clients[RBAC_CLIENT].list_namespaced_role(
+            namespace, field_selector="metadata.name={}".format(name)
+        )
+
+        if len(roles.items) > 0:
+            raise Exception("Role with metadata.name={} already exists".format(name))
+
+        metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace)
+
+        role = V1Role(
+            metadata=metadata,
+            rules=[
+                V1PolicyRule(api_groups=api_groups, resources=resources, verbs=verbs),
+            ],
+        )
+
+        self.clients[RBAC_CLIENT].create_namespaced_role(namespace, role)
+
+    def delete_cluster_role(self, name: str):
+        """
+        Delete a cluster role
+
+        :param: name:       Name of the cluster role
+        """
+        self.clients[RBAC_CLIENT].delete_cluster_role(name)
+
+    def _get_kubectl_version(self):
+        version = VersionApi().get_code()
+        return "{}.{}".format(version.major, version.minor)
+
+    def _need_to_create_new_secret(self):
+        min_k8s_version = "1.24"
+        current_k8s_version = self._get_kubectl_version()
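+        # Since K8s 1.24, service accounts get no auto-generated token Secret,
+        # so one must be created explicitly (e.g. this returns True on 1.26)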
+        return LooseVersion(min_k8s_version) <= LooseVersion(current_k8s_version)
+
+    def _get_secret_name(self, service_account_name: str):
+        random_alphanum = str(uuid.uuid4())[:5]
+        return "{}-token-{}".format(service_account_name, random_alphanum)
+
+    def _create_service_account_secret(
+        self,
+        service_account_name: str,
+        namespace: str,
+        secret_name: str,
+    ):
+        """
+        Create a secret for the service account. K8s version >= 1.24
+
+        :param: service_account_name: Name of the service account
+        :param: namespace:  Kubernetes namespace for service account metadata
+        :param: secret_name: Name of the secret
+        """
+        v1_core = self.clients[CORE_CLIENT]
+        secrets = v1_core.list_namespaced_secret(
+            namespace, field_selector="metadata.name={}".format(secret_name)
+        ).items
+
+        if len(secrets) > 0:
+            raise Exception(
+                "Secret with metadata.name={} already exists".format(secret_name)
+            )
+
+        annotations = {"kubernetes.io/service-account.name": service_account_name}
+        metadata = V1ObjectMeta(
+            name=secret_name, namespace=namespace, annotations=annotations
+        )
+        type = "kubernetes.io/service-account-token"
+        secret = V1Secret(metadata=metadata, type=type)
+        v1_core.create_namespaced_secret(namespace, secret)
+
+    def _get_secret_reference_list(self, namespace: str, secret_name: str):
+        """
+        Return a secret reference list with one secret.
+        K8s version >= 1.24
+
+        :param: namespace:  Kubernetes namespace for service account metadata
+        :param: secret_name: Name of the secret
+        :rtype: list[V1SecretReference]
+        """
+        return [V1SecretReference(name=secret_name, namespace=namespace)]
+
+    def create_service_account(
+        self,
+        name: str,
+        labels: Dict[str, str],
+        namespace: str = "kube-system",
+    ):
+        """
+        Create a service account
+
+        :param: name:       Name of the service account
+        :param: labels:     Labels for service account metadata
+        :param: namespace:  Kubernetes namespace for service account metadata
+                            Default: kube-system
+        """
+        v1_core = self.clients[CORE_CLIENT]
+        service_accounts = v1_core.list_namespaced_service_account(
+            namespace, field_selector="metadata.name={}".format(name)
+        )
+        if len(service_accounts.items) > 0:
+            raise Exception(
+                "Service account with metadata.name={} already exists".format(name)
+            )
+
+        metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace)
+
+        if self._need_to_create_new_secret():
+            secret_name = self._get_secret_name(name)
+            secrets = self._get_secret_reference_list(namespace, secret_name)
+            service_account = V1ServiceAccount(metadata=metadata, secrets=secrets)
+            v1_core.create_namespaced_service_account(namespace, service_account)
+            self._create_service_account_secret(name, namespace, secret_name)
+        else:
+            service_account = V1ServiceAccount(metadata=metadata)
+            v1_core.create_namespaced_service_account(namespace, service_account)
+
+    def delete_secret(self, name: str, namespace: str = "kube-system"):
+        """
+        Delete a secret
+
+        :param: name:       Name of the secret
+        :param: namespace:  Kubernetes namespace
+                            Default: kube-system
+        """
+        self.clients[CORE_CLIENT].delete_namespaced_secret(name, namespace)
+
+    def delete_service_account(self, name: str, namespace: str = "kube-system"):
+        """
+        Delete a service account
+
+        :param: name:       Name of the service account
+        :param: namespace:  Kubernetes namespace for service account metadata
+                            Default: kube-system
+        """
+        self.clients[CORE_CLIENT].delete_namespaced_service_account(name, namespace)
+
+    def create_cluster_role_binding(
+        self, name: str, labels: Dict[str, str], namespace: str = "kube-system"
+    ):
+        """
+        Create a cluster role binding
+
+        :param: name:       Name of the cluster role
+        :param: labels:     Labels for cluster role binding metadata
+        :param: namespace:  Kubernetes namespace for cluster role binding metadata
+                            Default: kube-system
+        """
+        role_bindings = self.clients[RBAC_CLIENT].list_cluster_role_binding(
+            field_selector="metadata.name={}".format(name)
+        )
+        if len(role_bindings.items) > 0:
+            raise Exception("Generated rbac id already exists")
+
+        role_binding = V1ClusterRoleBinding(
+            metadata=V1ObjectMeta(name=name, labels=labels),
+            role_ref=V1RoleRef(kind="ClusterRole", name=name, api_group=""),
+            subjects=[
+                RbacV1Subject(kind="ServiceAccount", name=name, namespace=namespace)
+            ],
+        )
+        self.clients[RBAC_CLIENT].create_cluster_role_binding(role_binding)
+
+    async def create_role_binding(
+        self,
+        name: str,
+        role_name: str,
+        sa_name: str,
+        labels: Dict[str, str],
+        namespace: str,
+    ):
+        """
+        Create a namespaced role binding
+
+        :param: name:       Name of the namespaced Role Binding
+        :param: role_name:  Name of the namespaced Role to be bound
+        :param: sa_name:    Name of the Service Account to be bound
+        :param: labels:     Labels for Role Binding metadata
+        :param: namespace:  Kubernetes namespace for Role Binding metadata
+
+        :return: None
+        """
+        role_bindings = self.clients[RBAC_CLIENT].list_namespaced_role_binding(
+            namespace, field_selector="metadata.name={}".format(name)
+        )
+        if len(role_bindings.items) > 0:
+            raise Exception(
+                "Role Binding with metadata.name={} already exists".format(name)
+            )
+
+        role_binding = V1RoleBinding(
+            metadata=V1ObjectMeta(name=name, labels=labels),
+            role_ref=V1RoleRef(kind="Role", name=role_name, api_group=""),
+            subjects=[
+                RbacV1Subject(kind="ServiceAccount", name=sa_name, namespace=namespace)
+            ],
+        )
+        self.clients[RBAC_CLIENT].create_namespaced_role_binding(
+            namespace, role_binding
+        )
+
+    def delete_cluster_role_binding(self, name: str):
+        """
+        Delete a cluster role binding
+
+        :param: name:       Name of the cluster role binding
+        """
+        self.clients[RBAC_CLIENT].delete_cluster_role_binding(name)
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed getting the secret from service account"),
+        callback=retry_callback,
+    )
+    async def get_secret_data(
+        self, name: str, namespace: str = "kube-system"
+    ) -> (str, str):
+        """
+        Get secret data
+
+        :param: name:       Name of the secret data
+        :param: namespace:  Name of the namespace where the secret is stored
+
+        :return: Tuple with the token and client certificate
+        """
+        v1_core = self.clients[CORE_CLIENT]
+
+        secret_name = None
+
+        service_accounts = v1_core.list_namespaced_service_account(
+            namespace, field_selector="metadata.name={}".format(name)
+        )
+        if len(service_accounts.items) == 0:
+            raise Exception(
+                "Service account not found with metadata.name={}".format(name)
+            )
+        service_account = service_accounts.items[0]
+        if service_account.secrets and len(service_account.secrets) > 0:
+            secret_name = service_account.secrets[0].name
+        if not secret_name:
+            raise Exception(
+                "Failed getting the secret from service account {}".format(name)
+            )
+        # TODO: refactor to use get_secret_content
+        secret = v1_core.list_namespaced_secret(
+            namespace, field_selector="metadata.name={}".format(secret_name)
+        ).items[0]
+
+        token = secret.data[SERVICE_ACCOUNT_TOKEN_KEY]
+        client_certificate_data = secret.data[SERVICE_ACCOUNT_ROOT_CA_KEY]
+
+        return (
+            base64.b64decode(token).decode("utf-8"),
+            base64.b64decode(client_certificate_data).decode("utf-8"),
+        )
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed getting data from the secret"),
+    )
+    async def get_secret_content(
+        self,
+        name: str,
+        namespace: str,
+    ) -> dict:
+        """
+        Get secret data
+
+        :param: name:       Name of the secret
+        :param: namespace:  Name of the namespace where the secret is stored
+
+        :return: Dictionary with secret's data
+        """
+        v1_core = self.clients[CORE_CLIENT]
+
+        secret = v1_core.read_namespaced_secret(name, namespace)
+
+        return secret.data
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed creating the secret"),
+    )
+    async def create_secret(
+        self, name: str, data: dict, namespace: str, secret_type: str
+    ):
+        """
+        Create secret with data
+
+        :param: name:        Name of the secret
+        :param: data:        Dict with data content. Values must be already base64 encoded
+        :param: namespace:   Name of the namespace where the secret will be stored
+        :param: secret_type: Type of the secret, e.g., Opaque, kubernetes.io/service-account-token, kubernetes.io/tls
+
+        :return: None
+        """
+        self.logger.info("Enter create_secret function")
+        v1_core = self.clients[CORE_CLIENT]
+        self.logger.info(f"v1_core: {v1_core}")
+        metadata = V1ObjectMeta(name=name, namespace=namespace)
+        self.logger.info(f"metadata: {metadata}")
+        secret = V1Secret(metadata=metadata, data=data, type=secret_type)
+        self.logger.info(f"secret: {secret}")
+        v1_core.create_namespaced_secret(namespace, secret)
+        self.logger.info("Namespaced secret was created")
+
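+    # Illustrative call (hypothetical names): values must be base64-encoded by
+    # the caller before being passed in.
+    #
+    #     import base64
+    #     await kubectl.create_secret(
+    #         name="copied-secret",
+    #         data={"token": base64.b64encode(b"s3cret").decode()},
+    #         namespace="osm", secret_type="Opaque",
+    #     )
+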
+    async def create_certificate(
+        self,
+        namespace: str,
+        name: str,
+        dns_prefix: str,
+        secret_name: str,
+        usages: list,
+        issuer_name: str,
+    ):
+        """
+        Creates cert-manager certificate object
+
+        :param: namespace:       Name of the namespace where the certificate and secret is stored
+        :param: name:            Name of the certificate object
+        :param: dns_prefix:      Prefix for the dnsNames. They will be prefixed to the common k8s svc suffixes
+        :param: secret_name:     Name of the secret created by cert-manager
+        :param: usages:          List of X.509 key usages
+        :param: issuer_name:     Name of the cert-manager's Issuer or ClusterIssuer object
+
+        """
+        certificate_body = {
+            "apiVersion": "cert-manager.io/v1",
+            "kind": "Certificate",
+            "metadata": {"name": name, "namespace": namespace},
+            "spec": {
+                "secretName": secret_name,
+                "privateKey": {
+                    "rotationPolicy": "Always",
+                    "algorithm": "ECDSA",
+                    "size": 256,
+                },
+                "duration": "8760h",  # 1 Year
+                "renewBefore": "2208h",  # 9 months
+                "subject": {"organizations": ["osm"]},
+                "commonName": "osm",
+                "isCA": False,
+                "usages": usages,
+                "dnsNames": [
+                    "{}.{}".format(dns_prefix, namespace),
+                    "{}.{}.svc".format(dns_prefix, namespace),
+                    "{}.{}.svc.cluster".format(dns_prefix, namespace),
+                    "{}.{}.svc.cluster.local".format(dns_prefix, namespace),
+                ],
+                "issuerRef": {"name": issuer_name, "kind": "ClusterIssuer"},
+            },
+        }
+        client = self.clients[CUSTOM_OBJECT_CLIENT]
+        try:
+            client.create_namespaced_custom_object(
+                group="cert-manager.io",
+                plural="certificates",
+                version="v1",
+                body=certificate_body,
+                namespace=namespace,
+            )
+        except ApiException as e:
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "alreadyexists":
+                self.logger.warning("Certificate already exists: {}".format(e))
+            else:
+                raise e
+
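+    # Illustrative call (hypothetical values): the resulting certificate covers
+    # "<dns_prefix>.<namespace>" plus the usual .svc suffixes built above.
+    #
+    #     await kubectl.create_certificate(
+    #         namespace="osm", name="vca-cert", dns_prefix="vca",
+    #         secret_name="vca-cert-secret", usages=["server auth"],
+    #         issuer_name="ca-clusterissuer",
+    #     )
+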
+    async def delete_certificate(self, namespace, object_name):
+        client = self.clients[CUSTOM_OBJECT_CLIENT]
+        try:
+            client.delete_namespaced_custom_object(
+                group="cert-manager.io",
+                plural="certificates",
+                version="v1",
+                name=object_name,
+                namespace=namespace,
+            )
+        except ApiException as e:
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "notfound":
+                self.logger.warning("Certificate already deleted: {}".format(e))
+            else:
+                raise e
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed creating the namespace"),
+    )
+    async def create_namespace(self, name: str, labels: dict = None):
+        """
+        Create a namespace
+
+        :param: name:       Name of the namespace to be created
+        :param: labels:     Dictionary with labels for the new namespace
+
+        """
+        v1_core = self.clients[CORE_CLIENT]
+        metadata = V1ObjectMeta(name=name, labels=labels)
+        namespace = V1Namespace(
+            metadata=metadata,
+        )
+
+        try:
+            v1_core.create_namespace(namespace)
+            self.logger.debug("Namespace created: {}".format(name))
+        except ApiException as e:
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "alreadyexists":
+                self.logger.warning("Namespace already exists: {}".format(e))
+            else:
+                raise e
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed deleting the namespace"),
+    )
+    async def delete_namespace(self, name: str):
+        """
+        Delete a namespace
+
+        :param: name:       Name of the namespace to be deleted
+
+        """
+        try:
+            self.clients[CORE_CLIENT].delete_namespace(name)
+        except ApiException as e:
+            if e.reason == "Not Found":
+                self.logger.warning("Namespace already deleted: {}".format(e))
+            else:
+                raise e
+
+    def get_secrets(
+        self,
+        namespace: str,
+        field_selector: str = None,
+    ) -> typing.List[typing.Dict]:
+        """
+        Get Secret list from a namespace
+
+        :param: namespace:  Kubernetes namespace
+        :param: field_selector:     Kubernetes field selector
+
+        :return: List of the secrets matching the selectors specified
+        """
+        try:
+            v1_core = self.clients[CORE_CLIENT]
+            secrets = v1_core.list_namespaced_secret(
+                namespace=namespace,
+                field_selector=field_selector,
+            ).items
+            return secrets
+        except ApiException as e:
+            self.logger.error("Error calling get secrets: {}".format(e))
+            raise e
+
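+    # Illustrative usage sketch, not part of this module (namespace and secret
+    # name hypothetical): field selectors use the standard Kubernetes syntax,
+    # so a single secret can be fetched by name:
+    #
+    #   secrets = kubectl.get_secrets(
+    #       namespace="osm",
+    #       field_selector="metadata.name=my-secret",
+    #   )
+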
+    def create_generic_object(
+        self,
+        api_group: str,
+        api_plural: str,
+        api_version: str,
+        namespace: str,
+        manifest_dict: dict,
+    ):
+        """
+        Creates generic object
+
+        :param: api_group:       API Group
+        :param: api_plural:      API Plural
+        :param: api_version:     API Version
+        :param: namespace:       Namespace
+        :param: manifest_dict:   Dictionary with the content of the Kubernetes manifest
+
+        """
+        client = self.clients[CUSTOM_OBJECT_CLIENT]
+        try:
+            if namespace:
+                client.create_namespaced_custom_object(
+                    group=api_group,
+                    plural=api_plural,
+                    version=api_version,
+                    body=manifest_dict,
+                    namespace=namespace,
+                )
+            else:
+                client.create_cluster_custom_object(
+                    group=api_group,
+                    plural=api_plural,
+                    version=api_version,
+                    body=manifest_dict,
+                )
+        except ApiException as e:
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "alreadyexists":
+                self.logger.warning("Object already exists: {}".format(e))
+            else:
+                raise e
+
+    def delete_generic_object(
+        self,
+        api_group: str,
+        api_plural: str,
+        api_version: str,
+        namespace: str,
+        name: str,
+    ):
+        """
+        Deletes generic object
+
+        :param: api_group:       API Group
+        :param: api_plural:      API Plural
+        :param: api_version:     API Version
+        :param: namespace:       Namespace
+        :param: name:            Name of the object
+
+        """
+        client = self.clients[CUSTOM_OBJECT_CLIENT]
+        try:
+            if namespace:
+                client.delete_namespaced_custom_object(
+                    group=api_group,
+                    plural=api_plural,
+                    version=api_version,
+                    name=name,
+                    namespace=namespace,
+                )
+            else:
+                client.delete_cluster_custom_object(
+                    group=api_group,
+                    plural=api_plural,
+                    version=api_version,
+                    name=name,
+                )
+        except ApiException as e:
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "notfound":
+                self.logger.warning("Object already deleted: {}".format(e))
+            else:
+                raise e
+
+    async def get_generic_object(
+        self,
+        api_group: str,
+        api_plural: str,
+        api_version: str,
+        namespace: str,
+        name: str,
+    ):
+        """
+        Gets generic object
+
+        :param: api_group:       API Group
+        :param: api_plural:      API Plural
+        :param: api_version:     API Version
+        :param: namespace:       Namespace
+        :param: name:            Name of the object
+
+        """
+        client = self.clients[CUSTOM_OBJECT_CLIENT]
+        try:
+            if namespace:
+                object_dict = client.list_namespaced_custom_object(
+                    group=api_group,
+                    plural=api_plural,
+                    version=api_version,
+                    namespace=namespace,
+                    field_selector=f"metadata.name={name}",
+                )
+            else:
+                object_dict = client.list_cluster_custom_object(
+                    group=api_group,
+                    plural=api_plural,
+                    version=api_version,
+                    field_selector=f"metadata.name={name}",
+                )
+            if len(object_dict.get("items")) == 0:
+                return None
+            return object_dict.get("items")[0]
+        except ApiException as e:
+            self.logger.debug(f"Exception: {e}")
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "notfound":
+                self.logger.warning("Cannot get custom object: {}".format(e))
+                return None
+            else:
+                raise e
+
+    async def list_generic_object(
+        self,
+        api_group: str,
+        api_plural: str,
+        api_version: str,
+        namespace: str,
+    ):
+        """
+        Lists all generic objects of the requested API group
+
+        :param: api_group:       API Group
+        :param: api_plural:      API Plural
+        :param: api_version:     API Version
+        :param: namespace:       Namespace
+
+        """
+        client = self.clients[CUSTOM_OBJECT_CLIENT]
+        try:
+            if namespace:
+                object_dict = client.list_namespaced_custom_object(
+                    group=api_group,
+                    plural=api_plural,
+                    version=api_version,
+                    namespace=namespace,
+                )
+            else:
+                object_dict = client.list_cluster_custom_object(
+                    group=api_group,
+                    plural=api_plural,
+                    version=api_version,
+                )
+            self.logger.debug(f"Object-list: {object_dict.get('items')}")
+            return object_dict.get("items")
+        except ApiException as e:
+            self.logger.debug(f"Exception: {e}")
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "notfound":
+                self.logger.warning(
+                    "Cannot find specified custom objects: {}".format(e)
+                )
+                return []
+            else:
+                raise e
+
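+    # Illustrative usage sketch, not part of this module: the generic object
+    # helpers take the API group/plural/version triple of the target custom
+    # resource, so retrieving a cert-manager Certificate (names hypothetical)
+    # could look like this; a None return means the object was not found:
+    #
+    #   cert = await kubectl.get_generic_object(
+    #       api_group="cert-manager.io",
+    #       api_plural="certificates",
+    #       api_version="v1",
+    #       namespace="osm",
+    #       name="myservice-cert",
+    #   )
+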
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed creating the secret"),
+    )
+    async def create_secret_string(
+        self, name: str, string_data: dict, namespace: str, secret_type: str
+    ):
+        """
+        Create secret with data
+
+        :param: name:        Name of the secret
+        :param: string_data: Dictionary with the data content (plain strings, not base64-encoded)
+        :param: namespace:   Name of the namespace where the secret will be stored
+        :param: secret_type: Type of the secret, e.g., Opaque, kubernetes.io/service-account-token, kubernetes.io/tls
+
+        :return: None
+        """
+        v1_core = self.clients[CORE_CLIENT]
+        metadata = V1ObjectMeta(name=name, namespace=namespace)
+        secret = V1Secret(metadata=metadata, string_data=string_data, type=secret_type)
+        v1_core.create_namespaced_secret(namespace, secret)
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed creating the pvc"),
+    )
+    async def create_pvc(self, name: str, namespace: str):
+        """
+        Create a PVC
+
+        :param: name:       Name of the pvc to be created
+        :param: namespace:  Name of the namespace where the pvc will be stored
+
+        """
+        try:
+            pvc = V1PersistentVolumeClaim(
+                api_version="v1",
+                kind="PersistentVolumeClaim",
+                metadata=V1ObjectMeta(name=name),
+                spec=V1PersistentVolumeClaimSpec(
+                    access_modes=["ReadWriteOnce"],
+                    resources=V1ResourceRequirements(requests={"storage": "100Mi"}),
+                ),
+            )
+            self.clients[CORE_CLIENT].create_namespaced_persistent_volume_claim(
+                namespace=namespace, body=pvc
+            )
+        except ApiException as e:
+            info = json.loads(e.body)
+            if info.get("reason").lower() == "alreadyexists":
+                self.logger.warning("PVC already exists: {}".format(e))
+            else:
+                raise e
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed deleting the pvc"),
+    )
+    async def delete_pvc(self, name: str, namespace: str):
+        """
+        Delete a PVC
+
+        :param: name:       Name of the pvc to be deleted
+        :param: namespace:  Namespace
+
+        """
+        self.clients[CORE_CLIENT].delete_namespaced_persistent_volume_claim(
+            name, namespace
+        )
+
+    def copy_file_to_pod(
+        self, namespace, pod_name, container_name, src_file, dest_path
+    ):
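+        """
+        Copy a local file into a running container, in the style of kubectl cp
+
+        :param: namespace:       Namespace of the pod
+        :param: pod_name:        Name of the pod
+        :param: container_name:  Name of the container inside the pod
+        :param: src_file:        Local path of the file to copy
+        :param: dest_path:       Destination path of the file inside the container
+        """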
+        # Create an in-memory tar file containing the source file
+        tar_buffer = io.BytesIO()
+        with tarfile.open(fileobj=tar_buffer, mode="w") as tar:
+            tar.add(src_file, arcname=dest_path.split("/")[-1])
+
+        tar_buffer.seek(0)
+
+        # Define the command to extract the tar file in the pod
+        exec_command = ["tar", "xvf", "-", "-C", dest_path.rsplit("/", 1)[0]]
+
+        # Execute the command
+        resp = stream(
+            self.clients[CORE_CLIENT].connect_get_namespaced_pod_exec,
+            pod_name,
+            namespace,
+            command=exec_command,
+            container=container_name,
+            stdin=True,
+            stderr=True,
+            stdout=True,
+            tty=False,
+            _preload_content=False,
+        )
+
+        # Write the tar data to the pod
+        resp.write_stdin(tar_buffer.read())
+        resp.close()
+
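+    # The method above mirrors what `kubectl cp` does under the hood: it wraps
+    # the source file in an in-memory tar archive and pipes it to `tar xvf -`
+    # executed inside the target container. Illustrative call, with
+    # hypothetical names:
+    #
+    #   kubectl.copy_file_to_pod(
+    #       namespace="osm",
+    #       pod_name="copy-pod-my-pvc",
+    #       container_name="copy-container",
+    #       src_file="/tmp/config.yaml",
+    #       dest_path="/mnt/data/config.yaml",
+    #   )
+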
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed creating the pvc"),
+    )
+    async def create_pvc_with_content(
+        self, name: str, namespace: str, src_file: str, dest_filename: str
+    ):
+        """
+        Create a PVC with content
+
+        :param: name:       Name of the pvc to be created
+        :param: namespace:  Name of the namespace where the pvc will be stored
+        :param: src_file:   File to be copied
+        :param: dest_filename:   Name of the file in the destination folder
+        """
+        pod_name = f"copy-pod-{name}"
+        self.logger.debug(f"Creating pvc {name}")
+        await self.create_pvc(name=name, namespace=namespace)
+        self.logger.debug("Sleeping")
+        sleep(40)
+        self.logger.debug(f"Creating pod {pod_name}")
+        await self.create_copy_pod(name=pod_name, namespace=namespace, pvc_name=name)
+        self.logger.debug("Sleeping")
+        sleep(40)
+        self.logger.debug(f"Copying files to pod {pod_name}")
+        self.copy_file_to_pod(
+            namespace=namespace,
+            pod_name=pod_name,
+            container_name="copy-container",
+            src_file=src_file,
+            dest_path=f"/mnt/data/{dest_filename}",
+        )
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed creating the pvc"),
+    )
+    async def create_copy_pod(self, name: str, namespace: str, pvc_name: str):
+        """
+        Create a pod to copy content into a PVC
+
+        :param: name:       Name of the pod to be created
+        :param: namespace:  Name of the namespace where the pod will be stored
+        :param: pvc_name:   Name of the PVC that the pod will mount as a volume
+
+        """
+        pod = V1Pod(
+            api_version="v1",
+            kind="Pod",
+            metadata=V1ObjectMeta(name=name),
+            spec=V1PodSpec(
+                containers=[
+                    V1Container(
+                        name="copy-container",
+                        image="busybox",  # Imagen ligera para copiar archivos
+                        command=["sleep", "3600"],  # Mantén el contenedor en ejecución
+                        volume_mounts=[
+                            V1VolumeMount(mount_path="/mnt/data", name="my-storage")
+                        ],
+                    )
+                ],
+                volumes=[
+                    V1Volume(
+                        name="my-storage",
+                        persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
+                            claim_name=pvc_name
+                        ),
+                    )
+                ],
+            ),
+        )
+        # Create the pod
+        self.clients[CORE_CLIENT].create_namespaced_pod(namespace=namespace, body=pod)
+
+    @retry(
+        attempts=10,
+        delay=1,
+        fallback=Exception("Failed deleting the pod"),
+    )
+    async def delete_pod(self, name: str, namespace: str):
+        """
+        Delete a pod
+
+        :param: name:       Name of the pod to be deleted
+        :param: namespace:  Namespace
+
+        """
+        self.clients[CORE_CLIENT].delete_namespaced_pod(name, namespace)
diff --git a/osm_lcm/n2vc/libjuju.py b/osm_lcm/n2vc/libjuju.py
new file mode 100644 (file)
index 0000000..cbd2b4c
--- /dev/null
@@ -0,0 +1,1908 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+import asyncio
+import logging
+import os
+import typing
+import yaml
+
+import time
+
+import juju.errors
+from juju.bundle import BundleHandler
+from juju.model import Model
+from juju.machine import Machine
+from juju.application import Application
+from juju.unit import Unit
+from juju.url import URL
+from juju.version import DEFAULT_ARCHITECTURE
+from juju.client._definitions import (
+    FullStatus,
+    QueryApplicationOffersResults,
+    Cloud,
+    CloudCredential,
+)
+from juju.controller import Controller
+from juju.client import client
+from juju import tag
+
+from osm_lcm.n2vc.definitions import Offer, RelationEndpoint
+from osm_lcm.n2vc.juju_watcher import JujuModelWatcher
+from osm_lcm.n2vc.provisioner import AsyncSSHProvisioner
+from osm_lcm.n2vc.n2vc_conn import N2VCConnector
+from osm_lcm.n2vc.exceptions import (
+    JujuMachineNotFound,
+    JujuApplicationNotFound,
+    JujuLeaderUnitNotFound,
+    JujuActionNotFound,
+    JujuControllerFailedConnecting,
+    JujuApplicationExists,
+    JujuInvalidK8sConfiguration,
+    JujuError,
+)
+from osm_lcm.n2vc.vca.cloud import Cloud as VcaCloud
+from osm_lcm.n2vc.vca.connection import Connection
+from kubernetes.client.configuration import Configuration
+from retrying_async import retry
+
+
+RBAC_LABEL_KEY_NAME = "rbac-id"
+
+
+@asyncio.coroutine
+def retry_callback(attempt, exc, args, kwargs, delay=0.5, *, loop):
+    # Specifically overridden from upstream implementation so it can
+    # continue to work with Python 3.10
+    yield from asyncio.sleep(attempt * delay)
+    return retry
+
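+# Illustrative note, assuming retrying_async invokes the callback between
+# failed attempts: the coroutine above simply sleeps attempt * delay seconds,
+# giving a linear backoff. It is wired into the decorators used throughout
+# this module, e.g.:
+#
+#   @retry(attempts=3, delay=5, callback=retry_callback)
+#   async def flaky_operation():
+#       ...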
+
+class Libjuju:
+    def __init__(
+        self,
+        vca_connection: Connection,
+        log: logging.Logger = None,
+        n2vc: N2VCConnector = None,
+    ):
+        """
+        Constructor
+
+        :param: vca_connection:         n2vc.vca.connection object
+        :param: log:                    Logger
+        :param: n2vc:                   N2VC object
+        """
+
+        self.log = log or logging.getLogger("Libjuju")
+        self.n2vc = n2vc
+        self.vca_connection = vca_connection
+
+        self.creating_model = asyncio.Lock()
+
+        if self.vca_connection.is_default:
+            self.health_check_task = self._create_health_check_task()
+
+    def _create_health_check_task(self):
+        return asyncio.get_event_loop().create_task(self.health_check())
+
+    async def get_controller(self, timeout: float = 60.0) -> Controller:
+        """
+        Get controller
+
+        :param: timeout: Time in seconds to wait for controller to connect
+        """
+        controller = None
+        try:
+            controller = Controller()
+            await asyncio.wait_for(
+                controller.connect(
+                    endpoint=self.vca_connection.data.endpoints,
+                    username=self.vca_connection.data.user,
+                    password=self.vca_connection.data.secret,
+                    cacert=self.vca_connection.data.cacert,
+                ),
+                timeout=timeout,
+            )
+            if self.vca_connection.is_default:
+                endpoints = await controller.api_endpoints
+                if not all(
+                    endpoint in self.vca_connection.endpoints for endpoint in endpoints
+                ):
+                    await self.vca_connection.update_endpoints(endpoints)
+            return controller
+        except asyncio.CancelledError as e:
+            raise e
+        except Exception as e:
+            self.log.error(
+                "Failed connecting to controller: {}... {}".format(
+                    self.vca_connection.data.endpoints, e
+                )
+            )
+            if controller:
+                await self.disconnect_controller(controller)
+
+            raise JujuControllerFailedConnecting(
+                f"Error connecting to Juju controller: {e}"
+            )
+
+    async def disconnect(self):
+        """Disconnect"""
+        # Cancel health check task
+        self.health_check_task.cancel()
+        self.log.debug("Libjuju disconnected!")
+
+    async def disconnect_model(self, model: Model):
+        """
+        Disconnect model
+
+        :param: model: Model that will be disconnected
+        """
+        await model.disconnect()
+
+    async def disconnect_controller(self, controller: Controller):
+        """
+        Disconnect controller
+
+        :param: controller: Controller that will be disconnected
+        """
+        if controller:
+            await controller.disconnect()
+
+    @retry(attempts=3, delay=5, timeout=None, callback=retry_callback)
+    async def add_model(self, model_name: str, cloud: VcaCloud):
+        """
+        Create model
+
+        :param: model_name: Model name
+        :param: cloud: Cloud object
+        """
+
+        # Get controller
+        controller = await self.get_controller()
+        model = None
+        try:
+            # Block until other workers have finished model creation
+            while self.creating_model.locked():
+                await asyncio.sleep(0.1)
+
+            # Create the model
+            async with self.creating_model:
+                if await self.model_exists(model_name, controller=controller):
+                    return
+                self.log.debug("Creating model {}".format(model_name))
+                model = await controller.add_model(
+                    model_name,
+                    config=self.vca_connection.data.model_config,
+                    cloud_name=cloud.name,
+                    credential_name=cloud.credential_name,
+                )
+        except juju.errors.JujuAPIError as e:
+            if "already exists" in e.message:
+                pass
+            else:
+                raise e
+        finally:
+            if model:
+                await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+    async def get_executed_actions(self, model_name: str) -> list:
+        """
+        Get executed/history of actions for a model.
+
+        :param: model_name: Model name, str.
+        :return: List of executed actions for a model.
+        """
+        model = None
+        executed_actions = []
+        controller = await self.get_controller()
+        try:
+            model = await self.get_model(controller, model_name)
+            # Get all unique action names
+            actions = {}
+            for application in model.applications:
+                application_actions = await self.get_actions(application, model_name)
+                actions.update(application_actions)
+            # Get status of all actions
+            for application_action in actions:
+                app_action_status_list = await model.get_action_status(
+                    name=application_action
+                )
+                for action_id, action_status in app_action_status_list.items():
+                    executed_action = {
+                        "id": action_id,
+                        "action": application_action,
+                        "status": action_status,
+                    }
+                    # Get action output by id
+                    action_status = await model.get_action_output(executed_action["id"])
+                    for k, v in action_status.items():
+                        executed_action[k] = v
+                    executed_actions.append(executed_action)
+        except Exception as e:
+            raise JujuError(
+                "Error in getting executed actions for model: {}. Error: {}".format(
+                    model_name, str(e)
+                )
+            )
+        finally:
+            if model:
+                await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+        return executed_actions
+
+    async def get_application_configs(
+        self, model_name: str, application_name: str
+    ) -> dict:
+        """
+        Get available configs for an application.
+
+        :param: model_name: Model name, str.
+        :param: application_name: Application name, str.
+
+        :return: A dict with the current configuration of the application
+        """
+        model = None
+        application_configs = {}
+        controller = await self.get_controller()
+        try:
+            model = await self.get_model(controller, model_name)
+            application = self._get_application(
+                model, application_name=application_name
+            )
+            application_configs = await application.get_config()
+        except Exception as e:
+            raise JujuError(
+                "Error in getting configs for application: {} in model: {}. Error: {}".format(
+                    application_name, model_name, str(e)
+                )
+            )
+        finally:
+            if model:
+                await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+        return application_configs
+
+    @retry(attempts=3, delay=5, callback=retry_callback)
+    async def get_model(self, controller: Controller, model_name: str) -> Model:
+        """
+        Get model from controller
+
+        :param: controller: Controller
+        :param: model_name: Model name
+
+        :return: Model: The created Juju model object
+        """
+        return await controller.get_model(model_name)
+
+    async def model_exists(
+        self, model_name: str, controller: Controller = None
+    ) -> bool:
+        """
+        Check if model exists
+
+        :param: controller: Controller
+        :param: model_name: Model name
+
+        :return bool
+        """
+        need_to_disconnect = False
+
+        # Get controller if not passed
+        if not controller:
+            controller = await self.get_controller()
+            need_to_disconnect = True
+
+        # Check if model exists
+        try:
+            return model_name in await controller.list_models()
+        finally:
+            if need_to_disconnect:
+                await self.disconnect_controller(controller)
+
+    async def models_exist(self, model_names: typing.List[str]) -> (bool, list):
+        """
+        Check if models exists
+
+        :param: model_names: List of strings with model names
+
+        :return (bool, list[str]): (True if all models exist, list of model names that don't exist)
+        """
+        if not model_names:
+            raise Exception(
+                "model_names must be a non-empty array. Given value: {}".format(
+                    model_names
+                )
+            )
+        models = await self.list_models()
+        existing_models = list(set(models).intersection(model_names))
+        non_existing_models = list(set(model_names) - set(existing_models))
+
+        return (
+            len(non_existing_models) == 0,
+            non_existing_models,
+        )
+
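+    # Illustrative usage sketch (model names hypothetical):
+    #
+    #   all_exist, missing = await libjuju.models_exist(["ns-1-k8s", "ns-2-k8s"])
+    #   if not all_exist:
+    #       raise Exception("Models not found: {}".format(missing))
+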
+    async def get_model_status(self, model_name: str) -> FullStatus:
+        """
+        Get model status
+
+        :param: model_name: Model name
+
+        :return: Full status object
+        """
+        controller = await self.get_controller()
+        model = await self.get_model(controller, model_name)
+        try:
+            return await model.get_status()
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+    async def create_machine(
+        self,
+        model_name: str,
+        machine_id: str = None,
+        db_dict: dict = None,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        series: str = "bionic",
+        wait: bool = True,
+    ) -> (Machine, bool):
+        """
+        Create machine
+
+        :param: model_name:         Model name
+        :param: machine_id:         Machine id
+        :param: db_dict:            Dictionary with data of the DB to write the updates
+        :param: progress_timeout:   Maximum time between two updates in the model
+        :param: total_timeout:      Timeout for the entity to be active
+        :param: series:             Series of the machine (xenial, bionic, focal, ...)
+        :param: wait:               Wait until machine is ready
+
+        :return: (juju.machine.Machine, bool):  Machine object and a boolean saying
+                                                if the machine is new or it already existed
+        """
+        new = False
+        machine = None
+
+        self.log.debug(
+            "Creating machine (id={}) in model: {}".format(machine_id, model_name)
+        )
+
+        # Get controller
+        controller = await self.get_controller()
+
+        # Get model
+        model = await self.get_model(controller, model_name)
+        try:
+            if machine_id is not None:
+                self.log.debug(
+                    "Searching machine (id={}) in model {}".format(
+                        machine_id, model_name
+                    )
+                )
+
+                # Get machines from model and get the machine with machine_id if exists
+                machines = await model.get_machines()
+                if machine_id in machines:
+                    self.log.debug(
+                        "Machine (id={}) found in model {}".format(
+                            machine_id, model_name
+                        )
+                    )
+                    machine = machines[machine_id]
+                else:
+                    raise JujuMachineNotFound("Machine {} not found".format(machine_id))
+
+            if machine is None:
+                self.log.debug("Creating a new machine in model {}".format(model_name))
+
+                # Create machine
+                machine = await model.add_machine(
+                    spec=None, constraints=None, disks=None, series=series
+                )
+                new = True
+
+                # Wait until the machine is ready
+                self.log.debug(
+                    "Wait until machine {} is ready in model {}".format(
+                        machine.entity_id, model_name
+                    )
+                )
+                if wait:
+                    await JujuModelWatcher.wait_for(
+                        model=model,
+                        entity=machine,
+                        progress_timeout=progress_timeout,
+                        total_timeout=total_timeout,
+                        db_dict=db_dict,
+                        n2vc=self.n2vc,
+                        vca_id=self.vca_connection._vca_id,
+                    )
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+        self.log.debug(
+            "Machine {} ready at {} in model {}".format(
+                machine.entity_id, machine.dns_name, model_name
+            )
+        )
+        return machine, new
+
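+    # Illustrative usage sketch (model name hypothetical): with no machine_id
+    # a new machine is created and, if wait=True, the call returns once it is
+    # ready; with an explicit machine_id the existing machine is reused and
+    # JujuMachineNotFound is raised if it is not in the model:
+    #
+    #   machine, is_new = await libjuju.create_machine("my-model", wait=True)
+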
+    async def provision_machine(
+        self,
+        model_name: str,
+        hostname: str,
+        username: str,
+        private_key_path: str,
+        db_dict: dict = None,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+    ) -> str:
+        """
+        Manual provisioning of a machine
+
+        :param: model_name:         Model name
+        :param: hostname:           IP to access the machine
+        :param: username:           Username to login to the machine
+        :param: private_key_path:   Local path for the private key
+        :param: db_dict:            Dictionary with data of the DB to write the updates
+        :param: progress_timeout:   Maximum time between two updates in the model
+        :param: total_timeout:      Timeout for the entity to be active
+
+        :return: (str): Machine id
+        """
+        self.log.debug(
+            "Provisioning machine. model: {}, hostname: {}, username: {}".format(
+                model_name, hostname, username
+            )
+        )
+
+        # Get controller
+        controller = await self.get_controller()
+
+        # Get model
+        model = await self.get_model(controller, model_name)
+
+        try:
+            # Get provisioner
+            provisioner = AsyncSSHProvisioner(
+                host=hostname,
+                user=username,
+                private_key_path=private_key_path,
+                log=self.log,
+            )
+
+            # Provision machine
+            params = await provisioner.provision_machine()
+
+            params.jobs = ["JobHostUnits"]
+
+            self.log.debug("Adding machine to model")
+            connection = model.connection()
+            client_facade = client.ClientFacade.from_connection(connection)
+
+            results = await client_facade.AddMachines(params=[params])
+            error = results.machines[0].error
+
+            if error:
+                msg = "Error adding machine: {}".format(error.message)
+                self.log.error(msg=msg)
+                raise ValueError(msg)
+
+            machine_id = results.machines[0].machine
+
+            self.log.debug("Installing Juju agent into machine {}".format(machine_id))
+            asyncio.ensure_future(
+                provisioner.install_agent(
+                    connection=connection,
+                    nonce=params.nonce,
+                    machine_id=machine_id,
+                    proxy=self.vca_connection.data.api_proxy,
+                    series=params.series,
+                )
+            )
+
+            machine = None
+            for _ in range(10):
+                machine_list = await model.get_machines()
+                if machine_id in machine_list:
+                    self.log.debug("Machine {} found in model!".format(machine_id))
+                    machine = model.machines.get(machine_id)
+                    break
+                await asyncio.sleep(2)
+
+            if machine is None:
+                msg = "Machine {} not found in model".format(machine_id)
+                self.log.error(msg=msg)
+                raise JujuMachineNotFound(msg)
+
+            self.log.debug(
+                "Wait until machine {} is ready in model {}".format(
+                    machine.entity_id, model_name
+                )
+            )
+            await JujuModelWatcher.wait_for(
+                model=model,
+                entity=machine,
+                progress_timeout=progress_timeout,
+                total_timeout=total_timeout,
+                db_dict=db_dict,
+                n2vc=self.n2vc,
+                vca_id=self.vca_connection._vca_id,
+            )
+        except Exception as e:
+            raise e
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+        self.log.debug(
+            "Machine provisioned {} in model {}".format(machine_id, model_name)
+        )
+
+        return machine_id
+
+    async def deploy(
+        self,
+        uri: str,
+        model_name: str,
+        wait: bool = True,
+        timeout: float = 3600,
+        instantiation_params: dict = None,
+    ):
+        """
+        Deploy bundle or charm: Similar to the juju CLI command `juju deploy`
+
+        :param uri:            Path or Charm Store uri in which the charm or bundle can be found
+        :param model_name:     Model name
+        :param wait:           Indicates whether to wait or not until all applications are active
+        :param timeout:        Time in seconds to wait until all applications are active
+        :param instantiation_params: To be applied as overlay bundle over primary bundle.
+        """
+        controller = await self.get_controller()
+        model = await self.get_model(controller, model_name)
+        overlays = []
+        try:
+            await self._validate_instantiation_params(uri, model, instantiation_params)
+            overlays = self._get_overlays(model_name, instantiation_params)
+            await model.deploy(uri, trust=True, overlays=overlays)
+            if wait:
+                await JujuModelWatcher.wait_for_model(model, timeout=timeout)
+                self.log.debug("All units active in model {}".format(model_name))
+        finally:
+            self._remove_overlay_file(overlays)
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+    async def _validate_instantiation_params(
+        self, uri: str, model, instantiation_params: dict
+    ) -> None:
+        """Checks if all the applications in instantiation_params
+        exist in the original bundle.
+
+        Raises:
+            JujuApplicationNotFound if there is an invalid app in
+            the instantiation params.
+        """
+        overlay_apps = self._get_apps_in_instantiation_params(instantiation_params)
+        if not overlay_apps:
+            return
+        original_apps = await self._get_apps_in_original_bundle(uri, model)
+        if not all(app in original_apps for app in overlay_apps):
+            raise JujuApplicationNotFound(
+                "Cannot find application {} in original bundle {}".format(
+                    overlay_apps, original_apps
+                )
+            )
+
+    async def _get_apps_in_original_bundle(self, uri: str, model) -> set:
+        """Bundle is downloaded in BundleHandler.fetch_plan.
+        That method takes care of opening and exception handling.
+
+        Resolve method gets all the information regarding the channel,
+        track, revision, type, source.
+
+        Returns:
+            Set with the names of the applications in original bundle.
+        """
+        url = URL.parse(uri)
+        architecture = DEFAULT_ARCHITECTURE  # only AMD64 is allowed
+        res = await model.deploy_types[str(url.schema)].resolve(
+            url, architecture, entity_url=uri
+        )
+        handler = BundleHandler(model, trusted=True, forced=False)
+        await handler.fetch_plan(url, res.origin)
+        return handler.applications
+
+    def _get_apps_in_instantiation_params(self, instantiation_params: dict) -> list:
+        """Extract applications key in instantiation params.
+
+        Returns:
+            List with the names of the applications in instantiation params.
+
+        Raises:
+            JujuError if applications key is not found.
+        """
+        if not instantiation_params:
+            return []
+        try:
+            return [key for key in instantiation_params.get("applications")]
+        except Exception as e:
+            raise JujuError("Invalid overlay format. {}".format(str(e)))
+
+    def _get_overlays(self, model_name: str, instantiation_params: dict) -> list:
+        """Creates a temporary overlay file which includes the instantiation params.
+        Only one overlay file is created.
+
+        Returns:
+            List with one overlay filename. Empty list if there are no instantiation params.
+        """
+        if not instantiation_params:
+            return []
+        file_name = model_name + "-overlay.yaml"
+        self._write_overlay_file(file_name, instantiation_params)
+        return [file_name]
+
+    def _write_overlay_file(self, file_name: str, instantiation_params: dict) -> None:
+        with open(file_name, "w") as file:
+            yaml.dump(instantiation_params, file)
+
+    def _remove_overlay_file(self, overlay: list) -> None:
+        """Overlay contains either one or zero file names."""
+        if not overlay:
+            return
+        try:
+            filename = overlay[0]
+            os.remove(filename)
+        except OSError as e:
+            self.log.warning(
+                "Overlay file {} could not be removed: {}".format(filename, e)
+            )
+
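+    # Illustrative sketch of the expected instantiation_params structure
+    # (application and option names hypothetical): the dict is dumped verbatim
+    # as a YAML overlay bundle, so it must follow the bundle overlay schema,
+    # and _validate_instantiation_params() rejects applications that are not
+    # present in the primary bundle:
+    #
+    #   instantiation_params = {
+    #       "applications": {
+    #           "my-app": {"options": {"some-config": "value"}},
+    #       },
+    #   }
+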
+    async def add_unit(
+        self,
+        application_name: str,
+        model_name: str,
+        machine_id: str,
+        db_dict: dict = None,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+    ):
+        """Add unit
+
+        :param: application_name:   Application name
+        :param: model_name:         Model name
+        :param: machine_id          Machine id
+        :param: db_dict:            Dictionary with data of the DB to write the updates
+        :param: progress_timeout:   Maximum time between two updates in the model
+        :param: total_timeout:      Timeout for the entity to be active
+
+        :return: None
+        """
+
+        model = None
+        controller = await self.get_controller()
+        try:
+            model = await self.get_model(controller, model_name)
+            application = self._get_application(model, application_name)
+
+            if application is not None:
+                # Check that the given machine id exists in the model;
+                # otherwise _get_machine_info raises an error
+                _machine, _series = self._get_machine_info(model, machine_id)
+
+                self.log.debug(
+                    "Adding unit (machine {}) to application {} in model ~{}".format(
+                        machine_id, application_name, model_name
+                    )
+                )
+
+                await application.add_unit(to=machine_id)
+
+                await JujuModelWatcher.wait_for(
+                    model=model,
+                    entity=application,
+                    progress_timeout=progress_timeout,
+                    total_timeout=total_timeout,
+                    db_dict=db_dict,
+                    n2vc=self.n2vc,
+                    vca_id=self.vca_connection._vca_id,
+                )
+                self.log.debug(
+                    "Unit is added to application {} in model {}".format(
+                        application_name, model_name
+                    )
+                )
+            else:
+                raise JujuApplicationNotFound(
+                    "Application {} not exists".format(application_name)
+                )
+        finally:
+            if model:
+                await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+    async def destroy_unit(
+        self,
+        application_name: str,
+        model_name: str,
+        machine_id: str,
+        total_timeout: float = None,
+    ):
+        """Destroy unit
+
+        :param: application_name:   Application name
+        :param: model_name:         Model name
+        :param: machine_id          Machine id
+        :param: total_timeout:      Timeout for the entity to be active
+
+        :return: None
+        """
+
+        model = None
+        controller = await self.get_controller()
+        try:
+            model = await self.get_model(controller, model_name)
+            application = self._get_application(model, application_name)
+
+            if application is None:
+                raise JujuApplicationNotFound(
+                    "Application not found: {} (model={})".format(
+                        application_name, model_name
+                    )
+                )
+
+            unit = self._get_unit(application, machine_id)
+            if not unit:
+                raise JujuError(
+                    "A unit with machine id {} not in available units".format(
+                        machine_id
+                    )
+                )
+
+            unit_name = unit.name
+
+            self.log.debug(
+                "Destroying unit {} from application {} in model {}".format(
+                    unit_name, application_name, model_name
+                )
+            )
+            await application.destroy_unit(unit_name)
+
+            self.log.debug(
+                "Waiting for unit {} to be destroyed in application {} (model={})...".format(
+                    unit_name, application_name, model_name
+                )
+            )
+
+            # TODO: Add functionality in the Juju watcher to replace this kind of blocks
+            if total_timeout is None:
+                total_timeout = 3600
+            end = time.time() + total_timeout
+            while time.time() < end:
+                if not self._get_unit(application, machine_id):
+                    self.log.debug(
+                        "The unit {} was destroyed in application {} (model={}) ".format(
+                            unit_name, application_name, model_name
+                        )
+                    )
+                    return
+                await asyncio.sleep(5)
+            self.log.warning(
+                "Timeout waiting for unit {} to be destroyed in application {} (model={})".format(
+                    unit_name, application_name, model_name
+                )
+            )
+        finally:
+            if model:
+                await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+    async def deploy_charm(
+        self,
+        application_name: str,
+        path: str,
+        model_name: str,
+        machine_id: str,
+        db_dict: dict = None,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        config: dict = None,
+        series: str = None,
+        num_units: int = 1,
+    ):
+        """Deploy charm
+
+        :param: application_name:   Application name
+        :param: path:               Local path to the charm
+        :param: model_name:         Model name
+        :param: machine_id          ID of the machine
+        :param: db_dict:            Dictionary with data of the DB to write the updates
+        :param: progress_timeout:   Maximum time between two updates in the model
+        :param: total_timeout:      Timeout for the entity to be active
+        :param: config:             Config for the charm
+        :param: series:             Series of the charm
+        :param: num_units:          Number of units
+
+        :return: (juju.application.Application): Juju application
+        """
+        self.log.debug(
+            "Deploying charm {} to machine {} in model ~{}".format(
+                application_name, machine_id, model_name
+            )
+        )
+        self.log.debug("charm: {}".format(path))
+
+        # Get controller
+        controller = await self.get_controller()
+
+        # Get model
+        model = await self.get_model(controller, model_name)
+
+        try:
+            if application_name not in model.applications:
+                if machine_id is not None:
+                    machine, series = self._get_machine_info(model, machine_id)
+
+                application = await model.deploy(
+                    entity_url=path,
+                    application_name=application_name,
+                    channel="stable",
+                    num_units=1,
+                    series=series,
+                    to=machine_id,
+                    config=config,
+                )
+
+                self.log.debug(
+                    "Wait until application {} is ready in model {}".format(
+                        application_name, model_name
+                    )
+                )
+                if num_units > 1:
+                    for _ in range(num_units - 1):
+                        m, _ = await self.create_machine(model_name, wait=False)
+                        await application.add_unit(to=m.entity_id)
+
+                await JujuModelWatcher.wait_for(
+                    model=model,
+                    entity=application,
+                    progress_timeout=progress_timeout,
+                    total_timeout=total_timeout,
+                    db_dict=db_dict,
+                    n2vc=self.n2vc,
+                    vca_id=self.vca_connection._vca_id,
+                )
+                self.log.debug(
+                    "Application {} is ready in model {}".format(
+                        application_name, model_name
+                    )
+                )
+            else:
+                raise JujuApplicationExists(
+                    "Application {} exists".format(application_name)
+                )
+        except juju.errors.JujuError as e:
+            if "already exists" in e.message:
+                raise JujuApplicationExists(
+                    "Application {} exists".format(application_name)
+                )
+            else:
+                raise e
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+        return application
+
+    async def upgrade_charm(
+        self,
+        application_name: str,
+        path: str,
+        model_name: str,
+        total_timeout: float = None,
+        **kwargs,
+    ):
+        """Upgrade Charm
+
+        :param: application_name:   Application name
+        :param: model_name:         Model name
+        :param: path:               Local path to the charm
+        :param: total_timeout:      Timeout for the entity to be active
+
+        :return: (str, str): (output and status)
+        """
+
+        self.log.debug(
+            "Upgrading charm {} in model {} from path {}".format(
+                application_name, model_name, path
+            )
+        )
+
+        await self.resolve_application(
+            model_name=model_name, application_name=application_name
+        )
+
+        # Get controller
+        controller = await self.get_controller()
+
+        # Get model
+        model = await self.get_model(controller, model_name)
+
+        try:
+            # Get application
+            application = self._get_application(
+                model,
+                application_name=application_name,
+            )
+            if application is None:
+                raise JujuApplicationNotFound(
+                    "Cannot find application {} to upgrade".format(application_name)
+                )
+
+            await application.refresh(path=path)
+
+            self.log.debug(
+                "Wait until charm upgrade is completed for application {} (model={})".format(
+                    application_name, model_name
+                )
+            )
+
+            await JujuModelWatcher.ensure_units_idle(
+                model=model, application=application
+            )
+
+            if application.status == "error":
+                error_message = "Unknown"
+                for unit in application.units:
+                    if (
+                        unit.workload_status == "error"
+                        and unit.workload_status_message != ""  # pylint: disable=E1101
+                    ):
+                        error_message = (
+                            unit.workload_status_message  # pylint: disable=E1101
+                        )
+
+                message = "Application {} failed update in {}: {}".format(
+                    application_name, model_name, error_message
+                )
+                self.log.error(message)
+                raise JujuError(message=message)
+
+            self.log.debug(
+                "Application {} is ready in model {}".format(
+                    application_name, model_name
+                )
+            )
+
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+        return application
+
+    async def resolve_application(self, model_name: str, application_name: str):
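+        """
+        Wait until the application leaves the error state, marking its failed
+        units as resolved so that Juju retries their hooks
+
+        :param: model_name:         Model name
+        :param: application_name:   Application name
+        """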
+        controller = await self.get_controller()
+        model = await self.get_model(controller, model_name)
+
+        try:
+            application = self._get_application(
+                model,
+                application_name=application_name,
+            )
+            if application is None:
+                raise JujuApplicationNotFound(
+                    "Cannot find application {} to resolve".format(application_name)
+                )
+
+            while application.status == "error":
+                for unit in application.units:
+                    if unit.workload_status == "error":
+                        self.log.debug(
+                            "Model {}, Application {}, Unit {} in error state, resolving".format(
+                                model_name, application_name, unit.entity_id
+                            )
+                        )
+                        try:
+                            await unit.resolved(retry=False)  # pylint: disable=E1101
+                        except Exception:
+                            pass
+
+                await asyncio.sleep(1)
+
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+    async def resolve(self, model_name: str):
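+        """
+        Mark every unit in error state in the model as resolved, looping
+        until no application remains in error state
+
+        :param: model_name:         Model name
+        """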
+        controller = await self.get_controller()
+        model = await self.get_model(controller, model_name)
+        all_units_active = False
+        try:
+            while not all_units_active:
+                all_units_active = True
+                for application_name, application in model.applications.items():
+                    if application.status == "error":
+                        for unit in application.units:
+                            if unit.workload_status == "error":
+                                self.log.debug(
+                                    "Model {}, Application {}, Unit {} in error state, resolving".format(
+                                        model_name, application_name, unit.entity_id
+                                    )
+                                )
+                                try:
+                                    await unit.resolved(retry=False)
+                                    all_units_active = False
+                                except Exception:
+                                    pass
+
+                if not all_units_active:
+                    await asyncio.sleep(5)
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+    async def scale_application(
+        self,
+        model_name: str,
+        application_name: str,
+        scale: int = 1,
+        total_timeout: float = None,
+    ):
+        """
+        Scale application (K8s)
+
+        :param: model_name:         Model name
+        :param: application_name:   Application name
+        :param: scale:              Scale to which to set this application
+        :param: total_timeout:      Timeout for the entity to be active
+        """
+
+        model = None
+        controller = await self.get_controller()
+        try:
+            model = await self.get_model(controller, model_name)
+
+            self.log.debug(
+                "Scaling application {} in model {}".format(
+                    application_name, model_name
+                )
+            )
+            application = self._get_application(model, application_name)
+            if application is None:
+                raise JujuApplicationNotFound("Cannot scale application")
+            await application.scale(scale=scale)
+            # Wait until application is scaled in model
+            self.log.debug(
+                "Waiting for application {} to be scaled in model {}...".format(
+                    application_name, model_name
+                )
+            )
+            if total_timeout is None:
+                total_timeout = 1800
+            end = time.time() + total_timeout
+            while time.time() < end:
+                application_scale = self._get_application_count(model, application_name)
+                # Before calling wait_for_model function,
+                # wait until application unit count and scale count are equal.
+                # Because there is a delay before scaling triggers in Juju model.
+                if application_scale == scale:
+                    await JujuModelWatcher.wait_for_model(
+                        model=model, timeout=total_timeout
+                    )
+                    self.log.debug(
+                        "Application {} is scaled in model {}".format(
+                            application_name, model_name
+                        )
+                    )
+                    return
+                await asyncio.sleep(5)
+            raise Exception(
+                "Timeout waiting for application {} in model {} to be scaled".format(
+                    application_name, model_name
+                )
+            )
+        finally:
+            if model:
+                await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
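+    # Illustrative usage sketch (names hypothetical): scaling a K8s
+    # application to three units, waiting up to ten minutes:
+    #
+    #   await libjuju.scale_application(
+    #       model_name="my-model",
+    #       application_name="my-app",
+    #       scale=3,
+    #       total_timeout=600,
+    #   )
+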
+    def _get_application_count(self, model: Model, application_name: str) -> int:
+        """Get number of units of the application
+
+        :param: model:              Model object
+        :param: application_name:   Application name
+
+        :return: int (or None if application doesn't exist)
+        """
+        application = self._get_application(model, application_name)
+        if application is not None:
+            return len(application.units)
+
+    def _get_application(self, model: Model, application_name: str) -> Application:
+        """Get application
+
+        :param: model:              Model object
+        :param: application_name:   Application name
+
+        :return: juju.application.Application (or None if it doesn't exist)
+        """
+        if model.applications and application_name in model.applications:
+            return model.applications[application_name]
+
+    def _get_unit(self, application: Application, machine_id: str) -> Unit:
+        """Get unit
+
+        :param: application:        Application object
+        :param: machine_id:         Machine id
+
+        :return: Unit
+        """
+        unit = None
+        for u in application.units:
+            if u.machine_id == machine_id:
+                unit = u
+                break
+        return unit
+
+    def _get_machine_info(
+        self,
+        model,
+        machine_id: str,
+    ) -> typing.Tuple[Machine, str]:
+        """Get machine info
+
+        :param: model:          Model object
+        :param: machine_id:     Machine id
+
+        :return: (Machine, str): (machine object, series)
+        """
+        if machine_id not in model.machines:
+            msg = "Machine {} not found in model".format(machine_id)
+            self.log.error(msg=msg)
+            raise JujuMachineNotFound(msg)
+        machine = model.machines[machine_id]
+        return machine, machine.series
+
+    async def execute_action(
+        self,
+        application_name: str,
+        model_name: str,
+        action_name: str,
+        db_dict: dict = None,
+        machine_id: str = None,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        **kwargs,
+    ):
+        """Execute action
+
+        :param: application_name:   Application name
+        :param: model_name:         Model name
+        :param: action_name:        Name of the action
+        :param: db_dict:            Dictionary with data of the DB to write the updates
+        :param: machine_id          Machine id
+        :param: progress_timeout:   Maximum time between two updates in the model
+        :param: total_timeout:      Timeout for the entity to be active
+
+        :return: (str, str): (output and status)
+        """
+        self.log.debug(
+            "Executing action {} using params {}".format(action_name, kwargs)
+        )
+        # Get controller
+        controller = await self.get_controller()
+
+        # Get model
+        model = await self.get_model(controller, model_name)
+
+        try:
+            # Get application
+            application = self._get_application(
+                model,
+                application_name=application_name,
+            )
+            if application is None:
+                raise JujuApplicationNotFound("Cannot execute action")
+            # Race condition:
+            #   Occasionally, self._get_leader_unit() will return None
+            #   because the leader elected hook has not been triggered yet.
+            #   Therefore, we are doing some retries. If it happens again,
+            #   re-open bug 1236
+            if machine_id is None:
+                unit = await self._get_leader_unit(application)
+                self.log.debug(
+                    "Action {} is being executed on the leader unit {}".format(
+                        action_name, unit.name
+                    )
+                )
+            else:
+                unit = self._get_unit(application, machine_id)
+                if not unit:
+                    raise JujuError(
+                        "A unit with machine id {} not in available units".format(
+                            machine_id
+                        )
+                    )
+                self.log.debug(
+                    "Action {} is being executed on {} unit".format(
+                        action_name, unit.name
+                    )
+                )
+
+            actions = await application.get_actions()
+
+            if action_name not in actions:
+                raise JujuActionNotFound(
+                    "Action {} not in available actions".format(action_name)
+                )
+
+            action = await unit.run_action(action_name, **kwargs)
+
+            self.log.debug(
+                "Wait until action {} is completed in application {} (model={})".format(
+                    action_name, application_name, model_name
+                )
+            )
+            await JujuModelWatcher.wait_for(
+                model=model,
+                entity=action,
+                progress_timeout=progress_timeout,
+                total_timeout=total_timeout,
+                db_dict=db_dict,
+                n2vc=self.n2vc,
+                vca_id=self.vca_connection._vca_id,
+            )
+
+            output = await model.get_action_output(action_uuid=action.entity_id)
+            status = await model.get_action_status(uuid_or_prefix=action.entity_id)
+            status = (
+                status[action.entity_id] if action.entity_id in status else "failed"
+            )
+
+            self.log.debug(
+                "Action {} completed with status {} in application {} (model={})".format(
+                    action_name, action.status, application_name, model_name
+                )
+            )
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+        return output, status
+
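+    # Illustrative usage sketch, assuming a connected Libjuju instance named
+    # `libjuju` and a charm exposing a hypothetical "backup" action:
+    #
+    #     output, status = await libjuju.execute_action(
+    #         application_name="my-app",
+    #         model_name="my-model",
+    #         action_name="backup",
+    #     )
+    #     if status != "completed":
+    #         raise Exception("backup failed: {}".format(output))
+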
+    async def get_actions(self, application_name: str, model_name: str) -> dict:
+        """Get list of actions
+
+        :param: application_name: Application name
+        :param: model_name: Model name
+
+        :return: Dict with this format
+            {
+                "action_name": "Description of the action",
+                ...
+            }
+        """
+        self.log.debug(
+            "Getting list of actions for application {}".format(application_name)
+        )
+
+        # Get controller
+        controller = await self.get_controller()
+
+        # Get model
+        model = await self.get_model(controller, model_name)
+
+        try:
+            # Get application
+            application = self._get_application(
+                model,
+                application_name=application_name,
+            )
+
+            # Return list of actions
+            return await application.get_actions()
+
+        finally:
+            # Disconnect from model and controller
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
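+    # Illustrative usage sketch; `libjuju` is a hypothetical connected instance:
+    #
+    #     actions = await libjuju.get_actions("my-app", "my-model")
+    #     for name, description in actions.items():
+    #         print(name, description)
+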
+    async def get_metrics(self, model_name: str, application_name: str) -> dict:
+        """Get the metrics collected by the VCA.
+
+        :param: model_name:         The name or unique id of the network service
+        :param: application_name:   The name of the application
+        """
+        if not model_name or not application_name:
+            raise Exception("model_name and application_name must be non-empty strings")
+        metrics = {}
+        controller = await self.get_controller()
+        model = await self.get_model(controller, model_name)
+        try:
+            application = self._get_application(model, application_name)
+            if application is not None:
+                metrics = await application.get_metrics()
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+        return metrics
+
+    async def add_relation(
+        self,
+        model_name: str,
+        endpoint_1: str,
+        endpoint_2: str,
+    ):
+        """Add relation
+
+        :param: model_name:     Model name
+        :param: endpoint_1:     First endpoint name
+                                ("app:endpoint" format or directly the saas name)
+        :param: endpoint_2:     Second endpoint name (same format)
+        """
+
+        self.log.debug("Adding relation: {} -> {}".format(endpoint_1, endpoint_2))
+
+        # Get controller
+        controller = await self.get_controller()
+
+        # Get model
+        model = await self.get_model(controller, model_name)
+
+        # Add relation
+        try:
+            await model.add_relation(endpoint_1, endpoint_2)
+        except juju.errors.JujuAPIError as e:
+            if self._relation_is_not_found(e):
+                self.log.warning("Relation not found: {}".format(e.message))
+                return
+            if self._relation_already_exist(e):
+                self.log.warning("Relation already exists: {}".format(e.message))
+                return
+            # another exception, raise it
+            raise e
+        finally:
+            await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
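+    # Illustrative usage sketch. Endpoints may be "app:endpoint" pairs or saas
+    # names; the application and endpoint names here are hypothetical:
+    #
+    #     await libjuju.add_relation("my-model", "wordpress:db", "mysql:db")
+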
+    def _relation_is_not_found(self, juju_error):
+        text = "not found"
+        return (text in juju_error.message) or (
+            juju_error.error_code and text in juju_error.error_code
+        )
+
+    def _relation_already_exist(self, juju_error):
+        text = "already exists"
+        return (text in juju_error.message) or (
+            juju_error.error_code and text in juju_error.error_code
+        )
+
+    async def offer(self, endpoint: RelationEndpoint) -> Offer:
+        """
+        Create an offer from a RelationEndpoint
+
+        :param: endpoint: Relation endpoint
+
+        :return: Offer object
+        """
+        model_name = endpoint.model_name
+        offer_name = f"{endpoint.application_name}-{endpoint.endpoint_name}"
+        controller = await self.get_controller()
+        model = None
+        try:
+            model = await self.get_model(controller, model_name)
+            await model.create_offer(endpoint.endpoint, offer_name=offer_name)
+            offer_list = await self._list_offers(model_name, offer_name=offer_name)
+            if offer_list:
+                return Offer(offer_list[0].offer_url)
+            else:
+                raise Exception("offer was not created")
+        except juju.errors.JujuError as e:
+            if "application offer already exists" not in e.message:
+                raise e
+        finally:
+            if model:
+                await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+    async def consume(
+        self,
+        model_name: str,
+        offer: Offer,
+        provider_libjuju: "Libjuju",
+    ) -> str:
+        """
+        Consumes a remote offer in the model. Relations can be created later using "juju relate".
+
+        :param: model_name:             Model name
+        :param: offer:                  Offer object to consume
+        :param: provider_libjuju:       Libjuju object of the provider endpoint
+
+        :raises ParseError if there's a problem parsing the offer_url
+        :raises JujuError if the remote offer includes an endpoint
+        :raises JujuAPIError if the operation is not successful
+
+        :returns: Saas name. It is the application name in the model that references the remote application.
+        """
+        saas_name = f'{offer.name}-{offer.model_name.replace("-", "")}'
+        if offer.vca_id:
+            saas_name = f"{saas_name}-{offer.vca_id}"
+        controller = await self.get_controller()
+        model = None
+        provider_controller = None
+        try:
+            model = await controller.get_model(model_name)
+            provider_controller = await provider_libjuju.get_controller()
+            await model.consume(
+                offer.url, application_alias=saas_name, controller=provider_controller
+            )
+            return saas_name
+        finally:
+            if model:
+                await self.disconnect_model(model)
+            if provider_controller:
+                await provider_libjuju.disconnect_controller(provider_controller)
+            await self.disconnect_controller(controller)
+
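+    # Illustrative cross-VCA sketch: `provider` and `consumer` are hypothetical
+    # Libjuju instances and `relation_endpoint` a RelationEndpoint built by the
+    # caller. Relations to the returned saas name can then be added with
+    # add_relation():
+    #
+    #     offer = await provider.offer(relation_endpoint)
+    #     saas_name = await consumer.consume("consumer-model", offer, provider)
+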
+    async def destroy_model(self, model_name: str, total_timeout: float = 1800):
+        """
+        Destroy model
+
+        :param: model_name:     Model name
+        :param: total_timeout:  Timeout
+        """
+
+        controller = await self.get_controller()
+        model = None
+        try:
+            if not await self.model_exists(model_name, controller=controller):
+                self.log.warn(f"Model {model_name} doesn't exist")
+                return
+
+            self.log.debug(f"Getting model {model_name} to be destroyed")
+            model = await self.get_model(controller, model_name)
+            self.log.debug(f"Destroying manual machines in model {model_name}")
+            # Destroy machines that are manually provisioned
+            # and still are in pending state
+            await self._destroy_pending_machines(model, only_manual=True)
+            await self.disconnect_model(model)
+
+            await asyncio.wait_for(
+                self._destroy_model(model_name, controller),
+                timeout=total_timeout,
+            )
+        except Exception as e:
+            if not await self.model_exists(model_name, controller=controller):
+                self.log.warning(
+                    f"Failed deleting model {model_name}: model doesn't exist"
+                )
+                return
+            self.log.warn(f"Failed deleting model {model_name}: {e}")
+            raise e
+        finally:
+            if model:
+                await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
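+    # Illustrative usage sketch: graceful deletion is attempted first (bounded
+    # to 120s in _destroy_model), then forceful deletion, all within
+    # total_timeout; the model name is hypothetical:
+    #
+    #     await libjuju.destroy_model("my-model", total_timeout=1800)
+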
+    async def _destroy_model(
+        self,
+        model_name: str,
+        controller: Controller,
+    ):
+        """
+        Destroy model from controller
+
+        :param: model_name: Model name to be removed
+        :param: controller: Controller object
+        """
+        self.log.debug(f"Destroying model {model_name}")
+
+        async def _destroy_model_gracefully(model_name: str, controller: Controller):
+            self.log.info(f"Gracefully deleting model {model_name}")
+            resolved = False
+            while model_name in await controller.list_models():
+                if not resolved:
+                    await self.resolve(model_name)
+                    resolved = True
+                await controller.destroy_model(model_name, destroy_storage=True)
+
+                await asyncio.sleep(5)
+            self.log.info(f"Model {model_name} deleted gracefully")
+
+        async def _destroy_model_forcefully(model_name: str, controller: Controller):
+            self.log.info(f"Forcefully deleting model {model_name}")
+            while model_name in await controller.list_models():
+                await controller.destroy_model(
+                    model_name, destroy_storage=True, force=True, max_wait=60
+                )
+                await asyncio.sleep(5)
+            self.log.info(f"Model {model_name} deleted forcefully")
+
+        try:
+            try:
+                await asyncio.wait_for(
+                    _destroy_model_gracefully(model_name, controller), timeout=120
+                )
+            except asyncio.TimeoutError:
+                await _destroy_model_forcefully(model_name, controller)
+        except juju.errors.JujuError as e:
+            if any("has been removed" in error for error in e.errors):
+                return
+            if any("model not found" in error for error in e.errors):
+                return
+            raise e
+
+    async def destroy_application(
+        self, model_name: str, application_name: str, total_timeout: float
+    ):
+        """
+        Destroy application
+
+        :param: model_name:         Model name
+        :param: application_name:   Application name
+        :param: total_timeout:      Timeout
+        """
+
+        controller = await self.get_controller()
+        model = None
+
+        try:
+            model = await self.get_model(controller, model_name)
+            self.log.debug(
+                "Destroying application {} in model {}".format(
+                    application_name, model_name
+                )
+            )
+            application = self._get_application(model, application_name)
+            if application:
+                await application.destroy()
+            else:
+                self.log.warning("Application not found: {}".format(application_name))
+
+            self.log.debug(
+                "Waiting for application {} to be destroyed in model {}...".format(
+                    application_name, model_name
+                )
+            )
+            if total_timeout is None:
+                total_timeout = 3600
+            end = time.time() + total_timeout
+            while time.time() < end:
+                if not self._get_application(model, application_name):
+                    self.log.debug(
+                        "The application {} was destroyed in model {} ".format(
+                            application_name, model_name
+                        )
+                    )
+                    return
+                await asyncio.sleep(5)
+            raise Exception(
+                "Timeout waiting for application {} to be destroyed in model {}".format(
+                    application_name, model_name
+                )
+            )
+        finally:
+            if model is not None:
+                await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
+
+    async def _destroy_pending_machines(self, model: Model, only_manual: bool = False):
+        """
+        Destroy pending machines in a given model
+
+        :param: only_manual:    Bool that indicates only manually provisioned
+                                machines should be destroyed (if True), or that
+                                all pending machines should be destroyed
+        """
+        status = await model.get_status()
+        for machine_id in status.machines:
+            machine_status = status.machines[machine_id]
+            if machine_status.agent_status.status == "pending":
+                if only_manual and not machine_status.instance_id.startswith("manual:"):
+                    continue
+                machine = model.machines[machine_id]
+                await machine.destroy(force=True)
+
+    async def configure_application(
+        self, model_name: str, application_name: str, config: dict = None
+    ):
+        """Configure application
+
+        :param: model_name:         Model name
+        :param: application_name:   Application name
+        :param: config:             Config to apply to the charm
+        """
+        self.log.debug("Configuring application {}".format(application_name))
+
+        if config:
+            controller = await self.get_controller()
+            model = None
+            try:
+                model = await self.get_model(controller, model_name)
+                application = self._get_application(
+                    model,
+                    application_name=application_name,
+                )
+                await application.set_config(config)
+            finally:
+                if model:
+                    await self.disconnect_model(model)
+                await self.disconnect_controller(controller)
+
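+    # Illustrative usage sketch; the config keys are hypothetical charm options:
+    #
+    #     await libjuju.configure_application(
+    #         "my-model", "my-app", config={"log-level": "debug"}
+    #     )
+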
+    async def health_check(self, interval: float = 300.0):
+        """
+        Health check to make sure controller and controller_model connections are OK
+
+        :param: interval: Time in seconds between checks
+        """
+        controller = None
+        while True:
+            try:
+                controller = await self.get_controller()
+                # self.log.debug("VCA is alive")
+            except Exception as e:
+                self.log.error("Health check to VCA failed: {}".format(e))
+            finally:
+                await self.disconnect_controller(controller)
+            await asyncio.sleep(interval)
+
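+    # Illustrative usage sketch: the check loops forever, so callers typically
+    # schedule it as a background task:
+    #
+    #     asyncio.create_task(libjuju.health_check(interval=300.0))
+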
+    async def list_models(self, contains: str = None) -> typing.List[str]:
+        """List models with certain names
+
+        :param: contains:   String that is contained in model name
+
+        :return: [models] Returns list of model names
+        """
+
+        controller = await self.get_controller()
+        try:
+            models = await controller.list_models()
+            if contains:
+                models = [model for model in models if contains in model]
+            return models
+        finally:
+            await self.disconnect_controller(controller)
+
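+    # Illustrative usage sketch; `ns_id` is a hypothetical network service id:
+    #
+    #     ns_models = await libjuju.list_models(contains=ns_id)
+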
+    async def _list_offers(
+        self, model_name: str, offer_name: str = None
+    ) -> QueryApplicationOffersResults:
+        """
+        List offers within a model
+
+        :param: model_name: Model name
+        :param: offer_name: Offer name to filter.
+
+        :return: Returns application offers results in the model
+        """
+
+        controller = await self.get_controller()
+        try:
+            offers = (await controller.list_offers(model_name)).results
+            if offer_name:
+                matching_offer = []
+                for offer in offers:
+                    if offer.offer_name == offer_name:
+                        matching_offer.append(offer)
+                        break
+                offers = matching_offer
+            return offers
+        finally:
+            await self.disconnect_controller(controller)
+
+    async def add_k8s(
+        self,
+        name: str,
+        rbac_id: str,
+        token: str,
+        client_cert_data: str,
+        configuration: Configuration,
+        storage_class: str,
+        credential_name: str = None,
+    ):
+        """
+        Add a Kubernetes cloud to the controller
+
+        Similar to the `juju add-k8s` command in the CLI
+
+        :param: name:               Name for the K8s cloud
+        :param: rbac_id:            RBAC id to label the cloud credential with
+        :param: token:              Token to authenticate against the cluster
+        :param: client_cert_data:   Client certificate data for the cluster
+        :param: configuration:      Kubernetes configuration object
+        :param: storage_class:      Storage Class to use in the cloud
+        :param: credential_name:    Credential name. If not provided, the cloud name is used
+        """
+
+        if not storage_class:
+            raise Exception("storage_class must be a non-empty string")
+        if not name:
+            raise Exception("name must be a non-empty string")
+        if not configuration:
+            raise Exception("configuration must be provided")
+
+        endpoint = configuration.host
+        credential = self.get_k8s_cloud_credential(
+            configuration,
+            client_cert_data,
+            token,
+        )
+        credential.attrs[RBAC_LABEL_KEY_NAME] = rbac_id
+        cloud = client.Cloud(
+            type_="kubernetes",
+            auth_types=[credential.auth_type],
+            endpoint=endpoint,
+            ca_certificates=[client_cert_data],
+            config={
+                "operator-storage": storage_class,
+                "workload-storage": storage_class,
+            },
+        )
+
+        return await self.add_cloud(
+            name, cloud, credential, credential_name=credential_name
+        )
+
+    def get_k8s_cloud_credential(
+        self,
+        configuration: Configuration,
+        client_cert_data: str,
+        token: str = None,
+    ) -> client.CloudCredential:
+        attrs = {}
+        # TODO: Test with AKS
+        key = None  # open(configuration.key_file, "r").read()
+        username = configuration.username
+        password = configuration.password
+
+        if client_cert_data:
+            attrs["ClientCertificateData"] = client_cert_data
+        if key:
+            attrs["ClientKeyData"] = key
+        if token:
+            if username or password:
+                raise JujuInvalidK8sConfiguration("Cannot set both token and user/pass")
+            attrs["Token"] = token
+
+        auth_type = None
+        if key:
+            auth_type = "oauth2"
+            if client_cert_data:
+                auth_type = "oauth2withcert"
+            if not token:
+                raise JujuInvalidK8sConfiguration(
+                    "missing token for auth type {}".format(auth_type)
+                )
+        elif username:
+            if not password:
+                self.log.debug(
+                    "credential for user {} has empty password".format(username)
+                )
+            attrs["username"] = username
+            attrs["password"] = password
+            if client_cert_data:
+                auth_type = "userpasswithcert"
+            else:
+                auth_type = "userpass"
+        elif client_cert_data and token:
+            auth_type = "certificate"
+        else:
+            raise JujuInvalidK8sConfiguration("authentication method not supported")
+        return client.CloudCredential(auth_type=auth_type, attrs=attrs)
+
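+    # Illustrative mapping of the branches above to Juju auth types:
+    #
+    #     # client key and token (+ optional cert) -> "oauth2" / "oauth2withcert"
+    #     # username (+ optional cert)             -> "userpass" / "userpasswithcert"
+    #     # client cert and token                  -> "certificate"
+    #     # anything else                          -> JujuInvalidK8sConfiguration
+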
+    async def add_cloud(
+        self,
+        name: str,
+        cloud: Cloud,
+        credential: CloudCredential = None,
+        credential_name: str = None,
+    ) -> Cloud:
+        """
+        Add cloud to the controller
+
+        :param: name:               Name of the cloud to be added
+        :param: cloud:              Cloud object
+        :param: credential:         CloudCredentials object for the cloud
+        :param: credential_name:    Credential name.
+                                    If not defined, the cloud name will be used.
+        """
+        controller = await self.get_controller()
+        try:
+            _ = await controller.add_cloud(name, cloud)
+            if credential:
+                await controller.add_credential(
+                    credential_name or name, credential=credential, cloud=name
+                )
+            # Need to return the object returned by the controller.add_cloud() function
+            # I'm returning the original value now until this bug is fixed:
+            #   https://github.com/juju/python-libjuju/issues/443
+            return cloud
+        finally:
+            await self.disconnect_controller(controller)
+
+    async def remove_cloud(self, name: str):
+        """
+        Remove cloud
+
+        :param: name:   Name of the cloud to be removed
+        """
+        controller = await self.get_controller()
+        try:
+            await controller.remove_cloud(name)
+        except juju.errors.JujuError as e:
+            if len(e.errors) == 1 and f'cloud "{name}" not found' == e.errors[0]:
+                self.log.warning(f"Cloud {name} not found, so it could not be deleted.")
+            else:
+                raise e
+        finally:
+            await self.disconnect_controller(controller)
+
+    @retry(
+        attempts=20, delay=5, fallback=JujuLeaderUnitNotFound(), callback=retry_callback
+    )
+    async def _get_leader_unit(self, application: Application) -> Unit:
+        unit = None
+        for u in application.units:
+            if await u.is_leader_from_status():
+                unit = u
+                break
+        if not unit:
+            raise Exception()
+        return unit
+
+    async def get_cloud_credentials(self, cloud: Cloud) -> typing.List:
+        """
+        Get cloud credentials
+
+        :param: cloud: Cloud object. The returned credentials will be from this cloud.
+
+        :return: List of credential objects associated with the specified cloud
+
+        """
+        controller = await self.get_controller()
+        try:
+            facade = client.CloudFacade.from_connection(controller.connection())
+            cloud_cred_tag = tag.credential(
+                cloud.name, self.vca_connection.data.user, cloud.credential_name
+            )
+            params = [client.Entity(cloud_cred_tag)]
+            return (await facade.Credential(params)).results
+        finally:
+            await self.disconnect_controller(controller)
+
+    async def check_application_exists(self, model_name, application_name) -> bool:
+        """Check application exists
+
+        :param: model_name:         Model Name
+        :param: application_name:   Application Name
+
+        :return: Boolean
+        """
+
+        model = None
+        controller = await self.get_controller()
+        try:
+            model = await self.get_model(controller, model_name)
+            self.log.debug(
+                "Checking if application {} exists in model {}".format(
+                    application_name, model_name
+                )
+            )
+            return self._get_application(model, application_name) is not None
+        finally:
+            if model:
+                await self.disconnect_model(model)
+            await self.disconnect_controller(controller)
diff --git a/osm_lcm/n2vc/loggable.py b/osm_lcm/n2vc/loggable.py
new file mode 100644 (file)
index 0000000..d129b4b
--- /dev/null
@@ -0,0 +1,179 @@
+##
+# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+
+import asyncio
+import datetime
+import inspect
+import logging
+import threading  # only for logging purposes (not for using threads)
+import time
+
+
+class Loggable:
+    def __init__(self, log, log_to_console: bool = False, prefix: str = ""):
+        self._last_log_time = None  # used for time increment in logging
+        self._log_to_console = log_to_console
+        self._prefix = prefix
+        if log is not None:
+            self.log = log
+        else:
+            self.log = logging.getLogger(__name__)
+
+    def debug(self, msg: str):
+        self._log_msg(log_level="DEBUG", msg=msg)
+
+    def info(self, msg: str):
+        self._log_msg(log_level="INFO", msg=msg)
+
+    def warning(self, msg: str):
+        self._log_msg(log_level="WARNING", msg=msg)
+
+    def error(self, msg: str):
+        self._log_msg(log_level="ERROR", msg=msg)
+
+    def critical(self, msg: str):
+        self._log_msg(log_level="CRITICAL", msg=msg)
+
+    ####################################################################################
+
+    def _log_msg(self, log_level: str, msg: str):
+        """Generic log method"""
+        msg = self._format_log(
+            log_level=log_level,
+            msg=msg,
+            obj=self,
+            level=3,
+            include_path=False,
+            include_thread=False,
+            include_coroutine=True,
+        )
+        if self._log_to_console:
+            print(msg)
+        else:
+            if self.log is not None:
+                if log_level == "DEBUG":
+                    self.log.debug(msg)
+                elif log_level == "INFO":
+                    self.log.info(msg)
+                elif log_level == "WARNING":
+                    self.log.warning(msg)
+                elif log_level == "ERROR":
+                    self.log.error(msg)
+                elif log_level == "CRITICAL":
+                    self.log.critical(msg)
+
+    def _format_log(
+        self,
+        log_level: str,
+        msg: str = "",
+        obj: object = None,
+        level: int = None,
+        include_path: bool = False,
+        include_thread: bool = False,
+        include_coroutine: bool = True,
+    ) -> str:
+        # time increment from last log
+        now = time.perf_counter()
+        if self._last_log_time is None:
+            time_str = " (+0.000)"
+        else:
+            diff = round(now - self._last_log_time, 3)
+            time_str = " (+{})".format(diff)
+        self._last_log_time = now
+
+        if level is None:
+            level = 1
+
+        # stack info
+        fi = inspect.stack()[level]
+        filename = fi.filename
+        func = fi.function
+        lineno = fi.lineno
+        # filename without path
+        if not include_path:
+            i = filename.rfind("/")
+            if i > 0:
+                filename = filename[i + 1 :]
+
+        # datetime
+        dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
+        dt = dt + time_str
+        # dt = time_str       # logger already shows datetime
+
+        # current thread
+        if include_thread:
+            thread_name = "th:{}".format(threading.current_thread().getName())
+        else:
+            thread_name = ""
+
+        # current coroutine
+
+        coroutine_id = ""
+        if include_coroutine:
+            try:
+                if asyncio.current_task() is not None:
+
+                    def print_cor_name(c):
+                        # NOTE: inspect is already imported at module level
+                        try:
+                            for m in inspect.getmembers(c):
+                                if m[0] == "__name__":
+                                    return m[1]
+                        except Exception:
+                            pass
+
+                    coro = asyncio.current_task()._coro
+                    coroutine_id = "coro-{} {}()".format(
+                        hex(id(coro))[2:], print_cor_name(coro)
+                    )
+            except Exception:
+                coroutine_id = ""
+
+        # classname
+        if obj is not None:
+            obj_type = obj.__class__.__name__  # type: str
+            log_msg = "{} {} {} {} {}::{}.{}():{}\n{}".format(
+                self._prefix,
+                dt,
+                thread_name,
+                coroutine_id,
+                filename,
+                obj_type,
+                func,
+                lineno,
+                str(msg),
+            )
+        else:
+            log_msg = "{} {} {} {} {}::{}():{}\n{}".format(
+                self._prefix,
+                dt,
+                thread_name,
+                coroutine_id,
+                filename,
+                func,
+                lineno,
+                str(msg),
+            )
+
+        return log_msg
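+
+
+# Illustrative usage sketch; the logger name and message are hypothetical:
+#
+#     log = Loggable(log=logging.getLogger("n2vc"), prefix="\nN2VC")
+#     log.debug("something happened")  # gets time delta, caller and coroutine id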
diff --git a/osm_lcm/n2vc/n2vc_conn.py b/osm_lcm/n2vc/n2vc_conn.py
new file mode 100644 (file)
index 0000000..e2104fa
--- /dev/null
@@ -0,0 +1,534 @@
+##
+# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+
+import abc
+import asyncio
+from http import HTTPStatus
+from shlex import quote
+import os
+import shlex
+import subprocess
+import time
+
+from osm_lcm.n2vc.exceptions import N2VCBadArgumentsException
+from osm_common.dbmongo import DbException
+import yaml
+
+from osm_lcm.n2vc.loggable import Loggable
+from osm_lcm.n2vc.utils import JujuStatusToOSM, N2VCDeploymentStatus
+
+
+class N2VCConnector(abc.ABC, Loggable):
+    """Generic N2VC connector
+
+    Abstract class
+    """
+
+    """
+    ####################################################################################
+    ################################### P U B L I C ####################################
+    ####################################################################################
+    """
+
+    def __init__(
+        self,
+        db: object,
+        fs: object,
+        log: object,
+        on_update_db=None,
+        **kwargs,
+    ):
+        """Initialize N2VC abstract connector. It defines de API for VCA connectors
+
+        :param object db: Mongo object managing the MongoDB (repo common DbBase)
+        :param object fs: FileSystem object managing the package artifacts (repo common
+            FsBase)
+        :param object log: the logging object to log to
+        :param on_update_db: callback called when n2vc connector updates database.
+            Received arguments:
+            table: e.g. "nsrs"
+            filter: e.g. {_id: <nsd-id> }
+            path: e.g. "_admin.deployed.VCA.3."
+            updated_data: e.g. "{ _admin.deployed.VCA.3.status: 'xxx', etc }"
+        """
+
+        # parent class
+        Loggable.__init__(self, log=log, log_to_console=True, prefix="\nN2VC")
+
+        # check arguments
+        if db is None:
+            raise N2VCBadArgumentsException("Argument db is mandatory", ["db"])
+        if fs is None:
+            raise N2VCBadArgumentsException("Argument fs is mandatory", ["fs"])
+
+        # store arguments into self
+        self.db = db
+        self.fs = fs
+        self.on_update_db = on_update_db
+
+        # generate private/public key-pair
+        self.private_key_path = None
+        self.public_key_path = None
+
+    @abc.abstractmethod
+    async def get_status(self, namespace: str, yaml_format: bool = True):
+        """Get namespace status
+
+        :param namespace: we obtain ns from namespace
+        :param yaml_format: returns a yaml string
+        """
+
+    # TODO: review which public key
+    def get_public_key(self) -> str:
+        """Get the VCA ssh-public-key
+
+        Returns the SSH public key from the local machine, to be injected into
+        virtual machines to be managed by the VCA.
+        On the first run, an SSH keypair will be created.
+        The public key is injected into a VM so that we can provision the
+        machine with Juju, after which Juju will communicate with the VM
+        directly via the juju agent.
+        """
+
+        # Find the path where we expect our key lives (~/.ssh)
+        homedir = os.environ.get("HOME")
+        if not homedir:
+            self.log.warning("No HOME environment variable, using /tmp")
+            homedir = "/tmp"
+        sshdir = "{}/.ssh".format(homedir)
+        sshdir = os.path.realpath(os.path.normpath(os.path.abspath(sshdir)))
+        if not os.path.exists(sshdir):
+            os.mkdir(sshdir)
+
+        self.private_key_path = "{}/id_n2vc_rsa".format(sshdir)
+        self.private_key_path = os.path.realpath(
+            os.path.normpath(os.path.abspath(self.private_key_path))
+        )
+        self.public_key_path = "{}.pub".format(self.private_key_path)
+        self.public_key_path = os.path.realpath(
+            os.path.normpath(os.path.abspath(self.public_key_path))
+        )
+
+        # If we don't have a key generated, then we have to generate it using ssh-keygen
+        if not os.path.exists(self.private_key_path):
+            command = "ssh-keygen -t {} -b {} -N '' -f {}".format(
+                "rsa", "4096", quote(self.private_key_path)
+            )
+            # run command with arguments
+            args = shlex.split(command)
+            subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        # Read the public key. Only one public key (one line) in the file
+        with open(self.public_key_path, "r") as file:
+            public_key = file.readline()
+
+        return public_key
+
+    @abc.abstractmethod
+    async def create_execution_environment(
+        self,
+        namespace: str,
+        db_dict: dict,
+        reuse_ee_id: str = None,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+    ) -> tuple[str, dict]:
+        """Create an Execution Environment. Returns when it is created or raises an
+        exception on failing
+
+        :param str namespace: Contains a dot-separated string.
+                    LCM will use: [<nsi-id>].<ns-id>.<vnf-id>.<vdu-id>[-<count>]
+        :param dict db_dict: where to write to database when the status changes.
+            It contains a dictionary with {collection: str, filter: {},  path: str},
+                e.g. {collection: "nsrs", filter: {_id: <nsd-id>, path:
+                "_admin.deployed.VCA.3"}
+        :param str reuse_ee_id: ee id from an older execution. It allows us to reuse an
+            older environment
+        :param float progress_timeout:
+        :param float total_timeout:
+        :returns str, dict: id of the new execution environment and credentials for it
+                    (credentials can contain hostname, username, etc. depending on
+                    the underlying cloud)
+        """
+
+    @abc.abstractmethod
+    async def register_execution_environment(
+        self,
+        namespace: str,
+        credentials: dict,
+        db_dict: dict,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+    ) -> str:
+        """
+        Register an existing execution environment at the VCA
+
+        :param str namespace: same as create_execution_environment method
+        :param dict credentials: credentials to access the existing execution
+            environment
+            (it can contain hostname, username, path to private key, etc. depending
+            on the underlying cloud)
+        :param dict db_dict: where to write to database when the status changes.
+            It contains a dictionary with {collection: str, filter: {},  path: str},
+                e.g. {collection: "nsrs", filter:
+                    {_id: <nsd-id>, path: "_admin.deployed.VCA.3"}
+        :param float progress_timeout:
+        :param float total_timeout:
+        :returns str: id of the execution environment
+        """
+
+    @abc.abstractmethod
+    async def install_configuration_sw(
+        self,
+        ee_id: str,
+        artifact_path: str,
+        db_dict: dict,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+    ):
+        """
+        Install the software inside the execution environment identified by ee_id
+
+        :param str ee_id: the id of the execution environment returned by
+            create_execution_environment or register_execution_environment
+        :param str artifact_path: where to locate the artifacts (parent folder) using
+            the self.fs. The final artifact path will be a combination of this
+            artifact_path and an additional string from the config_dict (e.g. charm
+            name)
+        :param dict db_dict: where to write into database when the status changes.
+                        It contains a dict with
+                            {collection: <str>, filter: {},  path: <str>},
+                            e.g. {collection: "nsrs", filter:
+                                {_id: <nsd-id>, path: "_admin.deployed.VCA.3"}
+        :param float progress_timeout:
+        :param float total_timeout:
+        """
+
+    @abc.abstractmethod
+    async def install_k8s_proxy_charm(
+        self,
+        charm_name: str,
+        namespace: str,
+        artifact_path: str,
+        db_dict: dict,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        config: dict = None,
+    ) -> str:
+        """
+        Install a k8s proxy charm
+
+        :param charm_name: Name of the charm being deployed
+        :param namespace: collection of all the uuids related to the charm.
+        :param str artifact_path: where to locate the artifacts (parent folder) using
+            the self.fs. The final artifact path will be a combination of this
+            artifact_path and an additional string from the config_dict (e.g. charm
+            name)
+        :param dict db_dict: where to write into database when the status changes.
+                        It contains a dict with
+                            {collection: <str>, filter: {},  path: <str>},
+                            e.g. {collection: "nsrs", filter:
+                                {_id: <nsd-id>, path: "_admin.deployed.VCA.3"}
+        :param float progress_timeout:
+        :param float total_timeout:
+        :param config: Dictionary with additional configuration
+
+        :returns ee_id: execution environment id.
+        """
+
+    @abc.abstractmethod
+    async def get_ee_ssh_public__key(
+        self,
+        ee_id: str,
+        db_dict: dict,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+    ) -> str:
+        """
+        Generate a priv/pub key pair in the execution environment and return the public
+        key
+
+        :param str ee_id: the id of the execution environment returned by
+            create_execution_environment or register_execution_environment
+        :param dict db_dict: where to write into database when the status changes.
+                        It contains a dict with
+                            {collection: <str>, filter: {},  path: <str>},
+                            e.g. {collection: "nsrs", filter:
+                                {_id: <nsd-id>, path: "_admin.deployed.VCA.3"}
+        :param float progress_timeout:
+        :param float total_timeout:
+        :returns: public key of the execution environment
+                    For the case of juju proxy charm ssh-layered, it is the one
+                    returned by 'get-ssh-public-key' primitive.
+                    It raises an N2VC exception if it fails.
+        """
+
+    @abc.abstractmethod
+    async def add_relation(
+        self, ee_id_1: str, ee_id_2: str, endpoint_1: str, endpoint_2: str
+    ):
+        """
+        Add a relation between two Execution Environments (using their associated
+        endpoints).
+
+        :param str ee_id_1: The id of the first execution environment
+        :param str ee_id_2: The id of the second execution environment
+        :param str endpoint_1: The endpoint in the first execution environment
+        :param str endpoint_2: The endpoint in the second execution environment
+        """
+
+    # TODO
+    @abc.abstractmethod
+    async def remove_relation(self):
+        """ """
+
+    # TODO
+    @abc.abstractmethod
+    async def deregister_execution_environments(self):
+        """ """
+
+    @abc.abstractmethod
+    async def delete_namespace(
+        self, namespace: str, db_dict: dict = None, total_timeout: float = None
+    ):
+        """
+        Remove a network scenario and its execution environments
+        :param namespace: [<nsi-id>].<ns-id>
+        :param dict db_dict: where to write into database when the status changes.
+                        It contains a dict with
+                            {collection: <str>, filter: {},  path: <str>},
+                            e.g. {collection: "nsrs", filter:
+                                {_id: <nsd-id>, path: "_admin.deployed.VCA.3"}
+        :param float total_timeout:
+        """
+
+    @abc.abstractmethod
+    async def delete_execution_environment(
+        self, ee_id: str, db_dict: dict = None, total_timeout: float = None
+    ):
+        """
+        Delete an execution environment
+        :param str ee_id: id of the execution environment to delete
+        :param dict db_dict: where to write into database when the status changes.
+                        It contains a dict with
+                            {collection: <str>, filter: {},  path: <str>},
+                            e.g. {collection: "nsrs", filter:
+                                {_id: <nsd-id>, path: "_admin.deployed.VCA.3"}
+        :param float total_timeout:
+        """
+
+    @abc.abstractmethod
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrade charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if status equals to "completed"
+        """
+
+    @abc.abstractmethod
+    async def exec_primitive(
+        self,
+        ee_id: str,
+        primitive_name: str,
+        params_dict: dict,
+        db_dict: dict = None,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+    ) -> str:
+        """
+        Execute a primitive in the execution environment
+
+        :param str ee_id: the one returned by create_execution_environment or
+            register_execution_environment
+        :param str primitive_name: must be one defined in the software. There is one
+            called 'config', where, for the proxy case, the 'credentials' of the VM
+            are provided
+        :param dict params_dict: parameters of the action
+        :param dict db_dict: where to write into database when the status changes.
+                        It contains a dict with
+                            {collection: <str>, filter: {},  path: <str>},
+                            e.g. {collection: "nsrs", filter:
+                                {_id: <nsd-id>, path: "_admin.deployed.VCA.3"}
+        :param float progress_timeout:
+        :param float total_timeout:
+        :returns str: primitive result, if OK. It raises exceptions in case of failure
+        """
+
+    async def disconnect(self):
+        """
+        Disconnect from VCA
+        """
+
+    """
+    ####################################################################################
+    ################################### P R I V A T E ##################################
+    ####################################################################################
+    """
+
+    def _get_namespace_components(
+        self, namespace: str
+    ) -> tuple[str, str, str, str, str]:
+        """
+        Split namespace components
+
+        :param namespace: [<nsi-id>].<ns-id>.<vnf-id>.<vdu-id>[-<count>]
+        :return: nsi_id, ns_id, vnf_id, vdu_id, vdu_count
+        """
+
+        # check parameters
+        if namespace is None or len(namespace) == 0:
+            raise N2VCBadArgumentsException(
+                "Argument namespace is mandatory", ["namespace"]
+            )
+
+        # split namespace components
+        parts = namespace.split(".")
+        nsi_id = None
+        ns_id = None
+        vnf_id = None
+        vdu_id = None
+        vdu_count = None
+        if len(parts) > 0 and len(parts[0]) > 0:
+            nsi_id = parts[0]
+        if len(parts) > 1 and len(parts[1]) > 0:
+            ns_id = parts[1]
+        if len(parts) > 2 and len(parts[2]) > 0:
+            vnf_id = parts[2]
+        if len(parts) > 3 and len(parts[3]) > 0:
+            vdu_id = parts[3]
+            vdu_parts = parts[3].split("-")
+            if len(vdu_parts) > 1:
+                vdu_id = vdu_parts[0]
+                vdu_count = vdu_parts[1]
+
+        return nsi_id, ns_id, vnf_id, vdu_id, vdu_count
+
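+    # Illustrative parsing sketch; the ids are hypothetical:
+    #
+    #     # "nsi1.ns1.vnf1.vdu1-2" -> ("nsi1", "ns1", "vnf1", "vdu1", "2")
+    #     # ".ns1"                 -> (None, "ns1", None, None, None)
+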
+    async def write_app_status_to_db(
+        self,
+        db_dict: dict,
+        status: N2VCDeploymentStatus,
+        detailed_status: str,
+        vca_status: str,
+        entity_type: str,
+        vca_id: str = None,
+    ):
+        """
+        Write application status to database
+
+        :param: db_dict: DB dictionary
+        :param: status: Status of the application
+        :param: detailed_status: Detailed status
+        :param: vca_status: VCA status
+        :param: entity_type: Entity type ("application", "machine", or "action")
+        :param: vca_id: Id of the VCA. If None, the default VCA will be used.
+        """
+        if not db_dict:
+            self.log.debug("No db_dict => No database write")
+            return
+
+        # self.log.debug('status={} / detailed-status={} / VCA-status={}/entity_type={}'
+        #          .format(str(status.value), detailed_status, vca_status, entity_type))
+
+        try:
+            the_table = db_dict["collection"]
+            the_filter = db_dict["filter"]
+            the_path = db_dict["path"]
+            if the_path[-1] != ".":
+                the_path = the_path + "."
+            update_dict = {
+                the_path + "status": str(status.value),
+                the_path + "detailed-status": detailed_status,
+                the_path + "VCA-status": vca_status,
+                the_path + "entity-type": entity_type,
+                the_path + "status-time": str(time.time()),
+            }
+
+            self.db.set_one(
+                table=the_table,
+                q_filter=the_filter,
+                update_dict=update_dict,
+                fail_on_empty=True,
+            )
+
+            # database callback
+            if self.on_update_db:
+                if asyncio.iscoroutinefunction(self.on_update_db):
+                    await self.on_update_db(
+                        the_table, the_filter, the_path, update_dict, vca_id=vca_id
+                    )
+                else:
+                    self.on_update_db(
+                        the_table, the_filter, the_path, update_dict, vca_id=vca_id
+                    )
+
+        except DbException as e:
+            if e.http_code == HTTPStatus.NOT_FOUND:
+                self.log.error(
+                    "NOT_FOUND error: Exception writing status to database: {}".format(
+                        e
+                    )
+                )
+            else:
+                self.log.info("Exception writing status to database: {}".format(e))
+
+    def osm_status(self, entity_type: str, status: str) -> N2VCDeploymentStatus:
+        if status not in JujuStatusToOSM[entity_type]:
+            self.log.warning("Status {} not found in JujuStatusToOSM.".format(status))
+            return N2VCDeploymentStatus.UNKNOWN
+        return JujuStatusToOSM[entity_type][status]
+
+
+def obj_to_yaml(obj: object) -> str:
+    # dump to yaml
+    dump_text = yaml.dump(obj, default_flow_style=False, indent=2)
+    # split lines
+    lines = dump_text.splitlines()
+    # remove !!python/object tags
+    yaml_text = ""
+    for line in lines:
+        index = line.find("!!python/object")
+        if index >= 0:
+            line = line[:index]
+        yaml_text += line + "\n"
+    return yaml_text
+
+
+def obj_to_dict(obj: object) -> dict:
+    # convert obj to yaml
+    yaml_text = obj_to_yaml(obj)
+    # parse to dict
+    return yaml.load(yaml_text, Loader=yaml.SafeLoader)
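+
+
+# Illustrative round-trip sketch: obj_to_yaml() strips "!!python/object" tags,
+# so obj_to_dict() yields plain dicts/lists; `status` is a hypothetical object:
+#
+#     as_text = obj_to_yaml(status)
+#     as_dict = obj_to_dict(status)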
diff --git a/osm_lcm/n2vc/n2vc_juju_conn.py b/osm_lcm/n2vc/n2vc_juju_conn.py
new file mode 100644 (file)
index 0000000..3a156ef
--- /dev/null
@@ -0,0 +1,1564 @@
+##
+# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
+# This file is part of OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+import asyncio
+import logging
+
+from osm_lcm.n2vc.config import EnvironConfig
+from osm_lcm.n2vc.definitions import RelationEndpoint
+from osm_lcm.n2vc.exceptions import (
+    N2VCBadArgumentsException,
+    N2VCException,
+    N2VCConnectionException,
+    N2VCExecutionException,
+    N2VCApplicationExists,
+    JujuApplicationExists,
+    # N2VCNotFound,
+    MethodNotImplemented,
+)
+from osm_lcm.n2vc.n2vc_conn import N2VCConnector
+from osm_lcm.n2vc.n2vc_conn import obj_to_dict, obj_to_yaml
+from osm_lcm.n2vc.libjuju import Libjuju, retry_callback
+from osm_lcm.n2vc.store import MotorStore
+from osm_lcm.n2vc.utils import get_ee_id_components, generate_random_alfanum_string
+from osm_lcm.n2vc.vca.connection import get_connection
+from retrying_async import retry
+from typing import Tuple
+
+
+class N2VCJujuConnector(N2VCConnector):
+
+    """
+    ####################################################################################
+    ################################### P U B L I C ####################################
+    ####################################################################################
+    """
+
+    BUILT_IN_CLOUDS = ["localhost", "microk8s"]
+    libjuju = None
+
+    def __init__(
+        self,
+        db: object,
+        fs: object,
+        log: object = None,
+        on_update_db=None,
+    ):
+        """
+        Constructor
+
+        :param: db: Database object from osm_common
+        :param: fs: Filesystem object from osm_common
+        :param: log: Logger
+        :param: on_update_db: Callback function to be called for updating the database.
+        """
+
+        # parent class constructor
+        N2VCConnector.__init__(self, db=db, fs=fs, log=log, on_update_db=on_update_db)
+
+        # silence websocket traffic log
+        logging.getLogger("websockets.protocol").setLevel(logging.INFO)
+        logging.getLogger("juju.client.connection").setLevel(logging.WARN)
+        logging.getLogger("model").setLevel(logging.WARN)
+
+        self.log.info("Initializing N2VC juju connector...")
+
+        db_uri = EnvironConfig(prefixes=["OSMLCM_", "OSMMON_"]).get("database_uri")
+        self._store = MotorStore(db_uri)
+        self.loading_libjuju = asyncio.Lock()
+        self.delete_namespace_locks = {}
+        self.log.info("N2VC juju connector initialized")
+
+    async def get_status(
+        self, namespace: str, yaml_format: bool = True, vca_id: str = None
+    ):
+        """
+        Get status from all juju models from a VCA
+
+        :param namespace: namespace from which the ns-id (the model name) is obtained
+        :param yaml_format: if True, return the status as a YAML string; otherwise as a dict
+        :param: vca_id: VCA ID from which the status will be retrieved.
+        """
+        # TODO: Review where this function is used. It is not optimal at all to get
+        #       the status from all the juju models of a particular VCA. Additionally,
+        #       these models might not have been deployed by OSM; in that case we are
+        #       getting information from deployments outside of OSM's scope.
+
+        # self.log.info('Getting NS status. namespace: {}'.format(namespace))
+        libjuju = await self._get_libjuju(vca_id)
+
+        _nsi_id, ns_id, _vnf_id, _vdu_id, _vdu_count = self._get_namespace_components(
+            namespace=namespace
+        )
+        # model name is ns_id
+        model_name = ns_id
+        if model_name is None:
+            msg = "Namespace {} not valid".format(namespace)
+            self.log.error(msg)
+            raise N2VCBadArgumentsException(msg, ["namespace"])
+
+        status = {}
+        models = await libjuju.list_models(contains=ns_id)
+
+        for m in models:
+            status[m] = await libjuju.get_model_status(m)
+
+        if yaml_format:
+            return obj_to_yaml(status)
+        else:
+            return obj_to_dict(status)
+
+    async def update_vca_status(self, vcastatus: dict, vca_id: str = None):
+        """
+        Add the configs, actions and executed actions of all applications in a model to the vcastatus dict.
+
+        :param vcastatus: dict containing vcaStatus
+        :param: vca_id: VCA ID
+
+        :return: None
+        """
+        try:
+            libjuju = await self._get_libjuju(vca_id)
+            for model_name in vcastatus:
+                # Adding executed actions
+                vcastatus[model_name][
+                    "executedActions"
+                ] = await libjuju.get_executed_actions(model_name)
+                for application in vcastatus[model_name]["applications"]:
+                    # Adding application actions
+                    vcastatus[model_name]["applications"][application][
+                        "actions"
+                    ] = await libjuju.get_actions(application, model_name)
+                    # Adding application configs
+                    vcastatus[model_name]["applications"][application][
+                        "configs"
+                    ] = await libjuju.get_application_configs(model_name, application)
+        except Exception as e:
+            self.log.debug("Error in updating vca status: {}".format(str(e)))
+
+    async def create_execution_environment(
+        self,
+        namespace: str,
+        db_dict: dict,
+        reuse_ee_id: str = None,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        vca_id: str = None,
+    ) -> Tuple[str, dict]:
+        """
+        Create an Execution Environment. Returns when it is created or raises an
+        exception on failing
+
+        :param: namespace: Contains a dot-separated string.
+                    LCM will use: [<nsi-id>].<ns-id>.<vnf-id>.<vdu-id>[-<count>]
+        :param: db_dict: where to write to database when the status changes.
+            It contains a dictionary with {collection: str, filter: {}, path: str},
+                e.g. {collection: "nsrs", filter: {_id: <nsd-id>},
+                path: "_admin.deployed.VCA.3"}
+        :param: reuse_ee_id: ee id from an older execution. It allows us to reuse an
+                             older environment
+        :param: progress_timeout: Progress timeout
+        :param: total_timeout: Total timeout
+        :param: vca_id: VCA ID
+
+        :returns: id of the new execution environment and credentials for it
+                  (credentials can contain hostname, username, etc., depending on the underlying cloud)
+        """
+
+        self.log.info(
+            "Creating execution environment. namespace: {}, reuse_ee_id: {}".format(
+                namespace, reuse_ee_id
+            )
+        )
+        libjuju = await self._get_libjuju(vca_id)
+
+        machine_id = None
+        if reuse_ee_id:
+            model_name, application_name, machine_id = self._get_ee_id_components(
+                ee_id=reuse_ee_id
+            )
+        else:
+            (
+                _nsi_id,
+                ns_id,
+                _vnf_id,
+                _vdu_id,
+                _vdu_count,
+            ) = self._get_namespace_components(namespace=namespace)
+            # model name is ns_id
+            model_name = ns_id
+            # application name
+            application_name = self._get_application_name(namespace=namespace)
+
+        self.log.debug(
+            "model name: {}, application name:  {}, machine_id: {}".format(
+                model_name, application_name, machine_id
+            )
+        )
+
+        # create or reuse a new juju machine
+        try:
+            if not await libjuju.model_exists(model_name):
+                await libjuju.add_model(model_name, libjuju.vca_connection.lxd_cloud)
+            machine, new = await libjuju.create_machine(
+                model_name=model_name,
+                machine_id=machine_id,
+                db_dict=db_dict,
+                progress_timeout=progress_timeout,
+                total_timeout=total_timeout,
+            )
+            # id for the execution environment
+            ee_id = N2VCJujuConnector._build_ee_id(
+                model_name=model_name,
+                application_name=application_name,
+                machine_id=str(machine.entity_id),
+            )
+            self.log.debug("ee_id: {}".format(ee_id))
+
+            if new:
+                # write ee_id in database
+                self._write_ee_id_db(db_dict=db_dict, ee_id=ee_id)
+
+        except Exception as e:
+            message = "Error creating machine on juju: {}".format(e)
+            self.log.error(message)
+            raise N2VCException(message=message)
+
+        # new machine credentials
+        credentials = {"hostname": machine.dns_name}
+
+        self.log.info(
+            "Execution environment created. ee_id: {}, credentials: {}".format(
+                ee_id, credentials
+            )
+        )
+
+        return ee_id, credentials
+
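+    # A hedged usage sketch (namespace and db_dict values are hypothetical):
+    #
+    #     ee_id, creds = await connector.create_execution_environment(
+    #         namespace=".ns-0001.vnf-a.vdu-b-0",
+    #         db_dict={
+    #             "collection": "nsrs",
+    #             "filter": {"_id": "ns-0001"},
+    #             "path": "_admin.deployed.VCA.0",
+    #         },
+    #     )
+    #     # ee_id has the form "<model>.<application>.<machine>" and creds holds
+    #     # the machine hostname, e.g. {"hostname": "10.0.0.10"}
+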
+    async def register_execution_environment(
+        self,
+        namespace: str,
+        credentials: dict,
+        db_dict: dict,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        vca_id: str = None,
+    ) -> str:
+        """
+        Register an existing execution environment at the VCA
+
+        :param: namespace: Contains a dot-separated string.
+                    LCM will use: [<nsi-id>].<ns-id>.<vnf-id>.<vdu-id>[-<count>]
+        :param: credentials: credentials to access the existing execution environment
+                            (it can contain hostname, username, path to private key,
+                            etc., depending on the underlying cloud)
+        :param: db_dict: where to write to database when the status changes.
+            It contains a dictionary with {collection: str, filter: {}, path: str},
+                e.g. {collection: "nsrs", filter: {_id: <nsd-id>},
+                path: "_admin.deployed.VCA.3"}
+        :param: progress_timeout: Progress timeout
+        :param: total_timeout: Total timeout
+        :param: vca_id: VCA ID
+
+        :returns: id of the execution environment
+        """
+        self.log.info(
+            "Registering execution environment. namespace={}, credentials={}".format(
+                namespace, credentials
+            )
+        )
+        libjuju = await self._get_libjuju(vca_id)
+
+        if credentials is None:
+            raise N2VCBadArgumentsException(
+                message="credentials are mandatory", bad_args=["credentials"]
+            )
+        if credentials.get("hostname"):
+            hostname = credentials["hostname"]
+        else:
+            raise N2VCBadArgumentsException(
+                message="hostname is mandatory", bad_args=["credentials.hostname"]
+            )
+        if credentials.get("username"):
+            username = credentials["username"]
+        else:
+            raise N2VCBadArgumentsException(
+                message="username is mandatory", bad_args=["credentials.username"]
+            )
+        if "private_key_path" in credentials:
+            private_key_path = credentials["private_key_path"]
+        else:
+            # if not passed as argument, use generated private key path
+            private_key_path = self.private_key_path
+
+        _nsi_id, ns_id, _vnf_id, _vdu_id, _vdu_count = self._get_namespace_components(
+            namespace=namespace
+        )
+
+        # model name
+        model_name = ns_id
+        # application name
+        application_name = self._get_application_name(namespace=namespace)
+
+        # register machine on juju
+        try:
+            if not await libjuju.model_exists(model_name):
+                await libjuju.add_model(model_name, libjuju.vca_connection.lxd_cloud)
+            machine_id = await libjuju.provision_machine(
+                model_name=model_name,
+                hostname=hostname,
+                username=username,
+                private_key_path=private_key_path,
+                db_dict=db_dict,
+                progress_timeout=progress_timeout,
+                total_timeout=total_timeout,
+            )
+        except Exception as e:
+            self.log.error("Error registering machine: {}".format(e))
+            raise N2VCException(
+                message="Error registering machine on juju: {}".format(e)
+            )
+
+        self.log.info("Machine registered: {}".format(machine_id))
+
+        # id for the execution environment
+        ee_id = N2VCJujuConnector._build_ee_id(
+            model_name=model_name,
+            application_name=application_name,
+            machine_id=str(machine_id),
+        )
+
+        self.log.info("Execution environment registered. ee_id: {}".format(ee_id))
+
+        return ee_id
+
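+    # A hedged usage sketch (credentials are hypothetical; assumes a reachable VM):
+    #
+    #     ee_id = await connector.register_execution_environment(
+    #         namespace=".ns-0001.vnf-a.vdu-b-0",
+    #         credentials={"hostname": "10.0.0.20", "username": "ubuntu"},
+    #         db_dict={"collection": "nsrs", "filter": {"_id": "ns-0001"},
+    #                  "path": "_admin.deployed.VCA.1"},
+    #     )
+    #     # The VM is provisioned as a Juju machine; ee_id again encodes
+    #     # "<model>.<application>.<machine>"
+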
+    # If a native_charm is being deployed and a JujuApplicationExists error happens,
+    # it will retry and add a unit to the existing application instead
+    @retry(
+        attempts=3,
+        delay=5,
+        retry_exceptions=(N2VCApplicationExists,),
+        timeout=None,
+        callback=retry_callback,
+    )
+    async def install_configuration_sw(
+        self,
+        ee_id: str,
+        artifact_path: str,
+        db_dict: dict,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        config: dict = None,
+        num_units: int = 1,
+        vca_id: str = None,
+        scaling_out: bool = False,
+        vca_type: str = None,
+    ):
+        """
+        Install the software inside the execution environment identified by ee_id
+
+        :param: ee_id: the id of the execution environment returned by
+                          create_execution_environment or register_execution_environment
+        :param: artifact_path: where to locate the artifacts (parent folder) using
+                                  the self.fs
+                                  the final artifact path will be a combination of this
+                                  artifact_path and additional string from the config_dict
+                                  (e.g. charm name)
+        :param: db_dict: where to write into database when the status changes.
+                             It contains a dict with
+                                {collection: <str>, filter: {}, path: <str>},
+                                e.g. {collection: "nsrs", filter:
+                                    {_id: <nsd-id>}, path: "_admin.deployed.VCA.3"}
+        :param: progress_timeout: Progress timeout
+        :param: total_timeout: Total timeout
+        :param: config: Dictionary with deployment config information.
+        :param: num_units: Number of units to deploy of a particular charm.
+        :param: vca_id: VCA ID
+        :param: scaling_out: Boolean to indicate if it is a scaling out operation
+        :param: vca_type: VCA type
+        """
+
+        self.log.info(
+            (
+                "Installing configuration sw on ee_id: {}, "
+                "artifact path: {}, db_dict: {}"
+            ).format(ee_id, artifact_path, db_dict)
+        )
+        libjuju = await self._get_libjuju(vca_id)
+
+        # check arguments
+        if ee_id is None or len(ee_id) == 0:
+            raise N2VCBadArgumentsException(
+                message="ee_id is mandatory", bad_args=["ee_id"]
+            )
+        if artifact_path is None or len(artifact_path) == 0:
+            raise N2VCBadArgumentsException(
+                message="artifact_path is mandatory", bad_args=["artifact_path"]
+            )
+        if db_dict is None:
+            raise N2VCBadArgumentsException(
+                message="db_dict is mandatory", bad_args=["db_dict"]
+            )
+
+        try:
+            (
+                model_name,
+                application_name,
+                machine_id,
+            ) = N2VCJujuConnector._get_ee_id_components(ee_id=ee_id)
+            self.log.debug(
+                "model: {}, application: {}, machine: {}".format(
+                    model_name, application_name, machine_id
+                )
+            )
+        except Exception:
+            raise N2VCBadArgumentsException(
+                message="ee_id={} is not a valid execution environment id".format(
+                    ee_id
+                ),
+                bad_args=["ee_id"],
+            )
+
+        # remove // in charm path
+        while artifact_path.find("//") >= 0:
+            artifact_path = artifact_path.replace("//", "/")
+
+        # check charm path
+        if not self.fs.file_exists(artifact_path):
+            msg = "artifact path does not exist: {}".format(artifact_path)
+            raise N2VCBadArgumentsException(message=msg, bad_args=["artifact_path"])
+
+        if artifact_path.startswith("/"):
+            full_path = self.fs.path + artifact_path
+        else:
+            full_path = self.fs.path + "/" + artifact_path
+
+        try:
+            if vca_type == "native_charm" and await libjuju.check_application_exists(
+                model_name, application_name
+            ):
+                await libjuju.add_unit(
+                    application_name=application_name,
+                    model_name=model_name,
+                    machine_id=machine_id,
+                    db_dict=db_dict,
+                    progress_timeout=progress_timeout,
+                    total_timeout=total_timeout,
+                )
+            else:
+                await libjuju.deploy_charm(
+                    model_name=model_name,
+                    application_name=application_name,
+                    path=full_path,
+                    machine_id=machine_id,
+                    db_dict=db_dict,
+                    progress_timeout=progress_timeout,
+                    total_timeout=total_timeout,
+                    config=config,
+                    num_units=num_units,
+                )
+        except JujuApplicationExists as e:
+            raise N2VCApplicationExists(
+                message="Error deploying charm into ee={} : {}".format(ee_id, e.message)
+            )
+        except Exception as e:
+            raise N2VCException(
+                message="Error deploying charm into ee={} : {}".format(ee_id, e)
+            )
+
+        self.log.info("Configuration sw installed")
+
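+    # A hedged usage sketch (ee_id and artifact path are hypothetical):
+    #
+    #     await connector.install_configuration_sw(
+    #         ee_id="ns-0001.app-vnf-a-0-abcde.2",
+    #         artifact_path="/ns-0001/charms/simple",
+    #         db_dict={"collection": "nsrs", "filter": {"_id": "ns-0001"},
+    #                  "path": "_admin.deployed.VCA.0"},
+    #     )
+    #     # Deploys the charm found under self.fs.path + artifact_path on the
+    #     # machine encoded in ee_id; the @retry decorator re-runs the call up to
+    #     # 3 times if N2VCApplicationExists is raised
+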
+    async def install_k8s_proxy_charm(
+        self,
+        charm_name: str,
+        namespace: str,
+        artifact_path: str,
+        db_dict: dict,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        config: dict = None,
+        vca_id: str = None,
+    ) -> str:
+        """
+        Install a k8s proxy charm
+
+        :param charm_name: Name of the charm being deployed
+        :param namespace: collection of all the uuids related to the charm.
+        :param str artifact_path: where to locate the artifacts (parent folder) using
+            the self.fs
+            the final artifact path will be a combination of this artifact_path and
+            additional string from the config_dict (e.g. charm name)
+        :param dict db_dict: where to write into database when the status changes.
+                        It contains a dict with
+                            {collection: <str>, filter: {}, path: <str>},
+                            e.g. {collection: "nsrs", filter:
+                                {_id: <nsd-id>}, path: "_admin.deployed.VCA.3"}
+        :param: progress_timeout: Progress timeout
+        :param: total_timeout: Total timeout
+        :param config: Dictionary with additional configuration
+        :param vca_id: VCA ID
+
+        :returns ee_id: execution environment id.
+        """
+        self.log.info(
+            "Installing k8s proxy charm: {}, artifact path: {}, db_dict: {}".format(
+                charm_name, artifact_path, db_dict
+            )
+        )
+        libjuju = await self._get_libjuju(vca_id)
+
+        if artifact_path is None or len(artifact_path) == 0:
+            raise N2VCBadArgumentsException(
+                message="artifact_path is mandatory", bad_args=["artifact_path"]
+            )
+        if db_dict is None:
+            raise N2VCBadArgumentsException(
+                message="db_dict is mandatory", bad_args=["db_dict"]
+            )
+
+        # remove // in charm path
+        while artifact_path.find("//") >= 0:
+            artifact_path = artifact_path.replace("//", "/")
+
+        # check charm path
+        if not self.fs.file_exists(artifact_path):
+            msg = "artifact path does not exist: {}".format(artifact_path)
+            raise N2VCBadArgumentsException(message=msg, bad_args=["artifact_path"])
+
+        if artifact_path.startswith("/"):
+            full_path = self.fs.path + artifact_path
+        else:
+            full_path = self.fs.path + "/" + artifact_path
+
+        _, ns_id, _, _, _ = self._get_namespace_components(namespace=namespace)
+        model_name = "{}-k8s".format(ns_id)
+        if not await libjuju.model_exists(model_name):
+            await libjuju.add_model(model_name, libjuju.vca_connection.k8s_cloud)
+        application_name = self._get_application_name(namespace)
+
+        try:
+            await libjuju.deploy_charm(
+                model_name=model_name,
+                application_name=application_name,
+                path=full_path,
+                machine_id=None,
+                db_dict=db_dict,
+                progress_timeout=progress_timeout,
+                total_timeout=total_timeout,
+                config=config,
+            )
+        except Exception as e:
+            raise N2VCException(message="Error deploying charm: {}".format(e))
+
+        self.log.info("K8s proxy charm installed")
+        ee_id = N2VCJujuConnector._build_ee_id(
+            model_name=model_name, application_name=application_name, machine_id="k8s"
+        )
+
+        self._write_ee_id_db(db_dict=db_dict, ee_id=ee_id)
+
+        return ee_id
+
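+    # Note: for k8s proxy charms the model is named "<ns_id>-k8s" and the machine
+    # component of the returned ee_id is the literal string "k8s", so a
+    # hypothetical result looks like "ns-0001-k8s.app-vnf-a-0-abcde.k8s".
+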
+    async def get_ee_ssh_public__key(
+        self,
+        ee_id: str,
+        db_dict: dict,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        vca_id: str = None,
+    ) -> str:
+        """
+        Get Execution environment ssh public key
+
+        :param: ee_id: the id of the execution environment returned by
+            create_execution_environment or register_execution_environment
+        :param: db_dict: where to write into database when the status changes.
+                            It contains a dict with
+                                {collection: <str>, filter: {}, path: <str>},
+                                e.g. {collection: "nsrs", filter:
+                                    {_id: <nsd-id>}, path: "_admin.deployed.VCA.3"}
+        :param: progress_timeout: Progress timeout
+        :param: total_timeout: Total timeout
+        :param vca_id: VCA ID
+        :returns: public key of the execution environment
+                    For the case of juju proxy charm ssh-layered, it is the one
+                    returned by 'get-ssh-public-key' primitive.
+                    It raises an N2VC exception on failure
+        """
+
+        self.log.info(
+            (
+                "Generating priv/pub key pair and get pub key on ee_id: {}, db_dict: {}"
+            ).format(ee_id, db_dict)
+        )
+        libjuju = await self._get_libjuju(vca_id)
+
+        # check arguments
+        if ee_id is None or len(ee_id) == 0:
+            raise N2VCBadArgumentsException(
+                message="ee_id is mandatory", bad_args=["ee_id"]
+            )
+        if db_dict is None:
+            raise N2VCBadArgumentsException(
+                message="db_dict is mandatory", bad_args=["db_dict"]
+            )
+
+        try:
+            (
+                model_name,
+                application_name,
+                machine_id,
+            ) = N2VCJujuConnector._get_ee_id_components(ee_id=ee_id)
+            self.log.debug(
+                "model: {}, application: {}, machine: {}".format(
+                    model_name, application_name, machine_id
+                )
+            )
+        except Exception:
+            raise N2VCBadArgumentsException(
+                message="ee_id={} is not a valid execution environment id".format(
+                    ee_id
+                ),
+                bad_args=["ee_id"],
+            )
+
+        # try to execute ssh layer primitives (if exist):
+        #       generate-ssh-key
+        #       get-ssh-public-key
+
+        output = None
+
+        application_name = N2VCJujuConnector._format_app_name(application_name)
+
+        # execute action: generate-ssh-key
+        try:
+            output, _status = await libjuju.execute_action(
+                model_name=model_name,
+                application_name=application_name,
+                action_name="generate-ssh-key",
+                db_dict=db_dict,
+                progress_timeout=progress_timeout,
+                total_timeout=total_timeout,
+            )
+        except Exception as e:
+            self.log.info(
+                "Skipping exception while executing action generate-ssh-key: {}".format(
+                    e
+                )
+            )
+
+        # execute action: get-ssh-public-key
+        try:
+            output, _status = await libjuju.execute_action(
+                model_name=model_name,
+                application_name=application_name,
+                action_name="get-ssh-public-key",
+                db_dict=db_dict,
+                progress_timeout=progress_timeout,
+                total_timeout=total_timeout,
+            )
+        except Exception as e:
+            msg = "Cannot execute action get-ssh-public-key: {}\n".format(e)
+            self.log.info(msg)
+            raise N2VCExecutionException(e, primitive_name="get-ssh-public-key")
+
+        # return public key if exists
+        return output["pubkey"] if "pubkey" in output else output
+
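+    # A hedged usage sketch (ee_id is hypothetical). For an ssh-layered proxy
+    # charm this runs the "generate-ssh-key" and "get-ssh-public-key" primitives:
+    #
+    #     pubkey = await connector.get_ee_ssh_public__key(
+    #         ee_id="ns-0001.app-vnf-a-0-abcde.2",
+    #         db_dict={"collection": "nsrs", "filter": {"_id": "ns-0001"},
+    #                  "path": "_admin.deployed.VCA.0"},
+    #     )
+    #     # pubkey is the "pubkey" field of the action output when present
+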
+    async def get_metrics(
+        self, model_name: str, application_name: str, vca_id: str = None
+    ) -> dict:
+        """
+        Get metrics from application
+
+        :param: model_name: Model name
+        :param: application_name: Application name
+        :param: vca_id: VCA ID
+
+        :return: Dictionary with obtained metrics
+        """
+        libjuju = await self._get_libjuju(vca_id)
+        return await libjuju.get_metrics(model_name, application_name)
+
+    async def add_relation(
+        self, provider: RelationEndpoint, requirer: RelationEndpoint
+    ):
+        """
+        Add relation between two charmed endpoints
+
+        :param: provider: Provider relation endpoint
+        :param: requirer: Requirer relation endpoint
+        """
+        self.log.debug(f"adding new relation between {provider} and {requirer}")
+        cross_model_relation = (
+            provider.model_name != requirer.model_name
+            or provider.vca_id != requirer.vca_id
+        )
+        try:
+            if cross_model_relation:
+                # Cross-model relation
+                provider_libjuju = await self._get_libjuju(provider.vca_id)
+                requirer_libjuju = await self._get_libjuju(requirer.vca_id)
+                offer = await provider_libjuju.offer(provider)
+                if offer:
+                    saas_name = await requirer_libjuju.consume(
+                        requirer.model_name, offer, provider_libjuju
+                    )
+                    await requirer_libjuju.add_relation(
+                        requirer.model_name, requirer.endpoint, saas_name
+                    )
+            else:
+                # Standard relation
+                vca_id = provider.vca_id
+                model = provider.model_name
+                libjuju = await self._get_libjuju(vca_id)
+                # add juju relations between two applications
+                await libjuju.add_relation(
+                    model_name=model,
+                    endpoint_1=provider.endpoint,
+                    endpoint_2=requirer.endpoint,
+                )
+        except Exception as e:
+            message = f"Error adding relation between {provider} and {requirer}: {e}"
+            self.log.error(message)
+            raise N2VCException(message=message)
+
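+    # A hedged usage sketch, assuming RelationEndpoint is built from an ee_id, a
+    # vca_id and an endpoint name as defined in osm_lcm.n2vc.definitions:
+    #
+    #     provider = RelationEndpoint("ns-0001.app-db.0", None, "db")
+    #     requirer = RelationEndpoint("ns-0001.app-web.1", None, "backend")
+    #     await connector.add_relation(provider, requirer)
+    #     # Same model and vca_id -> a plain Juju relation; otherwise an offer is
+    #     # created on the provider side and consumed by the requirer model
+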
+    async def remove_relation(self):
+        # TODO
+        self.log.info("Method not implemented yet")
+        raise MethodNotImplemented()
+
+    async def deregister_execution_environments(self):
+        self.log.info("Method not implemented yet")
+        raise MethodNotImplemented()
+
+    async def delete_namespace(
+        self,
+        namespace: str,
+        db_dict: dict = None,
+        total_timeout: float = None,
+        vca_id: str = None,
+    ):
+        """
+        Remove a network scenario and its execution environments
+        :param: namespace: [<nsi-id>].<ns-id>
+        :param: db_dict: where to write into database when the status changes.
+                        It contains a dict with
+                            {collection: <str>, filter: {}, path: <str>},
+                            e.g. {collection: "nsrs", filter:
+                                {_id: <nsd-id>}, path: "_admin.deployed.VCA.3"}
+        :param: total_timeout: Total timeout
+        :param: vca_id: VCA ID
+        """
+        self.log.info("Deleting namespace={}".format(namespace))
+        will_not_delete = False
+        if namespace not in self.delete_namespace_locks:
+            self.delete_namespace_locks[namespace] = asyncio.Lock()
+        delete_lock = self.delete_namespace_locks[namespace]
+
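+        # If the lock is already held, another coroutine is deleting this same
+        # namespace: wait for it to finish and return without deleting it again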
+        while delete_lock.locked():
+            will_not_delete = True
+            await asyncio.sleep(0.1)
+
+        if will_not_delete:
+            self.log.info("Namespace {} deleted by another worker.".format(namespace))
+            return
+
+        try:
+            async with delete_lock:
+                libjuju = await self._get_libjuju(vca_id)
+
+                # check arguments
+                if namespace is None:
+                    raise N2VCBadArgumentsException(
+                        message="namespace is mandatory", bad_args=["namespace"]
+                    )
+
+                (
+                    _nsi_id,
+                    ns_id,
+                    _vnf_id,
+                    _vdu_id,
+                    _vdu_count,
+                ) = self._get_namespace_components(namespace=namespace)
+                if ns_id is not None:
+                    try:
+                        models = await libjuju.list_models(contains=ns_id)
+                        for model in models:
+                            await libjuju.destroy_model(
+                                model_name=model, total_timeout=total_timeout
+                            )
+                    except Exception as e:
+                        self.log.error(f"Error deleting namespace {namespace} : {e}")
+                        raise N2VCException(
+                            message="Error deleting namespace {} : {}".format(
+                                namespace, e
+                            )
+                        )
+                else:
+                    raise N2VCBadArgumentsException(
+                        message="only ns_id is permitted to delete yet",
+                        bad_args=["namespace"],
+                    )
+        except Exception as e:
+            self.log.error(f"Error deleting namespace {namespace} : {e}")
+            raise e
+        finally:
+            self.delete_namespace_locks.pop(namespace)
+        self.log.info("Namespace {} deleted".format(namespace))
+
+    async def delete_execution_environment(
+        self,
+        ee_id: str,
+        db_dict: dict = None,
+        total_timeout: float = None,
+        scaling_in: bool = False,
+        vca_type: str = None,
+        vca_id: str = None,
+        application_to_delete: str = None,
+    ):
+        """
+        Delete an execution environment
+        :param str ee_id: id of the execution environment to delete
+        :param dict db_dict: where to write into database when the status changes.
+                        It contains a dict with
+                            {collection: <str>, filter: {}, path: <str>},
+                            e.g. {collection: "nsrs", filter:
+                                {_id: <nsd-id>}, path: "_admin.deployed.VCA.3"}
+        :param total_timeout: Total timeout
+        :param scaling_in: Boolean to indicate if it is a scaling in operation
+        :param vca_type: VCA type
+        :param vca_id: VCA ID
+        :param application_to_delete: name of the single application to be deleted
+        """
+        self.log.info("Deleting execution environment ee_id={}".format(ee_id))
+        libjuju = await self._get_libjuju(vca_id)
+
+        # check arguments
+        if ee_id is None:
+            raise N2VCBadArgumentsException(
+                message="ee_id is mandatory", bad_args=["ee_id"]
+            )
+
+        model_name, application_name, machine_id = self._get_ee_id_components(
+            ee_id=ee_id
+        )
+        try:
+            if application_to_delete == application_name:
+                # destroy the application
+                await libjuju.destroy_application(
+                    model_name=model_name,
+                    application_name=application_name,
+                    total_timeout=total_timeout,
+                )
+                # if model is empty delete it
+                controller = await libjuju.get_controller()
+                model = await libjuju.get_model(
+                    controller=controller,
+                    model_name=model_name,
+                )
+                if not model.applications:
+                    self.log.info("Model {} is empty, deleting it".format(model_name))
+                    await libjuju.destroy_model(
+                        model_name=model_name,
+                        total_timeout=total_timeout,
+                    )
+            elif not scaling_in:
+                # destroy the model
+                await libjuju.destroy_model(
+                    model_name=model_name, total_timeout=total_timeout
+                )
+            elif vca_type == "native_charm" and scaling_in:
+                # destroy the unit in the application
+                await libjuju.destroy_unit(
+                    application_name=application_name,
+                    model_name=model_name,
+                    machine_id=machine_id,
+                    total_timeout=total_timeout,
+                )
+            else:
+                # destroy the application
+                await libjuju.destroy_application(
+                    model_name=model_name,
+                    application_name=application_name,
+                    total_timeout=total_timeout,
+                )
+        except Exception as e:
+            raise N2VCException(
+                message=(
+                    "Error deleting execution environment {} (application {}) : {}"
+                ).format(ee_id, application_name, e)
+            )
+
+        self.log.info("Execution environment {} deleted".format(ee_id))
+
+    async def exec_primitive(
+        self,
+        ee_id: str,
+        primitive_name: str,
+        params_dict: dict,
+        db_dict: dict = None,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        vca_id: str = None,
+        vca_type: str = None,
+    ) -> str:
+        """
+        Execute a primitive in the execution environment
+
+        :param: ee_id: the one returned by create_execution_environment or
+            register_execution_environment
+        :param: primitive_name: must be one defined in the software. There is one
+            called 'config', where, for the proxy case, the 'credentials' of VM are
+            provided
+        :param: params_dict: parameters of the action
+        :param: db_dict: where to write into database when the status changes.
+                        It contains a dict with
+                            {collection: <str>, filter: {}, path: <str>},
+                            e.g. {collection: "nsrs", filter:
+                                {_id: <nsd-id>}, path: "_admin.deployed.VCA.3"}
+        :param: progress_timeout: Progress timeout
+        :param: total_timeout: Total timeout
+        :param: vca_id: VCA ID
+        :param: vca_type: VCA type
+        :returns str: primitive result, if ok. It raises exceptions in case of fail
+        """
+
+        self.log.info(
+            "Executing primitive: {} on ee: {}, params: {}".format(
+                primitive_name, ee_id, params_dict
+            )
+        )
+        libjuju = await self._get_libjuju(vca_id)
+
+        # check arguments
+        if ee_id is None or len(ee_id) == 0:
+            raise N2VCBadArgumentsException(
+                message="ee_id is mandatory", bad_args=["ee_id"]
+            )
+        if primitive_name is None or len(primitive_name) == 0:
+            raise N2VCBadArgumentsException(
+                message="action_name is mandatory", bad_args=["action_name"]
+            )
+        if params_dict is None:
+            params_dict = dict()
+
+        try:
+            (
+                model_name,
+                application_name,
+                machine_id,
+            ) = N2VCJujuConnector._get_ee_id_components(ee_id=ee_id)
+            # To run action on the leader unit in libjuju.execute_action function,
+            # machine_id must be set to None if vca_type is not native_charm
+            if vca_type != "native_charm":
+                machine_id = None
+        except Exception:
+            raise N2VCBadArgumentsException(
+                message="ee_id={} is not a valid execution environment id".format(
+                    ee_id
+                ),
+                bad_args=["ee_id"],
+            )
+
+        if primitive_name == "config":
+            # Special case: config primitive
+            try:
+                await libjuju.configure_application(
+                    model_name=model_name,
+                    application_name=application_name,
+                    config=params_dict,
+                )
+                actions = await libjuju.get_actions(
+                    application_name=application_name, model_name=model_name
+                )
+                self.log.debug(
+                    "Application {} has these actions: {}".format(
+                        application_name, actions
+                    )
+                )
+                if "verify-ssh-credentials" in actions:
+                    # execute verify-credentials
+                    num_retries = 20
+                    retry_timeout = 15.0
+                    for _ in range(num_retries):
+                        try:
+                            self.log.debug("Executing action verify-ssh-credentials...")
+                            output, ok = await libjuju.execute_action(
+                                model_name=model_name,
+                                application_name=application_name,
+                                action_name="verify-ssh-credentials",
+                                db_dict=db_dict,
+                                progress_timeout=progress_timeout,
+                                total_timeout=total_timeout,
+                            )
+
+                            if ok == "failed":
+                                self.log.debug(
+                                    "Error executing verify-ssh-credentials: {}. Retrying...".format(
+                                        output
+                                    )
+                                )
+                                await asyncio.sleep(retry_timeout)
+
+                                continue
+                            self.log.debug("Result: {}, output: {}".format(ok, output))
+                            break
+                        except asyncio.CancelledError:
+                            raise
+                    else:
+                        self.log.error(
+                            "Error executing verify-ssh-credentials after {} retries. ".format(
+                                num_retries
+                            )
+                        )
+                else:
+                    msg = "Action verify-ssh-credentials does not exist in application {}".format(
+                        application_name
+                    )
+                    self.log.debug(msg=msg)
+            except Exception as e:
+                self.log.error("Error configuring juju application: {}".format(e))
+                raise N2VCExecutionException(
+                    message="Error configuring application into ee={} : {}".format(
+                        ee_id, e
+                    ),
+                    primitive_name=primitive_name,
+                )
+            return "CONFIG OK"
+        else:
+            try:
+                output, status = await libjuju.execute_action(
+                    model_name=model_name,
+                    application_name=application_name,
+                    action_name=primitive_name,
+                    db_dict=db_dict,
+                    machine_id=machine_id,
+                    progress_timeout=progress_timeout,
+                    total_timeout=total_timeout,
+                    **params_dict,
+                )
+                if status == "completed":
+                    return output
+                else:
+                    if "output" in output:
+                        raise Exception(f'{status}: {output["output"]}')
+                    else:
+                        raise Exception(
+                            f"{status}: No further information received from action"
+                        )
+
+            except Exception as e:
+                self.log.error(f"Error executing primitive {primitive_name}: {e}")
+                raise N2VCExecutionException(
+                    message=f"Error executing primitive {primitive_name} in ee={ee_id}: {e}",
+                    primitive_name=primitive_name,
+                )
+
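+    # A hedged usage sketch (primitive name and parameters are hypothetical):
+    #
+    #     result = await connector.exec_primitive(
+    #         ee_id="ns-0001.app-vnf-a-0-abcde.2",
+    #         primitive_name="touch",
+    #         params_dict={"filename": "/tmp/flagfile"},
+    #     )
+    #     # Returns the action output when the status is "completed". The special
+    #     # primitive name "config" instead applies params_dict as application
+    #     # config and returns "CONFIG OK"
+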
+    async def upgrade_charm(
+        self,
+        ee_id: str = None,
+        path: str = None,
+        charm_id: str = None,
+        charm_type: str = None,
+        timeout: float = None,
+    ) -> str:
+        """This method upgrade charms in VNFs
+
+        Args:
+            ee_id:  Execution environment id
+            path:   Local path to the charm
+            charm_id:   charm-id
+            charm_type: Charm type can be lxc_proxy_charm, native_charm or k8s_proxy_charm
+            timeout: (Float)    Timeout for the ns update operation
+
+        Returns:
+            The output of the update operation if the status equals "completed"
+
+        """
+        self.log.info("Upgrading charm: {} on ee: {}".format(path, ee_id))
+        libjuju = await self._get_libjuju(charm_id)
+
+        # check arguments
+        if ee_id is None or len(ee_id) == 0:
+            raise N2VCBadArgumentsException(
+                message="ee_id is mandatory", bad_args=["ee_id"]
+            )
+        try:
+            (
+                model_name,
+                application_name,
+                machine_id,
+            ) = N2VCJujuConnector._get_ee_id_components(ee_id=ee_id)
+
+        except Exception:
+            raise N2VCBadArgumentsException(
+                message="ee_id={} is not a valid execution environment id".format(
+                    ee_id
+                ),
+                bad_args=["ee_id"],
+            )
+
+        try:
+            await libjuju.upgrade_charm(
+                application_name=application_name,
+                path=path,
+                model_name=model_name,
+                total_timeout=timeout,
+            )
+
+            return f"Charm upgraded with application name {application_name}"
+
+        except Exception as e:
+            self.log.error("Error upgrading charm {}: {}".format(path, e))
+
+            raise N2VCException(
+                message="Error upgrading charm {} in ee={} : {}".format(path, ee_id, e)
+            )
+
+    async def disconnect(self, vca_id: str = None):
+        """
+        Disconnect from VCA
+
+        :param: vca_id: VCA ID
+        """
+        self.log.info("closing juju N2VC...")
+        libjuju = await self._get_libjuju(vca_id)
+        try:
+            await libjuju.disconnect()
+        except Exception as e:
+            raise N2VCConnectionException(
+                message="Error disconnecting controller: {}".format(e),
+                url=libjuju.vca_connection.data.endpoints,
+            )
+
+    """
+####################################################################################
+################################### P R I V A T E ##################################
+####################################################################################
+    """
+
+    async def _get_libjuju(self, vca_id: str = None) -> Libjuju:
+        """
+        Get libjuju object
+
+        :param: vca_id: VCA ID
+                        If None, get a libjuju object with a Connection to the default VCA
+                        Else, get a libjuju object with a Connection to the specified VCA
+        """
+        if not vca_id:
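+            # Serialize creation of the default libjuju object: wait while another
+            # coroutine is building it, then reuse the cached instance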
+            while self.loading_libjuju.locked():
+                await asyncio.sleep(0.1)
+            if not self.libjuju:
+                async with self.loading_libjuju:
+                    vca_connection = await get_connection(self._store)
+                    self.libjuju = Libjuju(vca_connection, log=self.log)
+            return self.libjuju
+        else:
+            vca_connection = await get_connection(self._store, vca_id)
+            return Libjuju(vca_connection, log=self.log, n2vc=self)
+
+    def _write_ee_id_db(self, db_dict: dict, ee_id: str):
+        # write ee_id to database: _admin.deployed.VCA.x
+        try:
+            the_table = db_dict["collection"]
+            the_filter = db_dict["filter"]
+            the_path = db_dict["path"]
+            if the_path[-1] != ".":
+                the_path = the_path + "."
+            update_dict = {the_path + "ee_id": ee_id}
+            # self.log.debug('Writing ee_id to database: {}'.format(the_path))
+            self.db.set_one(
+                table=the_table,
+                q_filter=the_filter,
+                update_dict=update_dict,
+                fail_on_empty=True,
+            )
+        except asyncio.CancelledError:
+            raise
+        except Exception as e:
+            self.log.error("Error writing ee_id to database: {}".format(e))
+
+    @staticmethod
+    def _build_ee_id(model_name: str, application_name: str, machine_id: str):
+        """
+        Build an execution environment id from model, application and machine
+        :param model_name:
+        :param application_name:
+        :param machine_id:
+        :return:
+        """
+        # id for the execution environment
+        return "{}.{}.{}".format(model_name, application_name, machine_id)
+
+    @staticmethod
+    def _get_ee_id_components(ee_id: str) -> Tuple[str, str, str]:
+        """
+        Get model, application and machine components from an execution environment id
+        :param ee_id:
+        :return: model_name, application_name, machine_id
+        """
+
+        return get_ee_id_components(ee_id)
+
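+    # A small sketch of the ee_id round trip (values are hypothetical):
+    #
+    #     ee_id = N2VCJujuConnector._build_ee_id("ns-0001", "app-vnf-a-0-abcde", "2")
+    #     # ee_id == "ns-0001.app-vnf-a-0-abcde.2"
+    #     model, app, machine = N2VCJujuConnector._get_ee_id_components(ee_id)
+    #     # ("ns-0001", "app-vnf-a-0-abcde", "2")
+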
+    @staticmethod
+    def _find_charm_level(vnf_id: str, vdu_id: str) -> str:
+        """Decides the charm level.
+        Args:
+            vnf_id  (str):  VNF id
+            vdu_id  (str):  VDU id
+
+        Returns:
+            charm_level (str):  ns-level or vnf-level or vdu-level
+        """
+        if vdu_id and not vnf_id:
+            raise N2VCException(message="If vdu-id exists, vnf-id should be provided.")
+        if vnf_id and vdu_id:
+            return "vdu-level"
+        if vnf_id and not vdu_id:
+            return "vnf-level"
+        if not vnf_id and not vdu_id:
+            return "ns-level"
+
+    @staticmethod
+    def _generate_backward_compatible_application_name(
+        vnf_id: str, vdu_id: str, vdu_count: str
+    ) -> str:
+        """Generate backward compatible application name
+         by limiting the app name to 50 characters.
+
+        Args:
+            vnf_id  (str):  VNF ID
+            vdu_id  (str):  VDU ID
+            vdu_count   (str):  vdu-count-index
+
+        Returns:
+            application_name (str): generated application name
+
+        """
+        if vnf_id is None or len(vnf_id) == 0:
+            vnf_id = ""
+        else:
+            # Shorten the vnf_id to its last twelve characters
+            vnf_id = "vnf-" + vnf_id[-12:]
+
+        if vdu_id is None or len(vdu_id) == 0:
+            vdu_id = ""
+        else:
+            # Shorten the vdu_id to its last twelve characters
+            vdu_id = "-vdu-" + vdu_id[-12:]
+
+        if vdu_count is None or len(vdu_count) == 0:
+            vdu_count = ""
+        else:
+            vdu_count = "-cnt-" + vdu_count
+
+        # Generate a random suffix with 5 characters (the default size used by K8s)
+        random_suffix = generate_random_alfanum_string(size=5)
+
+        application_name = "app-{}{}{}-{}".format(
+            vnf_id, vdu_id, vdu_count, random_suffix
+        )
+        return application_name
+
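+    # A sketch with hypothetical ids: a vnf_id ending in "9f8e12c4d5e6",
+    # vdu_id="mgmtVM" and vdu_count="0" would produce a name following the
+    # pattern "app-vnf-9f8e12c4d5e6-vdu-mgmtVM-cnt-0-<5-char-suffix>".
+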
+    @staticmethod
+    def _get_vca_record(search_key: str, vca_records: list, vdu_id: str) -> dict:
+        """Get the correct VCA record dict depending on the search key
+
+        Args:
+            search_key  (str):      keyword to find the correct VCA record
+            vca_records (list):     All VCA records as list
+            vdu_id  (str):          VDU ID
+
+        Returns:
+            vca_record  (dict):     Dictionary which includes the correct VCA record
+
+        """
+        return next(
+            filter(lambda record: record[search_key] == vdu_id, vca_records), {}
+        )
+
+    @staticmethod
+    def _generate_application_name(
+        charm_level: str,
+        vnfrs: dict,
+        vca_records: list,
+        vnf_count: str = None,
+        vdu_id: str = None,
+        vdu_count: str = None,
+    ) -> str:
+        """Generate application name to make the relevant charm of VDU/KDU
+        in the VNFD descriptor become clearly visible.
+        Limiting the app name to 50 characters.
+
+        Args:
+            charm_level  (str):  level of charm
+            vnfrs  (dict):  vnf record dict
+            vca_records   (list):   db_nsr["_admin"]["deployed"]["VCA"] as list
+            vnf_count   (str): vnf count index
+            vdu_id   (str):  VDU ID
+            vdu_count   (str):  vdu count index
+
+        Returns:
+            application_name (str): generated application name
+
+        """
+        application_name = ""
+        if charm_level == "ns-level":
+            if len(vca_records) != 1:
+                raise N2VCException(message="One VCA record is expected.")
+            # Only one VCA record is expected if it's ns-level charm.
+            # Shorten the charm name to its first 40 characters.
+            charm_name = vca_records[0]["charm_name"][:40]
+            if not charm_name:
+                raise N2VCException(message="Charm name should be provided.")
+            application_name = charm_name + "-ns"
+
+        elif charm_level == "vnf-level":
+            if len(vca_records) < 1:
+                raise N2VCException(message="One or more VCA record is expected.")
+            # If VNF is scaled, more than one VCA record may be included in vca_records
+            # but ee_descriptor_id is same.
+            # Shorten the ee_descriptor_id and member-vnf-index-ref
+            # to first 12 characters.
+            application_name = (
+                vca_records[0]["ee_descriptor_id"][:12]
+                + "-"
+                + vnf_count
+                + "-"
+                + vnfrs["member-vnf-index-ref"][:12]
+                + "-vnf"
+            )
+        elif charm_level == "vdu-level":
+            if len(vca_records) < 1:
+                raise N2VCException(message="One or more VCA record is expected.")
+
+            # Charms are also used for deployments with Helm charts.
+            # If deployment unit is a Helm chart/KDU,
+            # vdu_profile_id and vdu_count will be empty string.
+            if vdu_count is None:
+                vdu_count = ""
+
+            # If vnf/vdu is scaled, more than one VCA record may be included in vca_records
+            # but ee_descriptor_id is same.
+            # Shorten the ee_descriptor_id, member-vnf-index-ref and vdu_profile_id
+            # to first 12 characters.
+            if not vdu_id:
+                raise N2VCException(message="vdu-id should be provided.")
+
+            vca_record = N2VCJujuConnector._get_vca_record(
+                "vdu_id", vca_records, vdu_id
+            )
+
+            if not vca_record:
+                vca_record = N2VCJujuConnector._get_vca_record(
+                    "kdu_name", vca_records, vdu_id
+                )
+
+            application_name = (
+                vca_record["ee_descriptor_id"][:12]
+                + "-"
+                + vnf_count
+                + "-"
+                + vnfrs["member-vnf-index-ref"][:12]
+                + "-"
+                + vdu_id[:12]
+                + "-"
+                + vdu_count
+                + "-vdu"
+            )
+
+        return application_name
+
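+    # A sketch with hypothetical record values: a vdu-level charm with
+    # ee_descriptor_id="simple-ee", member-vnf-index-ref="vnf1", vdu_id="mgmtVM",
+    # vnf_count="0" and vdu_count="0" would be named "simple-ee-0-vnf1-mgmtVM-0-vdu".
+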
+    def _get_vnf_count_and_record(
+        self, charm_level: str, vnf_id_and_count: str
+    ) -> Tuple[str, dict]:
+        """Get the vnf count and VNF record depend on charm level
+
+        Args:
+            charm_level  (str)
+            vnf_id_and_count (str)
+
+        Returns:
+            (vnf_count  (str), db_vnfr(dict)) as Tuple
+
+        """
+        vnf_count = ""
+        db_vnfr = {}
+
+        if charm_level in ("vnf-level", "vdu-level"):
+            vnf_id = "-".join(vnf_id_and_count.split("-")[:-1])
+            vnf_count = vnf_id_and_count.split("-")[-1]
+            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
+
+        # If the charm is ns level, it returns empty vnf_count and db_vnfr
+        return vnf_count, db_vnfr
+
+    @staticmethod
+    def _get_vca_records(charm_level: str, db_nsr: dict, db_vnfr: dict) -> list:
+        """Get the VCA records from db_nsr dict
+
+        Args:
+            charm_level (str):  level of charm
+            db_nsr  (dict):     NS record from database
+            db_vnfr (dict):     VNF record from database
+
+        Returns:
+            vca_records (list):  List of VCA record dictionaries
+
+        """
+        vca_records = []
+        if charm_level == "ns-level":
+            vca_records = list(
+                filter(
+                    lambda vca_record: vca_record["target_element"] == "ns",
+                    db_nsr["_admin"]["deployed"]["VCA"],
+                )
+            )
+        elif charm_level in ["vnf-level", "vdu-level"]:
+            vca_records = list(
+                filter(
+                    lambda vca_record: vca_record["member-vnf-index"]
+                    == db_vnfr["member-vnf-index-ref"],
+                    db_nsr["_admin"]["deployed"]["VCA"],
+                )
+            )
+
+        return vca_records
+
+    def _get_application_name(self, namespace: str) -> str:
+        """Build application name from namespace
+
+        Application name structure:
+            NS level: <charm-name>-ns
+            VNF level: <ee-name>-z<vnf-ordinal-scale-number>-<vnf-profile-id>-vnf
+            VDU level: <ee-name>-z<vnf-ordinal-scale-number>-<vnf-profile-id>-
+            <vdu-profile-id>-z<vdu-ordinal-scale-number>-vdu
+
+        Application naming for backward compatibility (old structure):
+            NS level: app-<random_value>
+            VNF level: app-vnf-<vnf-id>-z<ordinal-scale-number>-<random_value>
+            VDU level: app-vnf-<vnf-id>-z<vnf-ordinal-scale-number>-vdu-
+            <vdu-id>-cnt-<vdu-count>-z<vdu-ordinal-scale-number>-<random_value>
+
+        Args:
+            namespace   (str)
+
+        Returns:
+            application_name    (str)
+
+        """
+        # split namespace components
+        (
+            nsi_id,
+            ns_id,
+            vnf_id_and_count,
+            vdu_id,
+            vdu_count,
+        ) = self._get_namespace_components(namespace=namespace)
+
+        if not ns_id:
+            raise N2VCException(message="ns-id should be provided.")
+
+        charm_level = self._find_charm_level(vnf_id_and_count, vdu_id)
+        db_nsr = self.db.get_one("nsrs", {"_id": ns_id})
+        vnf_count, db_vnfr = self._get_vnf_count_and_record(
+            charm_level, vnf_id_and_count
+        )
+        vca_records = self._get_vca_records(charm_level, db_nsr, db_vnfr)
+
+        if all("charm_name" in vca_record.keys() for vca_record in vca_records):
+            application_name = self._generate_application_name(
+                charm_level,
+                db_vnfr,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+        else:
+            application_name = self._generate_backward_compatible_application_name(
+                vnf_id_and_count, vdu_id, vdu_count
+            )
+
+        return N2VCJujuConnector._format_app_name(application_name)
+
+    @staticmethod
+    def _format_model_name(name: str) -> str:
+        """Format the name of the model.
+
+        Model names may only contain lowercase letters, digits and hyphens
+        """
+
+        return name.replace("_", "-").replace(" ", "-").lower()
+
+    @staticmethod
+    def _format_app_name(name: str) -> str:
+        """Format the name of the application (in order to assure valid application name).
+
+        Application names have restrictions (run juju deploy --help):
+            - contains lowercase letters 'a'-'z'
+            - contains numbers '0'-'9'
+            - contains hyphens '-'
+            - starts with a lowercase letter
+            - no two or more consecutive hyphens
+            - no group consisting only of digits after a hyphen
+        """
+
+        def all_numbers(s: str) -> bool:
+            for c in s:
+                if not c.isdigit():
+                    return False
+            return True
+
+        new_name = name.replace("_", "-")
+        new_name = new_name.replace(" ", "-")
+        new_name = new_name.lower()
+        while new_name.find("--") >= 0:
+            new_name = new_name.replace("--", "-")
+        groups = new_name.split("-")
+
+        # find 'all numbers' groups and prefix them with a letter
+        app_name = ""
+        for i in range(len(groups)):
+            group = groups[i]
+            if all_numbers(group):
+                group = "z" + group
+            if i > 0:
+                app_name += "-"
+            app_name += group
+
+        if app_name[0].isdigit():
+            app_name = "z" + app_name
+
+        return app_name
+
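A few illustrative input/output pairs for `_format_app_name` (the names are made up):

```python
# underscores/spaces become hyphens, everything is lowercased
assert N2VCJujuConnector._format_app_name("Simple_VNF Charm") == "simple-vnf-charm"
# "--" collapses to "-", and the all-digit group is prefixed with "z"
assert N2VCJujuConnector._format_app_name("app--vnf-0025") == "app-vnf-z0025"
# a leading digit gets the "z" prefix as well
assert N2VCJujuConnector._format_app_name("2vnf-charm") == "z2vnf-charm"
```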
+    async def validate_vca(self, vca_id: str):
+        """
+        Validate a VCA by connecting/disconnecting to/from it
+
+        :param: vca_id: VCA ID
+        """
+        vca_connection = await get_connection(self._store, vca_id=vca_id)
+        libjuju = Libjuju(vca_connection, log=self.log, n2vc=self)
+        controller = await libjuju.get_controller()
+        await libjuju.disconnect_controller(controller)
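A hedged usage sketch; the connector instance and the VCA ID are hypothetical:

```python
# Inside an async context, with `n2vc` an N2VCJujuConnector instance:
# connection errors propagate on failure, None is returned on success.
await n2vc.validate_vca("my-vca-id")
```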
diff --git a/osm_lcm/n2vc/post-renderer-scripts/mainPostRenderer/mainPostRenderer b/osm_lcm/n2vc/post-renderer-scripts/mainPostRenderer/mainPostRenderer
new file mode 100755 (executable)
index 0000000..740c62f
--- /dev/null
@@ -0,0 +1,32 @@
+#!/bin/bash
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+# Helm post-renderer entry point: receives a comma-separated list of
+# <script>=<argument> pairs in $1 and pipes the rendered manifests read
+# from stdin through each named post-renderer script in turn.
+
+IFS=',' read -r -a args <<< "$1"
+rendered_output=$(cat /dev/stdin)
+
+for arg in "${args[@]}"; do
+    key=$(echo "$arg" | cut -d'=' -f1)
+    value=$(echo "$arg" | cut -d'=' -f2)
+
+    rendered_output=$(echo "$rendered_output" | "$key" "$value")
+done
+
+echo "$rendered_output"
\ No newline at end of file
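For context, a hedged sketch of how this chain might be driven from Python. The release and chart names are illustrative, and the use of Helm's `--post-renderer-args` flag (available in Helm >= 3.7) is an assumption, not taken from the LCM code:

```python
# Each <script>=<argument> pair names a post-renderer helper (e.g.
# podLabels, nodeSelector) and the argument string passed to it.
post_renderer = "/usr/lib/python3/dist-packages/n2vc/post-renderer-scripts/mainPostRenderer/mainPostRenderer"
args = "podLabels=ns_id:1234,nodeSelector=disktype:ssd"  # hypothetical values
command = (
    "helm install myrelease mychart "
    f"--post-renderer {post_renderer} --post-renderer-args '{args}'"
)
```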
diff --git a/osm_lcm/n2vc/post-renderer-scripts/nodeSelector/kustomization.yaml b/osm_lcm/n2vc/post-renderer-scripts/nodeSelector/kustomization.yaml
new file mode 100644 (file)
index 0000000..6fa7da3
--- /dev/null
@@ -0,0 +1,28 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+# Kustomization consumed by the nodeSelector post-renderer script:
+# applies nodeSelector-deployment-patch.yaml to the rendered manifests
+# captured in all.yaml.
+
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- all.yaml
+patches:
+- path: nodeSelector-deployment-patch.yaml
+  target:
+    kind: Deployment
\ No newline at end of file
diff --git a/osm_lcm/n2vc/post-renderer-scripts/nodeSelector/nodeSelector b/osm_lcm/n2vc/post-renderer-scripts/nodeSelector/nodeSelector
new file mode 100755 (executable)
index 0000000..29ac779
--- /dev/null
@@ -0,0 +1,43 @@
+#!/bin/bash
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+# Helm post-renderer helper: receives space-separated key:value pairs in
+# $1 and patches them as nodeSelector entries into the rendered
+# Deployment manifests via kubectl kustomize.
+
+dirname="nodeSelector-"$(tr -dc A-Za-z0-9 </dev/urandom | head -c 13)
+
+mkdir /tmp/$dirname && cd /tmp/$dirname
+
+cp /usr/lib/python3/dist-packages/n2vc/post-renderer-scripts/nodeSelector/kustomization.yaml .
+
+cp /usr/lib/python3/dist-packages/n2vc/post-renderer-scripts/nodeSelector/nodeSelector-deployment-patch.yaml .
+
+cat <&0 > all.yaml
+
+IFS=' ' read -r -a labels <<< "$1"
+content=""
+for label in "${labels[@]}"; do
+    key=$(echo "$label" | cut -d':' -f1)
+    value=$(echo "$label" | cut -d':' -f2)
+    content="$content
+        $key: $value"
+done
+
+echo -e "$content" >> nodeSelector-deployment-patch.yaml
+
+kubectl kustomize && cd .. && rm -r /tmp/$dirname
\ No newline at end of file
diff --git a/osm_lcm/n2vc/post-renderer-scripts/nodeSelector/nodeSelector-deployment-patch.yaml b/osm_lcm/n2vc/post-renderer-scripts/nodeSelector/nodeSelector-deployment-patch.yaml
new file mode 100644 (file)
index 0000000..4f1e8d8
--- /dev/null
@@ -0,0 +1,28 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+# Strategic merge patch used by the nodeSelector post-renderer; the
+# nodeSelector entries are appended to the end of this file at runtime
+# before kubectl kustomize is invoked.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nodeSelectors-deployment
+spec:
+  template:
+    spec:
+      nodeSelector:
\ No newline at end of file
diff --git a/osm_lcm/n2vc/post-renderer-scripts/podLabels/kustomization.yaml b/osm_lcm/n2vc/post-renderer-scripts/podLabels/kustomization.yaml
new file mode 100644 (file)
index 0000000..358978b
--- /dev/null
@@ -0,0 +1,23 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+# Kustomization consumed by the podLabels post-renderer script; the
+# commonLabels section is appended to this file at runtime before
+# kubectl kustomize is invoked.
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- all.yaml
\ No newline at end of file
diff --git a/osm_lcm/n2vc/post-renderer-scripts/podLabels/podLabels b/osm_lcm/n2vc/post-renderer-scripts/podLabels/podLabels
new file mode 100755 (executable)
index 0000000..d6d2870
--- /dev/null
@@ -0,0 +1,42 @@
+#!/bin/bash
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+# Helm post-renderer helper: receives space-separated key:value pairs in
+# $1 and adds them as commonLabels to the rendered manifests via
+# kubectl kustomize.
+
+dirname="podlabels-"$(tr -dc A-Za-z0-9 </dev/urandom | head -c 13)
+
+mkdir /tmp/$dirname && cd /tmp/$dirname
+
+cp /usr/lib/python3/dist-packages/n2vc/post-renderer-scripts/podLabels/kustomization.yaml .
+
+cat <&0 > all.yaml
+
+IFS=' ' read -r -a labels <<< "$1"
+content="commonLabels:"
+for label in "${labels[@]}"; do
+    key=$(echo "$label" | cut -d':' -f1)
+    value=$(echo "$label" | cut -d':' -f2)
+    content="$content
+  $key: $value"
+done
+
+# Add content to the file
+echo -e "\n$content" >> kustomization.yaml
+
+kubectl kustomize && cd .. && rm -r /tmp/$dirname
\ No newline at end of file
diff --git a/osm_lcm/n2vc/provisioner.py b/osm_lcm/n2vc/provisioner.py
new file mode 100644 (file)
index 0000000..ab25993
--- /dev/null
@@ -0,0 +1,376 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+import logging
+import os
+import re
+from subprocess import CalledProcessError
+import tempfile
+import uuid
+
+from juju.client import client
+import asyncio
+
+arches = [
+    [re.compile(r"amd64|x86_64"), "amd64"],
+    [re.compile(r"i?[3-9]86"), "i386"],
+    [re.compile(r"(arm$)|(armv.*)"), "armhf"],
+    [re.compile(r"aarch64"), "arm64"],
+    [re.compile(r"ppc64|ppc64el|ppc64le"), "ppc64el"],
+    [re.compile(r"s390x?"), "s390x"],
+]
+
+
+def normalize_arch(rawArch):
+    """Normalize the architecture string."""
+    for arch in arches:
+        if arch[0].match(rawArch):
+            return arch[1]
+
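Illustrative behaviour of `normalize_arch` (note that it implicitly returns `None` when no pattern matches):

```python
assert normalize_arch("x86_64") == "amd64"
assert normalize_arch("aarch64") == "arm64"
assert normalize_arch("armv7l") == "armhf"
assert normalize_arch("riscv64") is None  # unrecognized arches fall through
```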
+
+DETECTION_SCRIPT = """#!/bin/bash
+set -e
+os_id=$(grep '^ID=' /etc/os-release | tr -d '"' | cut -d= -f2)
+if [ "$os_id" = 'centos' ] || [ "$os_id" = 'rhel' ] ; then
+  os_version=$(grep '^VERSION_ID=' /etc/os-release | tr -d '"' | cut -d= -f2)
+  echo "$os_id$os_version"
+else
+  lsb_release -cs
+fi
+uname -m
+grep MemTotal /proc/meminfo
+cat /proc/cpuinfo
+"""
+
+INITIALIZE_UBUNTU_SCRIPT = """set -e
+(id ubuntu &> /dev/null) || useradd -m ubuntu -s /bin/bash
+umask 0077
+temp=$(mktemp)
+echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' > $temp
+install -m 0440 $temp /etc/sudoers.d/90-juju-ubuntu
+rm $temp
+su ubuntu -c '[ -f ~/.ssh/authorized_keys ] || install -D -m 0600 /dev/null ~/.ssh/authorized_keys'
+export authorized_keys="{}"
+if [ ! -z "$authorized_keys" ]; then
+    su ubuntu -c 'echo $authorized_keys >> ~/.ssh/authorized_keys'
+fi
+"""
+
+IPTABLES_SCRIPT = """#!/bin/bash
+set -e
+[ -v `which netfilter-persistent` ] && apt update \
+    && DEBIAN_FRONTEND=noninteractive apt-get install -yqq iptables-persistent
+iptables -t nat -A OUTPUT -p tcp -d {} -j DNAT --to-destination {}
+netfilter-persistent save
+"""
+
+IPTABLES_SCRIPT_RHEL = """#!/bin/bash
+set -e
+[ -v `which firewalld` ] && yum install -q -y firewalld
+systemctl is-active --quiet firewalld || systemctl start firewalld \
+    && firewall-cmd --permanent --zone=public --set-target=ACCEPT
+systemctl is-enabled --quiet firewalld || systemctl enable firewalld
+firewall-cmd --direct --permanent --add-rule ipv4 nat OUTPUT 0 -d {} -p tcp \
+    -j DNAT --to-destination {}
+firewall-cmd --reload
+"""
+
+CLOUD_INIT_WAIT_SCRIPT = """#!/bin/bash
+set -e
+cloud-init status --wait
+"""
+
+
+class AsyncSSHProvisioner:
+    """Provision a manually created machine via SSH."""
+
+    user = ""
+    host = ""
+    private_key_path = ""
+
+    def __init__(self, user, host, private_key_path, log=None):
+        self.host = host
+        self.user = user
+        self.private_key_path = private_key_path
+        self.log = log if log else logging.getLogger(__name__)
+
+    async def _scp(self, source_file, destination_file):
+        """Execute an scp command. Requires a fully qualified source and
+        destination.
+
+        :param str source_file: Path to the source file
+        :param str destination_file: Path to the destination file
+        """
+        cmd = [
+            "scp",
+            "-i",
+            os.path.expanduser(self.private_key_path),
+            "-o",
+            "StrictHostKeyChecking=no",
+            "-q",
+            "-B",
+        ]
+        destination = "{}@{}:{}".format(self.user, self.host, destination_file)
+        cmd.extend([source_file, destination])
+        process = await asyncio.create_subprocess_exec(*cmd)
+        await process.wait()
+        if process.returncode != 0:
+            raise CalledProcessError(returncode=process.returncode, cmd=cmd)
+
+    async def _ssh(self, command):
+        """Run a command remotely via SSH.
+
+        :param str command: The command to execute
+        :return: tuple: The stdout and stderr of the command execution
+        :raises: :class:`CalledProcessError` if the command fails
+        """
+
+        destination = "{}@{}".format(self.user, self.host)
+        cmd = [
+            "ssh",
+            "-i",
+            os.path.expanduser(self.private_key_path),
+            "-o",
+            "StrictHostKeyChecking=no",
+            "-q",
+            destination,
+        ]
+        cmd.extend([command])
+        process = await asyncio.create_subprocess_exec(
+            *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
+        )
+        stdout, stderr = await process.communicate()
+
+        if process.returncode != 0:
+            output = stderr.decode("utf-8").strip()
+            raise CalledProcessError(
+                returncode=process.returncode, cmd=cmd, output=output
+            )
+        return (stdout.decode("utf-8").strip(), stderr.decode("utf-8").strip())
+
+    async def _init_ubuntu_user(self):
+        """Initialize the ubuntu user.
+
+        :return: bool: If the initialization was successful
+        :raises: :class:`CalledProcessError` if the _ssh command fails
+        """
+        retry = 10
+        attempts = 0
+        delay = 15
+        while attempts <= retry:
+            try:
+                attempts += 1
+                # Attempt to establish a SSH connection
+                stdout, stderr = await self._ssh("sudo -n true")
+                break
+            except CalledProcessError as e:
+                self.log.debug(
+                    "Waiting for VM to boot, sleeping {} seconds".format(delay)
+                )
+                if attempts > retry:
+                    raise e
+                else:
+                    await asyncio.sleep(delay)
+                    # Slowly back off the retry
+                    delay += 15
+
+        # Infer the public key
+        public_key = None
+        public_key_path = "{}.pub".format(self.private_key_path)
+
+        if not os.path.exists(public_key_path):
+            raise FileNotFoundError(
+                "Public key '{}' doesn't exist.".format(public_key_path)
+            )
+
+        with open(public_key_path, "r") as f:
+            public_key = f.readline()
+
+        script = INITIALIZE_UBUNTU_SCRIPT.format(public_key)
+
+        stdout, stderr = await self._run_configure_script(script)
+
+        return True
+
+    async def _detect_hardware_and_os(self):
+        """Detect the target hardware capabilities and OS series.
+
+        :return: dict: Detected series, arch, cpu-cores and mem (in MB).
+        """
+
+        info = {
+            "series": "",
+            "arch": "",
+            "cpu-cores": "",
+            "mem": "",
+        }
+
+        stdout, stderr = await self._run_configure_script(DETECTION_SCRIPT)
+
+        lines = stdout.split("\n")
+        info["series"] = lines[0].strip()
+        info["arch"] = normalize_arch(lines[1].strip())
+
+        memKb = re.split(r"\s+", lines[2])[1]
+
+        # Convert kilobytes -> megabytes
+        info["mem"] = round(int(memKb) / 1024)
+
+        # Detect available CPU cores, counting each physical package once
+        recorded = {}
+        # Track the current physical id across lines; resetting it on every
+        # iteration would make every package look identical and undercount
+        physical_id = ""
+        for line in lines[3:]:
+            if line.find("physical id") == 0:
+                physical_id = line.split(":")[1].strip()
+            elif line.find("cpu cores") == 0:
+                cores = line.split(":")[1].strip()
+
+                if physical_id not in recorded.keys():
+                    info["cpu-cores"] += int(cores)
+                    recorded[physical_id] = True
+
+        return info
+
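For a hypothetical 4-core, 8 GB Ubuntu 20.04 target, the returned dictionary would look like:

```python
{
    "series": "focal",
    "arch": "amd64",
    "cpu-cores": 4,
    "mem": 8192,
}
```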
+    async def provision_machine(self):
+        """Perform the initial provisioning of the target machine.
+
+        :return: client.AddMachineParams: Parameters for the machine to add
+        """
+        params = client.AddMachineParams()
+
+        if await self._init_ubuntu_user():
+            hw = await self._detect_hardware_and_os()
+            params.series = hw["series"]
+            params.instance_id = "manual:{}".format(self.host)
+            params.nonce = "manual:{}:{}".format(
+                self.host,
+                str(uuid.uuid4()),
+            )  # a nop for Juju w/manual machines
+            params.hardware_characteristics = {
+                "arch": hw["arch"],
+                "mem": int(hw["mem"]),
+                "cpu-cores": int(hw["cpu-cores"]),
+            }
+            params.addresses = [{"value": self.host, "type": "ipv4", "scope": "public"}]
+
+        return params
+
+    async def install_agent(
+        self, connection, nonce, machine_id, proxy=None, series=None
+    ):
+        """
+        :param object connection: Connection to Juju API
+        :param str nonce: The nonce machine specification
+        :param str machine_id: The id assigned to the machine
+        :param str proxy: IP of the API_PROXY
+        :param str series: OS name
+
+        :return: bool: If the initialization was successful
+        """
+        # The path where the Juju agent should be installed.
+        data_dir = "/var/lib/juju"
+
+        # Disabling this prevents `apt-get update` from running initially, so
+        # charms will fail to deploy
+        disable_package_commands = False
+
+        client_facade = client.ClientFacade.from_connection(connection)
+        results = await client_facade.ProvisioningScript(
+            data_dir=data_dir,
+            disable_package_commands=disable_package_commands,
+            machine_id=machine_id,
+            nonce=nonce,
+        )
+
+        """Get the IP of the controller
+
+        Parse the provisioning script, looking for the first apiaddress.
+
+        Example:
+            apiaddresses:
+            - 10.195.8.2:17070
+            - 127.0.0.1:17070
+            - '[::1]:17070'
+        """
+        try:
+            # Wait until cloud-init finish
+            await self._run_configure_script(CLOUD_INIT_WAIT_SCRIPT)
+        except Exception:
+            self.log.debug("cloud-init not present in machine {}".format(machine_id))
+
+        if proxy:
+            m = re.search(
+                r"apiaddresses:\n- (\d+\.\d+\.\d+\.\d+):17070", results.script
+            )
+            apiaddress = m.group(1)
+
+            """Add IP Table rule
+
+            In order to route the traffic to the private ip of the Juju controller
+            we use a DNAT rule to tell the machine that the destination for the
+            private address is the public address of the machine where the Juju
+            controller is running in LXD. That machine will have a complimentary
+            iptables rule, routing traffic to the appropriate LXD container.
+            """
+
+            if series and ("centos" in series or "rhel" in series):
+                script = IPTABLES_SCRIPT_RHEL.format(apiaddress, proxy)
+            else:
+                script = IPTABLES_SCRIPT.format(apiaddress, proxy)
+
+            # Run this in a retry loop, because dpkg may be running and cause the
+            # script to fail.
+            retry = 10
+            attempts = 0
+            delay = 15
+
+            while attempts <= retry:
+                try:
+                    attempts += 1
+                    stdout, stderr = await self._run_configure_script(script)
+                    break
+                except Exception as e:
+                    self.log.debug(
+                        "Waiting for DNAT rules to be applied and saved, "
+                        "sleeping {} seconds".format(delay)
+                    )
+                    if attempts > retry:
+                        raise e
+                    else:
+                        await asyncio.sleep(delay)
+                        # Slowly back off the retry
+                        delay += 15
+
+        # self.log.debug("Running configure script")
+        await self._run_configure_script(results.script)
+        # self.log.debug("Configure script finished")
+
+    async def _run_configure_script(self, script, root=True):
+        """Run the script to install the Juju agent on the target machine.
+
+        :param str script: The script to be executed
+        """
+        _, tmpFile = tempfile.mkstemp()
+        # the context manager closes the temporary file; no explicit close()
+        with open(tmpFile, "w") as f:
+            f.write(script)
+
+        # copy the local copy of the script to the remote machine
+        await self._scp(tmpFile, tmpFile)
+
+        # run the provisioning script
+        return await self._ssh(
+            "{} /bin/bash {}".format("sudo" if root else "", tmpFile)
+        )
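A hedged end-to-end sketch of how the provisioner is intended to be driven. The host, user and key path are illustrative, and registering the machine with the Juju API (to obtain the machine id used by `install_agent`) is elided, since it goes through the client facades:

```python
import asyncio

from osm_lcm.n2vc.provisioner import AsyncSSHProvisioner


async def main():
    provisioner = AsyncSSHProvisioner(
        user="ubuntu",
        host="192.0.2.10",
        private_key_path="~/.ssh/id_rsa",
    )
    # SSH in, initialize the ubuntu user and detect hardware/OS
    params = await provisioner.provision_machine()
    print(params.instance_id)  # e.g. "manual:192.0.2.10"
    # The params are then passed to the Juju API to add the machine, and
    # install_agent() is called with the resulting machine id and nonce.


asyncio.run(main())
```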
diff --git a/osm_lcm/n2vc/store.py b/osm_lcm/n2vc/store.py
new file mode 100644 (file)
index 0000000..e24d9fc
--- /dev/null
@@ -0,0 +1,300 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+import abc
+import typing
+
+from motor.motor_asyncio import AsyncIOMotorClient
+from osm_lcm.n2vc.config import EnvironConfig
+from osm_lcm.n2vc.vca.connection_data import ConnectionData
+from osm_common.dbmongo import DbMongo, DbException
+from osm_common.dbbase import Encryption
+
+
+DB_NAME = "osm"
+
+
+class Store(abc.ABC):
+    @abc.abstractmethod
+    async def get_vca_connection_data(self, vca_id: str) -> ConnectionData:
+        """
+        Get VCA connection data
+
+        :param: vca_id: VCA ID
+
+        :returns: ConnectionData with the information from the database
+        """
+
+    @abc.abstractmethod
+    async def update_vca_endpoints(
+        self, endpoints: typing.List[str], vca_id: str = None
+    ):
+        """
+        Update VCA endpoints
+
+        :param: endpoints: List of endpoints to write in the database
+        :param: vca_id: VCA ID
+        """
+
+    @abc.abstractmethod
+    async def get_vca_endpoints(self, vca_id: str = None) -> typing.List[str]:
+        """
+        Get list of VCA endpoints
+
+        :param: vca_id: VCA ID
+
+        :returns: List of endpoints
+        """
+
+    @abc.abstractmethod
+    async def get_vca_id(self, vim_id: str = None) -> str:
+        """
+        Get VCA id for a VIM account
+
+        :param: vim_id: Vim account ID
+        """
+
+
+class DbMongoStore(Store):
+    def __init__(self, db: DbMongo):
+        """
+        Constructor
+
+        :param: db: osm_common.dbmongo.DbMongo object
+        """
+        self.db = db
+
+    async def get_vca_connection_data(self, vca_id: str) -> ConnectionData:
+        """
+        Get VCA connection data
+
+        :param: vca_id: VCA ID
+
+        :returns: ConnectionData with the information from the database
+        """
+        data = self.db.get_one("vca", q_filter={"_id": vca_id})
+        self.db.encrypt_decrypt_fields(
+            data,
+            "decrypt",
+            ["secret", "cacert"],
+            schema_version=data["schema_version"],
+            salt=data["_id"],
+        )
+        return ConnectionData(**data)
+
+    async def update_vca_endpoints(
+        self, endpoints: typing.List[str], vca_id: str = None
+    ):
+        """
+        Update VCA endpoints
+
+        :param: endpoints: List of endpoints to write in the database
+        :param: vca_id: VCA ID
+        """
+        if vca_id:
+            data = self.db.get_one("vca", q_filter={"_id": vca_id})
+            data["endpoints"] = endpoints
+            self._update("vca", vca_id, data)
+        else:
+            # The default VCA. Data for the endpoints is in a different place
+            juju_info = self._get_juju_info()
+            # If it does not exist yet, create it
+            if not juju_info:
+                try:
+                    self.db.create(
+                        "vca",
+                        {"_id": "juju"},
+                    )
+                except DbException as e:
+                    # Race condition: check if another N2VC worker has created it
+                    juju_info = self._get_juju_info()
+                    if not juju_info:
+                        raise e
+            self.db.set_one(
+                "vca",
+                {"_id": "juju"},
+                {"api_endpoints": endpoints},
+            )
+
+    async def get_vca_endpoints(self, vca_id: str = None) -> typing.List[str]:
+        """
+        Get list of VCA endpoints
+
+        :param: vca_id: VCA ID
+
+        :returns: List of endpoints
+        """
+        endpoints = []
+        if vca_id:
+            endpoints = (await self.get_vca_connection_data(vca_id)).endpoints
+        else:
+            juju_info = self._get_juju_info()
+            if juju_info and "api_endpoints" in juju_info:
+                endpoints = juju_info["api_endpoints"]
+        return endpoints
+
+    async def get_vca_id(self, vim_id: str = None) -> str:
+        """
+        Get VCA ID from the database for a given VIM account ID
+
+        :param: vim_id: VIM account ID
+        """
+        return (
+            self.db.get_one(
+                "vim_accounts",
+                q_filter={"_id": vim_id},
+                fail_on_empty=False,
+            ).get("vca")
+            if vim_id
+            else None
+        )
+
+    def _update(self, collection: str, id: str, data: dict):
+        """
+        Update object in database
+
+        :param: collection: Collection name
+        :param: id: ID of the object
+        :param: data: Object data
+        """
+        self.db.replace(
+            collection,
+            id,
+            data,
+        )
+
+    def _get_juju_info(self):
+        """Get Juju information (the default VCA) from the admin collection"""
+        return self.db.get_one(
+            "vca",
+            q_filter={"_id": "juju"},
+            fail_on_empty=False,
+        )
+
+
+class MotorStore(Store):
+    def __init__(self, uri: str):
+        """
+        Constructor
+
+        :param: uri: Connection string to connect to the database.
+        """
+        self._client = AsyncIOMotorClient(uri)
+        self._secret_key = None
+        self._config = EnvironConfig(prefixes=["OSMLCM_", "OSMMON_"])
+        self.encryption = Encryption(
+            uri=uri,
+            config=self._config,
+            encoding_type="utf-8",
+            logger_name="db",
+        )
+
+    @property
+    def _database(self):
+        return self._client[DB_NAME]
+
+    @property
+    def _vca_collection(self):
+        return self._database["vca"]
+
+    @property
+    def _admin_collection(self):
+        return self._database["admin"]
+
+    @property
+    def _vim_accounts_collection(self):
+        return self._database["vim_accounts"]
+
+    async def get_vca_connection_data(self, vca_id: str) -> ConnectionData:
+        """
+        Get VCA connection data
+
+        :param: vca_id: VCA ID
+
+        :returns: ConnectionData with the information from the database
+        """
+        data = await self._vca_collection.find_one({"_id": vca_id})
+        if not data:
+            raise Exception("vca with id {} not found".format(vca_id))
+        await self.encryption.decrypt_fields(
+            data,
+            ["secret", "cacert"],
+            schema_version=data["schema_version"],
+            salt=data["_id"],
+        )
+        return ConnectionData(**data)
+
+    async def update_vca_endpoints(
+        self, endpoints: typing.List[str], vca_id: str = None
+    ):
+        """
+        Update VCA endpoints
+
+        :param: endpoints: List of endpoints to write in the database
+        :param: vca_id: VCA ID
+        """
+        if vca_id:
+            data = await self._vca_collection.find_one({"_id": vca_id})
+            data["endpoints"] = endpoints
+            await self._vca_collection.replace_one({"_id": vca_id}, data)
+        else:
+            # The default VCA. Data for the endpoints is in a different place
+            juju_info = await self._get_juju_info()
+            # If it does not exist yet, create it
+            if not juju_info:
+                try:
+                    await self._admin_collection.insert_one({"_id": "juju"})
+                except Exception as e:
+                    # Race condition: check if another N2VC worker has created it
+                    juju_info = await self._get_juju_info()
+                    if not juju_info:
+                        raise e
+
+            await self._admin_collection.replace_one(
+                {"_id": "juju"}, {"api_endpoints": endpoints}
+            )
+
+    async def get_vca_endpoints(self, vca_id: str = None) -> typing.List[str]:
+        """
+        Get list of VCA endpoints
+
+        :param: vca_id: VCA ID
+
+        :returns: List of endpoints
+        """
+        endpoints = []
+        if vca_id:
+            endpoints = (await self.get_vca_connection_data(vca_id)).endpoints
+        else:
+            juju_info = await self._get_juju_info()
+            if juju_info and "api_endpoints" in juju_info:
+                endpoints = juju_info["api_endpoints"]
+        return endpoints
+
+    async def get_vca_id(self, vim_id: str = None) -> str:
+        """
+        Get VCA ID from the database for a given VIM account ID
+
+        :param: vim_id: VIM account ID
+        """
+        vca_id = None
+        if vim_id:
+            vim_account = await self._vim_accounts_collection.find_one({"_id": vim_id})
+            if vim_account and "vca" in vim_account:
+                vca_id = vim_account["vca"]
+        return vca_id
+
+    async def _get_juju_info(self):
+        """Get Juju information (the default VCA) from the admin collection"""
+        return await self._admin_collection.find_one({"_id": "juju"})
diff --git a/osm_lcm/n2vc/tests/__init__.py b/osm_lcm/n2vc/tests/__init__.py
new file mode 100644 (file)
index 0000000..ec4fe4b
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
diff --git a/osm_lcm/n2vc/tests/unit/README.md b/osm_lcm/n2vc/tests/unit/README.md
new file mode 100644 (file)
index 0000000..39c791b
--- /dev/null
@@ -0,0 +1,48 @@
+<!--- Copyright 2020 Canonical Ltd.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License. --->
+
+
+# N2VC Unit Testing Guideline
+
+## Use `test_libjuju.py` as a guideline
+
+Even though the test cases still have plenty of room for improvement, this file is the most polished of them all. It should therefore be used as the baseline, and the minimum standard, for any future tests or changes to current tests.
+
+## Try to use mock as much as possible
+
+There are some cases where fake classes (which still inherit from Mock classes) are used. This is done only where constructing the real object would require too much additional mocking. Using standard mocks gives more testing possibilities.
+
+## Separate your Test Cases into different classes
+
+It is preferable to have a TestCase class for each method, with several test methods covering the different scenarios. If all of the classes need the same setup, a parent TestCase class can be created with a setUp method, and the other TestCases can then inherit from it like this:
+
+```python
+class GetControllerTest(LibjujuTestCase):
+
+    def setUp(self):
+        super(GetControllerTest, self).setUp()
+```
+
+## Things to assert
+
+It is more important to assert the important logic than to achieve high code coverage without actually testing the code.
+
+These are some of the things that should always be asserted; a minimal illustrative sketch follows the list:
+
+* Assert that all exceptions are raised correctly.
+* Assert that the return values are the expected ones for **both** successful and unsuccessful executions.
+* Assert that all important calls have been made the correct number of times and with the correct arguments.
+* Assert that the correct log messages are posted when the method fails.
+* Assert that everything that needs to be disconnected after execution is correctly disconnected.
+
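A minimal sketch of a test that follows these points; the class and mock names are hypothetical, not taken from the real suite:

```python
from unittest import TestCase
from unittest.mock import MagicMock


class ExampleAssertionsTest(TestCase):  # illustrative only
    def test_return_value_and_calls(self):
        connection = MagicMock()
        connection.fetch.return_value = ["10.0.0.1:17070"]

        endpoints = connection.fetch("vca-id")

        # Assert both the return value and the exact call arguments,
        # as recommended above.
        self.assertEqual(endpoints, ["10.0.0.1:17070"])
        connection.fetch.assert_called_once_with("vca-id")
```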
diff --git a/osm_lcm/n2vc/tests/unit/__init__.py b/osm_lcm/n2vc/tests/unit/__init__.py
new file mode 100644 (file)
index 0000000..ec4fe4b
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
diff --git a/osm_lcm/n2vc/tests/unit/test_config.py b/osm_lcm/n2vc/tests/unit/test_config.py
new file mode 100644 (file)
index 0000000..77d5a3a
--- /dev/null
@@ -0,0 +1,58 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+from unittest import TestCase
+from unittest.mock import patch
+
+
+from osm_lcm.n2vc.config import EnvironConfig, ModelConfig, MODEL_CONFIG_KEYS
+
+
+def generate_os_environ_dict(config, prefix):
+    return {f"{prefix}{k.upper()}": v for k, v in config.items()}
+
+
+class TestEnvironConfig(TestCase):
+    def setUp(self):
+        self.config = {"host": "1.2.3.4", "port": "17070", "k8s_cloud": "k8s"}
+
+    @patch("os.environ.items")
+    def test_environ_config_lcm(self, mock_environ_items):
+        envs = generate_os_environ_dict(self.config, "OSMLCM_VCA_")
+        envs["not_valid_env"] = "something"
+        mock_environ_items.return_value = envs.items()
+        config = EnvironConfig()
+        self.assertEqual(config, self.config)
+
+    @patch("os.environ.items")
+    def test_environ_config_mon(self, mock_environ_items):
+        envs = generate_os_environ_dict(self.config, "OSMMON_VCA_")
+        envs["not_valid_env"] = "something"
+        mock_environ_items.return_value = envs.items()
+        config = EnvironConfig()
+        self.assertEqual(config, self.config)
+
+
+class TestModelConfig(TestCase):
+    def setUp(self):
+        self.config = {
+            f'model_config_{model_key.replace("-", "_")}': "somevalue"
+            for model_key in MODEL_CONFIG_KEYS
+        }
+        self.config["model_config_invalid"] = "something"
+        self.model_config = {model_key: "somevalue" for model_key in MODEL_CONFIG_KEYS}
+
+    def test_model_config(self):
+        model_config = ModelConfig(self.config)
+        self.assertEqual(model_config, self.model_config)
diff --git a/osm_lcm/n2vc/tests/unit/test_connection.py b/osm_lcm/n2vc/tests/unit/test_connection.py
new file mode 100644 (file)
index 0000000..6092b45
--- /dev/null
@@ -0,0 +1,68 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+import asyncio
+from unittest import TestCase
+from unittest.mock import Mock, patch
+
+
+from osm_lcm.n2vc.tests.unit.utils import AsyncMock
+from osm_lcm.n2vc.vca import connection
+
+
+class TestConnection(TestCase):
+    def setUp(self):
+        self.loop = asyncio.get_event_loop()
+        self.store = AsyncMock()
+
+    def test_load_from_store(self):
+        self.loop.run_until_complete(connection.get_connection(self.store, "vim_id"))
+
+        self.store.get_vca_connection_data.assert_called_once()
+
+    def test_cloud_properties(self):
+        conn = self.loop.run_until_complete(
+            connection.get_connection(self.store, "vim_id")
+        )
+        conn._data = Mock()
+        conn._data.lxd_cloud = "name"
+        conn._data.k8s_cloud = "name"
+        conn._data.lxd_credentials = "credential"
+        conn._data.k8s_credentials = "credential"
+
+        self.assertEqual(conn.lxd_cloud.name, "name")
+        self.assertEqual(conn.lxd_cloud.credential_name, "credential")
+        self.assertEqual(conn.k8s_cloud.name, "name")
+        self.assertEqual(conn.k8s_cloud.credential_name, "credential")
+
+    @patch("osm_lcm.n2vc.vca.connection.EnvironConfig")
+    @patch("osm_lcm.n2vc.vca.connection_data.base64_to_cacert")
+    def test_load_from_env(self, mock_base64_to_cacert, mock_env):
+        mock_base64_to_cacert.return_value = "cacert"
+        mock_env.return_value = {
+            "endpoints": "1.2.3.4:17070",
+            "user": "user",
+            "secret": "secret",
+            "cacert": "cacert",
+            "pubkey": "pubkey",
+            "cloud": "cloud",
+            "credentials": "credentials",
+            "k8s_cloud": "k8s_cloud",
+            "k8s_credentials": "k8s_credentials",
+            "model_config": {},
+            "api-proxy": "api_proxy",
+        }
+        self.store.get_vca_endpoints.return_value = ["1.2.3.5:17070"]
+        self.loop.run_until_complete(connection.get_connection(self.store))
+        self.store.get_vca_connection_data.assert_not_called()
diff --git a/osm_lcm/n2vc/tests/unit/test_definitions.py b/osm_lcm/n2vc/tests/unit/test_definitions.py
new file mode 100644 (file)
index 0000000..c80960b
--- /dev/null
@@ -0,0 +1,48 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+from typing import NoReturn
+from unittest import TestCase
+from unittest.mock import patch
+
+from osm_lcm.n2vc.definitions import Offer, RelationEndpoint
+
+
+@patch("osm_lcm.n2vc.definitions.get_ee_id_components")
+class RelationEndpointTest(TestCase):
+    def test_success(self, mock_get_ee_id_components) -> NoReturn:
+        mock_get_ee_id_components.return_value = ("model", "application", "machine_id")
+        relation_endpoint = RelationEndpoint(
+            "model.application.machine_id",
+            "vca",
+            "endpoint",
+        )
+        self.assertEqual(relation_endpoint.model_name, "model")
+        self.assertEqual(relation_endpoint.application_name, "application")
+        self.assertEqual(relation_endpoint.vca_id, "vca")
+        self.assertEqual(relation_endpoint.endpoint, "application:endpoint")
+        self.assertEqual(relation_endpoint.endpoint_name, "endpoint")
+        self.assertEqual(
+            str(relation_endpoint), "application:endpoint (model: model, vca: vca)"
+        )
+
+
+class OfferTest(TestCase):
+    def test_success(self) -> NoReturn:
+        url = "admin/test-model.my-offer"
+        offer = Offer(url)
+        self.assertEqual(offer.model_name, "test-model")
+        self.assertEqual(offer.name, "my-offer")
+        self.assertEqual(offer.username, "admin")
+        self.assertEqual(offer.url, url)
diff --git a/osm_lcm/n2vc/tests/unit/test_juju_watcher.py b/osm_lcm/n2vc/tests/unit/test_juju_watcher.py
new file mode 100644 (file)
index 0000000..b60a4b3
--- /dev/null
@@ -0,0 +1,336 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+import json
+import os
+from time import sleep
+import asynctest
+import asyncio
+
+from osm_lcm.n2vc.juju_watcher import JujuModelWatcher, entity_ready, status
+from osm_lcm.n2vc.exceptions import EntityInvalidException
+from .utils import FakeN2VC, AsyncMock, Deltas, FakeWatcher
+from juju.application import Application
+from juju.action import Action
+from juju.annotation import Annotation
+from juju.client._definitions import AllWatcherNextResults
+from juju.machine import Machine
+from juju.model import Model
+from juju.unit import Unit
+from unittest import mock, TestCase
+from unittest.mock import Mock
+
+
+class JujuWatcherTest(asynctest.TestCase):
+    def setUp(self):
+        self.n2vc = FakeN2VC()
+        self.model = Mock()
+        self.loop = asyncio.new_event_loop()
+
+    def test_get_status(self):
+        tests = Deltas
+        for test in tests:
+            (status, message, vca_status) = JujuModelWatcher.get_status(test.delta)
+            self.assertEqual(status, test.entity_status.status)
+            self.assertEqual(message, test.entity_status.message)
+            self.assertEqual(vca_status, test.entity_status.vca_status)
+
+    @mock.patch("osm_lcm.n2vc.juju_watcher.client.AllWatcherFacade.from_connection")
+    def test_model_watcher(self, allwatcher):
+        tests = Deltas
+        allwatcher.return_value = FakeWatcher()
+        n2vc = AsyncMock()
+        for test in tests:
+            with self.assertRaises(asyncio.TimeoutError):
+                allwatcher.return_value.delta_to_return = [test.delta]
+                self.loop.run_until_complete(
+                    JujuModelWatcher.model_watcher(
+                        self.model,
+                        test.filter.entity_id,
+                        test.filter.entity_type,
+                        timeout=0,
+                        db_dict={"something"},
+                        n2vc=n2vc,
+                        vca_id=None,
+                    )
+                )
+
+            n2vc.write_app_status_to_db.assert_called()
+
+    @mock.patch("osm_lcm.n2vc.juju_watcher.asyncio.wait")
+    def test_wait_for(self, wait):
+        wait.return_value = asyncio.Future()
+        wait.return_value.set_result(None)
+
+        machine = AsyncMock()
+        self.loop.run_until_complete(JujuModelWatcher.wait_for(self.model, machine))
+
+    @mock.patch("osm_lcm.n2vc.juju_watcher.asyncio.wait")
+    def test_wait_for_exception(self, wait):
+        wait.return_value = asyncio.Future()
+        wait.return_value.set_result(None)
+        wait.side_effect = Exception("error")
+
+        machine = AsyncMock()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(JujuModelWatcher.wait_for(self.model, machine))
+
+    def test_wait_for_invalid_entity_exception(self):
+        with self.assertRaises(EntityInvalidException):
+            self.loop.run_until_complete(
+                JujuModelWatcher.wait_for(
+                    self.model,
+                    Annotation(0, self.model),
+                    total_timeout=None,
+                    progress_timeout=None,
+                )
+            )
+
+
+class EntityReadyTest(TestCase):
+    @mock.patch("juju.application.Application.units")
+    def setUp(self, mock_units):
+        self.model = Model()
+        self.model._connector = mock.MagicMock()
+
+    def test_invalid_entity(self):
+        with self.assertRaises(EntityInvalidException):
+            entity_ready(Annotation(0, self.model))
+
+    @mock.patch("juju.machine.Machine.agent_status")
+    def test_machine_entity(self, mock_machine_agent_status):
+        entity = Machine(0, self.model)
+        self.assertEqual(entity.entity_type, "machine")
+        self.assertTrue(isinstance(entity_ready(entity), bool))
+
+    @mock.patch("juju.action.Action.status")
+    def test_action_entity(self, mock_action_status):
+        entity = Action(0, self.model)
+        self.assertEqual(entity.entity_type, "action")
+        self.assertTrue(isinstance(entity_ready(entity), bool))
+
+    @mock.patch("juju.application.Application.status")
+    def test_application_entity(self, mock_application_status):
+        entity = Application(0, self.model)
+        self.assertEqual(entity.entity_type, "application")
+        self.assertTrue(isinstance(entity_ready(entity), bool))
+
+
+@mock.patch("osm_lcm.n2vc.juju_watcher.client.AllWatcherFacade.from_connection")
+class EntityStateTest(TestCase):
+    def setUp(self):
+        self.model = Model()
+        self.model._connector = mock.MagicMock()
+        self.loop = asyncio.new_event_loop()
+        self.application = Mock(Application)
+        self.upgrade_file = None
+        self.line_number = 1
+
+    def _fetch_next_delta(self):
+        delta = None
+        while delta is None:
+            raw_data = self.upgrade_file.readline()
+            if not raw_data:
+                raise EOFError("Log file is out of events")
+            try:
+                delta = json.loads(raw_data)
+            except ValueError:
+                continue
+
+        if delta[0] == "unit":
+            if delta[2]["life"] == "dead":
+                # Remove the unit from the application
+                for unit in self.application.units:
+                    if unit.entity_id == delta[2]["name"]:
+                        self.application.units.remove(unit)
+            else:
+                unit_present = False
+                for unit in self.application.units:
+                    if unit.entity_id == delta[2]["name"]:
+                        unit_present = True
+
+                if not unit_present:
+                    print("Application gets a new unit: {}".format(delta[2]["name"]))
+                    unit = Mock(Unit)
+                    unit.entity_id = delta[2]["name"]
+                    unit.entity_type = "unit"
+                    self.application.units.append(unit)
+
+        print("{}  {}".format(self.line_number, delta))
+        self.line_number = self.line_number + 1
+
+        return AllWatcherNextResults(
+            deltas=[
+                delta,
+            ]
+        )
+
+    def _ensure_state(self, filename, mock_all_watcher):
+        with open(
+            os.path.join(os.path.dirname(__file__), "testdata", filename),
+            "r",
+        ) as self.upgrade_file:
+            all_changes = AsyncMock()
+            all_changes.Next.side_effect = self._fetch_next_delta
+            mock_all_watcher.return_value = all_changes
+
+            self.loop.run_until_complete(
+                JujuModelWatcher.ensure_units_idle(
+                    model=self.model, application=self.application
+                )
+            )
+
+            with self.assertRaises(EOFError, msg="Not all events consumed"):
+                change = self._fetch_next_delta()
+                print(change.deltas[0].deltas)
+
+    def _slow_changes(self):
+        sleep(0.1)
+        return AllWatcherNextResults(
+            deltas=[
+                json.loads(
+                    """["unit","change",
+                {
+                    "name": "app-vnf-7a49ace2b6-z0/2",
+                    "application": "app-vnf-7a49ace2b6-z0",
+                    "workload-status": {
+                        "current": "active",
+                        "message": "",
+                        "since": "2022-04-26T18:50:27.579802723Z"},
+                    "agent-status": {
+                        "current": "idle",
+                        "message": "",
+                        "since": "2022-04-26T18:50:28.592142816Z"}
+                }]"""
+                ),
+            ]
+        )
+
+    def test_timeout(self, mock_all_watcher):
+        unit1 = Mock(Unit)
+        unit1.entity_id = "app-vnf-7a49ace2b6-z0/0"
+        unit1.entity_type = "unit"
+        self.application.units = [
+            unit1,
+        ]
+
+        all_changes = AsyncMock()
+        all_changes.Next.side_effect = self._slow_changes
+        mock_all_watcher.return_value = all_changes
+
+        with self.assertRaises(TimeoutError):
+            self.loop.run_until_complete(
+                JujuModelWatcher.wait_for_units_idle(
+                    model=self.model, application=self.application, timeout=0.01
+                )
+            )
+
+    def test_machine_unit_upgrade(self, mock_all_watcher):
+        unit1 = Mock(Unit)
+        unit1.entity_id = "app-vnf-7a49ace2b6-z0/0"
+        unit1.entity_type = "unit"
+        unit2 = Mock(Unit)
+        unit2.entity_id = "app-vnf-7a49ace2b6-z0/1"
+        unit2.entity_type = "unit"
+        unit3 = Mock(Unit)
+        unit3.entity_id = "app-vnf-7a49ace2b6-z0/2"
+        unit3.entity_type = "unit"
+
+        self.application.units = [unit1, unit2, unit3]
+
+        self._ensure_state("upgrade-machine.log", mock_all_watcher)
+
+    def test_operator_upgrade(self, mock_all_watcher):
+        unit1 = Mock(Unit)
+        unit1.entity_id = "sshproxy/0"
+        unit1.entity_type = "unit"
+        self.application.units = [
+            unit1,
+        ]
+        self._ensure_state("upgrade-operator.log", mock_all_watcher)
+
+    def test_podspec_stateful_upgrade(self, mock_all_watcher):
+        unit1 = Mock(Unit)
+        unit1.entity_id = "mongodb/0"
+        unit1.entity_type = "unit"
+        self.application.units = [
+            unit1,
+        ]
+        self._ensure_state("upgrade-podspec-stateful.log", mock_all_watcher)
+
+    def test_podspec_stateless_upgrade(self, mock_all_watcher):
+        unit1 = Mock(Unit)
+        unit1.entity_id = "lcm/9"
+        unit1.entity_type = "unit"
+        self.application.units = [
+            unit1,
+        ]
+        self._ensure_state("upgrade-podspec-stateless.log", mock_all_watcher)
+
+    def test_sidecar_upgrade(self, mock_all_watcher):
+        unit1 = Mock(Unit)
+        unit1.entity_id = "kafka/0"
+        unit1.entity_type = "unit"
+        self.application.units = [
+            unit1,
+        ]
+        self._ensure_state("upgrade-sidecar.log", mock_all_watcher)
+
+
+class StatusTest(TestCase):
+    def setUp(self):
+        self.model = Model()
+        self.model._connector = mock.MagicMock()
+
+    @mock.patch("osm_lcm.n2vc.juju_watcher.derive_status")
+    def test_invalid_entity(self, mock_derive_status):
+        application = mock.MagicMock()
+        mock_derive_status.return_value = "active"
+
+        class FakeUnit:
+            @property
+            def workload_status(self):
+                return "active"
+
+        application.units = [FakeUnit()]
+        value = status(application)
+        mock_derive_status.assert_called_once()
+        self.assertTrue(isinstance(value, str))
+
+
+@asynctest.mock.patch("asyncio.sleep")
+class WaitForModelTest(asynctest.TestCase):
+    @asynctest.mock.patch("juju.client.connector.Connector.connect")
+    def setUp(self, mock_connect=None):
+        self.loop = asyncio.new_event_loop()
+        self.model = Model()
+
+    @asynctest.mock.patch("juju.model.Model.block_until")
+    def test_wait_for_model(self, mock_block_until, mock_sleep):
+        self.loop.run_until_complete(
+            JujuModelWatcher.wait_for_model(self.model, timeout=None)
+        )
+        mock_block_until.assert_called()
+
+    @asynctest.mock.patch("asyncio.ensure_future")
+    @asynctest.mock.patch("asyncio.wait")
+    def test_wait_for_model_exception(self, mock_wait, mock_ensure_future, mock_sleep):
+        task = Mock()
+        mock_ensure_future.return_value = task
+        mock_wait.side_effect = Exception
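+        # If waiting on the model fails, the pending watcher task must be cancelled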
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                JujuModelWatcher.wait_for_model(self.model, timeout=None)
+            )
+        task.cancel.assert_called()
diff --git a/osm_lcm/n2vc/tests/unit/test_k8s_helm3_conn.py b/osm_lcm/n2vc/tests/unit/test_k8s_helm3_conn.py
new file mode 100644 (file)
index 0000000..3ff9197
--- /dev/null
@@ -0,0 +1,849 @@
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: alfonso.tiernosepulveda@telefonica.com
+##
+
+import asynctest
+import logging
+
+from asynctest.mock import Mock, patch
+from osm_common.dbmemory import DbMemory
+from osm_common.fslocal import FsLocal
+from osm_lcm.n2vc.k8s_helm3_conn import K8sHelm3Connector, K8sException
+
+__author__ = "Isabel Lloret <illoret@indra.es>"
+
+
+class TestK8sHelm3Conn(asynctest.TestCase):
+    logging.basicConfig(level=logging.DEBUG)
+    logger = logging.getLogger(__name__)
+    logger.setLevel(logging.DEBUG)
+
+    @patch("osm_lcm.n2vc.k8s_helm_base_conn.EnvironConfig")
+    async def setUp(self, mock_env):
+        mock_env.return_value = {"stablerepourl": "https://charts.helm.sh/stable"}
+        self.db = Mock(DbMemory())
+        self.fs = asynctest.Mock(FsLocal())
+        self.fs.path = "./tmp/"
+        self.namespace = "testk8s"
+        self.cluster_id = "helm3_cluster_id"
+        self.cluster_uuid = self.cluster_id
+        # Pass fake kubectl and helm commands to make sure no actual commands are called
+        K8sHelm3Connector._check_file_exists = asynctest.Mock(return_value=True)
+        cluster_dir = self.fs.path + self.cluster_id
+        self.env = {
+            "HELM_CACHE_HOME": "{}/.cache/helm".format(cluster_dir),
+            "HELM_CONFIG_HOME": "{}/.config/helm".format(cluster_dir),
+            "HELM_DATA_HOME": "{}/.local/share/helm".format(cluster_dir),
+            "KUBECONFIG": "{}/.kube/config".format(cluster_dir),
+        }
+        self.helm_conn = K8sHelm3Connector(self.fs, self.db, log=self.logger)
+        self.logger.debug("Set up executed")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_init_env(self):
+        k8s_creds = "false_credentials_string"
+        self.helm_conn._get_namespaces = asynctest.CoroutineMock(return_value=[])
+        self.helm_conn._create_namespace = asynctest.CoroutineMock()
+        self.helm_conn.repo_list = asynctest.CoroutineMock(return_value=[])
+        self.helm_conn.repo_add = asynctest.CoroutineMock()
+
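+        # init_env should reuse the given cluster id, create the requested
+        # namespace and register the default "stable" repo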
+        k8scluster_uuid, installed = await self.helm_conn.init_env(
+            k8s_creds, namespace=self.namespace, reuse_cluster_uuid=self.cluster_id
+        )
+
+        self.assertEqual(
+            k8scluster_uuid,
+            self.cluster_id,
+            "Check cluster_uuid",
+        )
+        self.helm_conn._get_namespaces.assert_called_once_with(self.cluster_id)
+        self.helm_conn._create_namespace.assert_called_once_with(
+            self.cluster_id, self.namespace
+        )
+        self.helm_conn.repo_list.assert_called_once_with(k8scluster_uuid)
+        self.helm_conn.repo_add.assert_called_once_with(
+            k8scluster_uuid, "stable", "https://charts.helm.sh/stable"
+        )
+        self.helm_conn.fs.reverse_sync.assert_called_once_with(
+            from_path=self.cluster_id
+        )
+        self.logger.debug(f"cluster_uuid: {k8scluster_uuid}")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_repo_add(self):
+        repo_name = "bitnami"
+        repo_url = "https://charts.bitnami.com/bitnami"
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=(0, ""))
+
+        await self.helm_conn.repo_add(self.cluster_uuid, repo_name, repo_url)
+
+        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_called_once_with(
+            from_path=self.cluster_id
+        )
+        self.assertEqual(
+            self.helm_conn._local_async_exec.call_count,
+            2,
+            "local_async_exec expected 2 calls, called {}".format(
+                self.helm_conn._local_async_exec.call_count
+            ),
+        )
+
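+        # Expected helm3 invocations, executed with the per-cluster KUBECONFIG environment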
+        repo_update_command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 repo update {}"
+        ).format(repo_name)
+        repo_add_command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 repo add {} {}"
+        ).format(repo_name, repo_url)
+        calls = self.helm_conn._local_async_exec.call_args_list
+        call0_kargs = calls[0][1]
+        self.assertEqual(
+            call0_kargs.get("command"),
+            repo_add_command,
+            "Invalid repo add command: {}".format(call0_kargs.get("command")),
+        )
+        self.assertEqual(
+            call0_kargs.get("env"),
+            self.env,
+            "Invalid env for add command: {}".format(call0_kargs.get("env")),
+        )
+        call1_kargs = calls[1][1]
+        self.assertEqual(
+            call1_kargs.get("command"),
+            repo_update_command,
+            "Invalid repo update command: {}".format(call1_kargs.get("command")),
+        )
+        self.assertEqual(
+            call1_kargs.get("env"),
+            self.env,
+            "Invalid env for update command: {}".format(call1_kargs.get("env")),
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_repo_list(self):
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+
+        await self.helm_conn.repo_list(self.cluster_uuid)
+
+        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_called_once_with(
+            from_path=self.cluster_id
+        )
+        command = "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 repo list --output yaml"
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_repo_remove(self):
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+        repo_name = "bitnami"
+        await self.helm_conn.repo_remove(self.cluster_uuid, repo_name)
+
+        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_called_once_with(
+            from_path=self.cluster_id
+        )
+        command = "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 repo remove {}".format(
+            repo_name
+        )
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=True
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_install(self):
+        kdu_model = "stable/openldap:1.2.2"
+        kdu_instance = "stable-openldap-0005399828"
+        db_dict = {}
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+        self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=None)
+        self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn._repo_to_oci_url = Mock(return_value=None)
+        self.kdu_instance = "stable-openldap-0005399828"
+        self.helm_conn.generate_kdu_instance_name = Mock(return_value=self.kdu_instance)
+        self.helm_conn._get_namespaces = asynctest.CoroutineMock(return_value=[])
+        self.helm_conn._namespace_exists = asynctest.CoroutineMock(
+            side_effect=self.helm_conn._namespace_exists
+        )
+        self.helm_conn._create_namespace = asynctest.CoroutineMock()
+
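+        # The target namespace does not exist yet, so install must create it first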
+        await self.helm_conn.install(
+            self.cluster_uuid,
+            kdu_model,
+            self.kdu_instance,
+            atomic=True,
+            namespace=self.namespace,
+            db_dict=db_dict,
+        )
+
+        self.helm_conn._namespace_exists.assert_called_once()
+        self.helm_conn._get_namespaces.assert_called_once()
+        self.helm_conn._create_namespace.assert_called_once_with(
+            self.cluster_id, self.namespace
+        )
+        self.helm_conn.fs.sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
+        )
+        self.helm_conn.fs.reverse_sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
+        )
+        self.helm_conn._store_status.assert_called_with(
+            cluster_id=self.cluster_id,
+            kdu_instance=kdu_instance,
+            namespace=self.namespace,
+            db_dict=db_dict,
+            operation="install",
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 "
+            "install stable-openldap-0005399828 --atomic --output yaml   "
+            "--timeout 300s --namespace testk8s   stable/openldap --version 1.2.2"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
+
+        # Exception test: if the namespace cannot be created, install must raise
+        self.helm_conn._namespace_exists.return_value = False
+        self.helm_conn._create_namespace.side_effect = Exception()
+
+        with self.assertRaises(K8sException):
+            await self.helm_conn.install(
+                self.cluster_uuid,
+                kdu_model,
+                self.kdu_instance,
+                atomic=True,
+                namespace=self.namespace,
+                db_dict=db_dict,
+            )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_namespace_exists(self):
+        self.helm_conn._get_namespaces = asynctest.CoroutineMock()
+
+        self.helm_conn._get_namespaces.return_value = ["testk8s", "kube-system"]
+        result = await self.helm_conn._namespace_exists(self.cluster_id, self.namespace)
+        self.helm_conn._get_namespaces.assert_called_once()
+        self.assertEqual(result, True)
+
+        self.helm_conn._get_namespaces.reset_mock()
+        result = await self.helm_conn._namespace_exists(
+            self.cluster_id, "none-exists-namespace"
+        )
+        self.helm_conn._get_namespaces.assert_called_once()
+        self.assertEqual(result, False)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_upgrade(self):
+        kdu_model = "stable/openldap:1.2.3"
+        kdu_instance = "stable-openldap-0005399828"
+        db_dict = {}
+        instance_info = {
+            "chart": "openldap-1.2.2",
+            "name": kdu_instance,
+            "namespace": self.namespace,
+            "revision": 1,
+            "status": "DEPLOYED",
+        }
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+        self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn._repo_to_oci_url = Mock(return_value=None)
+        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
+            return_value=instance_info
+        )
+        # TEST-1 (--force true)
+        await self.helm_conn.upgrade(
+            self.cluster_uuid,
+            kdu_instance,
+            kdu_model,
+            atomic=True,
+            db_dict=db_dict,
+            force=True,
+        )
+        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
+        )
+        self.helm_conn._store_status.assert_called_with(
+            cluster_id=self.cluster_id,
+            kdu_instance=kdu_instance,
+            namespace=self.namespace,
+            db_dict=db_dict,
+            operation="upgrade",
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
+            "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
+            "--namespace testk8s --atomic --force --output yaml  --timeout 300s   "
+            "--reuse-values --version 1.2.3"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
+
+        # TEST-2 (--force false)
+        await self.helm_conn.upgrade(
+            self.cluster_uuid,
+            kdu_instance,
+            kdu_model,
+            atomic=True,
+            db_dict=db_dict,
+        )
+        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
+        )
+        self.helm_conn._store_status.assert_called_with(
+            cluster_id=self.cluster_id,
+            kdu_instance=kdu_instance,
+            namespace=self.namespace,
+            db_dict=db_dict,
+            operation="upgrade",
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
+            "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
+            "--namespace testk8s --atomic --output yaml  --timeout 300s   "
+            "--reuse-values --version 1.2.3"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_upgrade_namespace(self):
+        kdu_model = "stable/openldap:1.2.3"
+        kdu_instance = "stable-openldap-0005399828"
+        db_dict = {}
+        instance_info = {
+            "chart": "openldap-1.2.2",
+            "name": kdu_instance,
+            "namespace": self.namespace,
+            "revision": 1,
+            "status": "DEPLOYED",
+        }
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+        self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn._repo_to_oci_url = Mock(return_value=None)
+        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
+            return_value=instance_info
+        )
+
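+        # An explicit namespace argument should override the one recorded in instance_info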
+        await self.helm_conn.upgrade(
+            self.cluster_uuid,
+            kdu_instance,
+            kdu_model,
+            atomic=True,
+            db_dict=db_dict,
+            namespace="default",
+        )
+        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_has_calls(
+            [
+                asynctest.call(from_path=self.cluster_id),
+                asynctest.call(from_path=self.cluster_id),
+            ]
+        )
+        self.helm_conn._store_status.assert_called_with(
+            cluster_id=self.cluster_id,
+            kdu_instance=kdu_instance,
+            namespace="default",
+            db_dict=db_dict,
+            operation="upgrade",
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
+            "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
+            "--namespace default --atomic --output yaml  --timeout 300s   "
+            "--reuse-values --version 1.2.3"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_scale(self):
+        kdu_model = "stable/openldap:1.2.3"
+        kdu_instance = "stable-openldap-0005399828"
+        db_dict = {}
+        instance_info = {
+            "chart": "openldap-1.2.3",
+            "name": kdu_instance,
+            "namespace": self.namespace,
+            "revision": 1,
+            "status": "DEPLOYED",
+        }
+        repo_list = [
+            {
+                "name": "stable",
+                "url": "https://kubernetes-charts.storage.googleapis.com/",
+            }
+        ]
+        kdu_values = """
+            # Default values for openldap.
+            # This is a YAML-formatted file.
+            # Declare variables to be passed into your templates.
+
+            replicaCount: 1
+            dummy-app:
+              replicas: 2
+        """
+
+        self.helm_conn.repo_list = asynctest.CoroutineMock(return_value=repo_list)
+        self.helm_conn.values_kdu = asynctest.CoroutineMock(return_value=kdu_values)
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+        self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn._repo_to_oci_url = Mock(return_value=None)
+        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
+            return_value=instance_info
+        )
+
+        # TEST-1: scale the top-level chart (replicaCount)
+        await self.helm_conn.scale(
+            kdu_instance,
+            2,
+            "",
+            kdu_model=kdu_model,
+            cluster_uuid=self.cluster_uuid,
+            atomic=True,
+            db_dict=db_dict,
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
+            "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
+            "--namespace testk8s --atomic --output yaml --set replicaCount=2 --timeout 1800s   "
+            "--reuse-values --version 1.2.3"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
+        # TEST-2: scale the "dummy-app" subchart (dummy-app.replicas)
+        await self.helm_conn.scale(
+            kdu_instance,
+            3,
+            "dummy-app",
+            kdu_model=kdu_model,
+            cluster_uuid=self.cluster_uuid,
+            atomic=True,
+            db_dict=db_dict,
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config "
+            "/usr/bin/helm3 upgrade stable-openldap-0005399828 stable/openldap "
+            "--namespace testk8s --atomic --output yaml --set dummy-app.replicas=3 --timeout 1800s   "
+            "--reuse-values --version 1.2.3"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
+        self.helm_conn.fs.reverse_sync.assert_called_with(from_path=self.cluster_id)
+        self.helm_conn._store_status.assert_called_with(
+            cluster_id=self.cluster_id,
+            kdu_instance=kdu_instance,
+            namespace=self.namespace,
+            db_dict=db_dict,
+            operation="scale",
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_rollback(self):
+        kdu_instance = "stable-openldap-0005399828"
+        db_dict = {}
+        instance_info = {
+            "chart": "openldap-1.2.3",
+            "name": kdu_instance,
+            "namespace": self.namespace,
+            "revision": 2,
+            "status": "DEPLOYED",
+        }
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+        self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
+            return_value=instance_info
+        )
+
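+        # Roll back to revision 1 (instance_info reports revision 2 as deployed)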
+        await self.helm_conn.rollback(
+            self.cluster_uuid, kdu_instance=kdu_instance, revision=1, db_dict=db_dict
+        )
+        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_called_once_with(
+            from_path=self.cluster_id
+        )
+        self.helm_conn._store_status.assert_called_with(
+            cluster_id=self.cluster_id,
+            kdu_instance=kdu_instance,
+            namespace=self.namespace,
+            db_dict=db_dict,
+            operation="rollback",
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 "
+            "rollback stable-openldap-0005399828 1 --namespace=testk8s --wait"
+        )
+        self.helm_conn._local_async_exec.assert_called_once_with(
+            command=command, env=self.env, raise_exception_on_error=False
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_uninstall(self):
+        kdu_instance = "stable-openldap-0005399828"
+        instance_info = {
+            "chart": "openldap-1.2.2",
+            "name": kdu_instance,
+            "namespace": self.namespace,
+            "revision": 3,
+            "status": "DEPLOYED",
+        }
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+        self.helm_conn._store_status = asynctest.CoroutineMock()
+        self.helm_conn.get_instance_info = asynctest.CoroutineMock(
+            return_value=instance_info
+        )
+
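+        # Uninstall is expected to resolve the namespace from the stored instance info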
+        await self.helm_conn.uninstall(self.cluster_uuid, kdu_instance)
+        self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_called_once_with(
+            from_path=self.cluster_id
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 uninstall {} --namespace={}"
+        ).format(kdu_instance, self.namespace)
+        self.helm_conn._local_async_exec.assert_called_once_with(
+            command=command, env=self.env, raise_exception_on_error=True
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_get_services(self):
+        kdu_instance = "test_services_1"
+        service = {"name": "testservice", "type": "LoadBalancer"}
+        self.helm_conn._local_async_exec_pipe = asynctest.CoroutineMock(
+            return_value=("", 0)
+        )
+        self.helm_conn._parse_services = Mock(return_value=["testservice"])
+        self.helm_conn._get_service = asynctest.CoroutineMock(return_value=service)
+
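+        # get_services pipes "helm3 get manifest" into "kubectl get" to list the chart's services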
+        services = await self.helm_conn.get_services(
+            self.cluster_uuid, kdu_instance, self.namespace
+        )
+        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_called_once_with(
+            from_path=self.cluster_id
+        )
+        self.helm_conn._parse_services.assert_called_once()
+        command1 = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 get manifest {} --namespace=testk8s"
+        ).format(kdu_instance)
+        command2 = "/usr/bin/kubectl get --namespace={} -f -".format(self.namespace)
+        self.helm_conn._local_async_exec_pipe.assert_called_once_with(
+            command1, command2, env=self.env, raise_exception_on_error=True
+        )
+        self.assertEqual(
+            services, [service], "Invalid service list returned from get_services"
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_get_service(self):
+        service_name = "service1"
+
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+        await self.helm_conn.get_service(
+            self.cluster_uuid, service_name, self.namespace
+        )
+
+        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_called_once_with(
+            from_path=self.cluster_id
+        )
+        command = (
+            "/usr/bin/kubectl --kubeconfig=./tmp/helm3_cluster_id/.kube/config "
+            "--namespace=testk8s get service service1 -o=yaml"
+        )
+        self.helm_conn._local_async_exec.assert_called_once_with(
+            command=command, env=self.env, raise_exception_on_error=True
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_inspect_kdu(self):
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+
+        kdu_model = "stable/openldap:1.2.4"
+        repo_url = "https://kubernetes-charts.storage.googleapis.com/"
+        await self.helm_conn.inspect_kdu(kdu_model, repo_url)
+
+        command = (
+            "/usr/bin/helm3 show all openldap --repo "
+            "https://kubernetes-charts.storage.googleapis.com/ "
+            "--version 1.2.4"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(command=command)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_help_kdu(self):
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+
+        kdu_model = "stable/openldap:1.2.4"
+        repo_url = "https://kubernetes-charts.storage.googleapis.com/"
+        await self.helm_conn.help_kdu(kdu_model, repo_url)
+
+        command = (
+            "/usr/bin/helm3 show readme openldap --repo "
+            "https://kubernetes-charts.storage.googleapis.com/ "
+            "--version 1.2.4"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(command=command)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_values_kdu(self):
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+
+        kdu_model = "stable/openldap:1.2.4"
+        repo_url = "https://kubernetes-charts.storage.googleapis.com/"
+        await self.helm_conn.values_kdu(kdu_model, repo_url)
+
+        command = (
+            "/usr/bin/helm3 show values openldap --repo "
+            "https://kubernetes-charts.storage.googleapis.com/ "
+            "--version 1.2.4"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(command=command)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_get_values_kdu(self):
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+
+        kdu_instance = "stable-openldap-0005399828"
+        await self.helm_conn.get_values_kdu(
+            kdu_instance, self.namespace, self.env["KUBECONFIG"]
+        )
+
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 get values "
+            "stable-openldap-0005399828 --namespace=testk8s --output yaml"
+        )
+        self.helm_conn._local_async_exec.assert_called_with(command=command)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_instances_list(self):
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+
+        await self.helm_conn.instances_list(self.cluster_uuid)
+        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
+        self.helm_conn.fs.reverse_sync.assert_called_once_with(
+            from_path=self.cluster_id
+        )
+        command = "/usr/bin/helm3 list --all-namespaces  --output yaml"
+        self.helm_conn._local_async_exec.assert_called_once_with(
+            command=command, env=self.env, raise_exception_on_error=True
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_status_kdu(self):
+        kdu_instance = "stable-openldap-0005399828"
+        self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0))
+
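+        # _status_kdu is expected to query helm with error logging disabled
+        # (show_error_log=False)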
+        await self.helm_conn._status_kdu(
+            self.cluster_id, kdu_instance, self.namespace, yaml_format=True
+        )
+        command = (
+            "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 status {} --namespace={} --output yaml"
+        ).format(kdu_instance, self.namespace)
+        self.helm_conn._local_async_exec.assert_called_once_with(
+            command=command,
+            env=self.env,
+            raise_exception_on_error=True,
+            show_error_log=False,
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_store_status(self):
+        kdu_instance = "stable-openldap-0005399828"
+        db_dict = {}
+        status = {
+            "info": {
+                "description": "Install complete",
+                "status": {
+                    "code": "1",
+                    "notes": "The openldap helm chart has been installed",
+                },
+            }
+        }
+        self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=status)
+        self.helm_conn.write_app_status_to_db = asynctest.CoroutineMock(
+            return_value=status
+        )
+
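+        # _store_status should fetch the current status and write its description to the DB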
+        await self.helm_conn._store_status(
+            cluster_id=self.cluster_id,
+            kdu_instance=kdu_instance,
+            namespace=self.namespace,
+            db_dict=db_dict,
+            operation="install",
+        )
+        self.helm_conn._status_kdu.assert_called_once_with(
+            cluster_id=self.cluster_id,
+            kdu_instance=kdu_instance,
+            namespace=self.namespace,
+            yaml_format=False,
+        )
+        self.helm_conn.write_app_status_to_db.assert_called_once_with(
+            db_dict=db_dict,
+            status="Install complete",
+            detailed_status=str(status),
+            operation="install",
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_reset_uninstall_false(self):
+        self.helm_conn._uninstall_sw = asynctest.CoroutineMock()
+
+        await self.helm_conn.reset(self.cluster_uuid, force=False, uninstall_sw=False)
+        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
+        self.helm_conn.fs.file_delete.assert_called_once_with(
+            self.cluster_id, ignore_non_exist=True
+        )
+        self.helm_conn._uninstall_sw.assert_not_called()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_reset_uninstall(self):
+        kdu_instance = "stable-openldap-0021099429"
+        instances = [
+            {
+                "app_version": "2.4.48",
+                "chart": "openldap-1.2.3",
+                "name": kdu_instance,
+                "namespace": self.namespace,
+                "revision": "1",
+                "status": "deployed",
+                "updated": "2020-10-30 11:11:20.376744191 +0000 UTC",
+            }
+        ]
+        self.helm_conn._get_namespace = Mock(return_value=self.namespace)
+        self.helm_conn._uninstall_sw = asynctest.CoroutineMock()
+        self.helm_conn.instances_list = asynctest.CoroutineMock(return_value=instances)
+        self.helm_conn.uninstall = asynctest.CoroutineMock()
+
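+        # A forced reset with uninstall_sw=True must uninstall every deployed
+        # instance before removing the software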
+        await self.helm_conn.reset(self.cluster_uuid, force=True, uninstall_sw=True)
+        self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
+        self.helm_conn.fs.file_delete.assert_called_once_with(
+            self.cluster_id, ignore_non_exist=True
+        )
+        self.helm_conn._get_namespace.assert_called_once_with(
+            cluster_uuid=self.cluster_uuid
+        )
+        self.helm_conn.instances_list.assert_called_once_with(
+            cluster_uuid=self.cluster_uuid
+        )
+        self.helm_conn.uninstall.assert_called_once_with(
+            cluster_uuid=self.cluster_uuid, kdu_instance=kdu_instance
+        )
+        self.helm_conn._uninstall_sw.assert_called_once_with(
+            cluster_id=self.cluster_id, namespace=self.namespace
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_sync_repos_add(self):
+        repo_list = [
+            {
+                "name": "stable",
+                "url": "https://kubernetes-charts.storage.googleapis.com/",
+            }
+        ]
+        self.helm_conn.repo_list = asynctest.CoroutineMock(return_value=repo_list)
+
+        def get_one_result(*args, **kwargs):
+            if args[0] == "k8sclusters":
+                return {
+                    "_admin": {
+                        "helm_chart_repos": ["4b5550a9-990d-4d95-8a48-1f4614d6ac9c"]
+                    }
+                }
+            elif args[0] == "k8srepos":
+                return {
+                    "_id": "4b5550a9-990d-4d95-8a48-1f4614d6ac9c",
+                    "type": "helm-chart",
+                    "name": "bitnami",
+                    "url": "https://charts.bitnami.com/bitnami",
+                }
+
+        self.helm_conn.db.get_one = asynctest.Mock()
+        self.helm_conn.db.get_one.side_effect = get_one_result
+
+        self.helm_conn.repo_add = asynctest.CoroutineMock()
+        self.helm_conn.repo_remove = asynctest.CoroutineMock()
+
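+        # The DB references the bitnami repo but the cluster only has "stable",
+        # so bitnami must be added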
+        deleted_repo_list, added_repo_dict = await self.helm_conn.synchronize_repos(
+            self.cluster_uuid
+        )
+        self.helm_conn.repo_remove.assert_not_called()
+        self.helm_conn.repo_add.assert_called_once_with(
+            self.cluster_uuid,
+            "bitnami",
+            "https://charts.bitnami.com/bitnami",
+            cert=None,
+            user=None,
+            password=None,
+            oci=False,
+        )
+        self.assertEqual(deleted_repo_list, [], "Deleted repo list should be empty")
+        self.assertEqual(
+            added_repo_dict,
+            {"4b5550a9-990d-4d95-8a48-1f4614d6ac9c": "bitnami"},
+            "Repos added should include only one bitnami",
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_sync_repos_delete(self):
+        repo_list = [
+            {
+                "name": "stable",
+                "url": "https://kubernetes-charts.storage.googleapis.com/",
+            },
+            {"name": "bitnami", "url": "https://charts.bitnami.com/bitnami"},
+        ]
+        self.helm_conn.repo_list = asynctest.CoroutineMock(return_value=repo_list)
+
+        def get_one_result(*args, **kwargs):
+            if args[0] == "k8sclusters":
+                return {"_admin": {"helm_chart_repos": []}}
+
+        self.helm_conn.db.get_one = asynctest.Mock()
+        self.helm_conn.db.get_one.side_effect = get_one_result
+
+        self.helm_conn.repo_add = asynctest.CoroutineMock()
+        self.helm_conn.repo_remove = asynctest.CoroutineMock()
+
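+        # The DB references no repos, so the extra "bitnami" repo on the cluster must be removed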
+        deleted_repo_list, added_repo_dict = await self.helm_conn.synchronize_repos(
+            self.cluster_uuid
+        )
+        self.helm_conn.repo_add.assert_not_called()
+        self.helm_conn.repo_remove.assert_called_once_with(self.cluster_uuid, "bitnami")
+        self.assertEqual(
+            deleted_repo_list, ["bitnami"], "Deleted repo list should be bitnami"
+        )
+        self.assertEqual(added_repo_dict, {}, "No repos should be added")
diff --git a/osm_lcm/n2vc/tests/unit/test_k8s_juju_conn.py b/osm_lcm/n2vc/tests/unit/test_k8s_juju_conn.py
new file mode 100644 (file)
index 0000000..8cf13d9
--- /dev/null
@@ -0,0 +1,826 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+
+import asyncio
+import logging
+import asynctest
+from unittest.mock import Mock
+from osm_lcm.n2vc.definitions import Offer, RelationEndpoint
+from osm_lcm.n2vc.k8s_juju_conn import K8sJujuConnector, RBAC_LABEL_KEY_NAME
+from osm_common import fslocal
+from .utils import kubeconfig, FakeModel, FakeFileWrapper, AsyncMock, FakeApplication
+from osm_lcm.n2vc.exceptions import MethodNotImplemented, K8sException
+from osm_lcm.n2vc.vca.connection_data import ConnectionData
+
+
+class K8sJujuConnTestCase(asynctest.TestCase):
+    @asynctest.mock.patch("osm_lcm.n2vc.k8s_juju_conn.Libjuju")
+    @asynctest.mock.patch("osm_lcm.n2vc.k8s_juju_conn.MotorStore")
+    @asynctest.mock.patch("osm_lcm.n2vc.k8s_juju_conn.get_connection")
+    @asynctest.mock.patch("osm_lcm.n2vc.vca.connection_data.base64_to_cacert")
+    def setUp(
+        self,
+        mock_base64_to_cacert=None,
+        mock_get_connection=None,
+        mock_store=None,
+        mock_libjuju=None,
+    ):
+        self.loop = asyncio.get_event_loop()
+        self.db = Mock()
+        mock_base64_to_cacert.return_value = """
+    -----BEGIN CERTIFICATE-----
+    SOMECERT
+    -----END CERTIFICATE-----"""
+        mock_libjuju.return_value = Mock()
+        mock_store.return_value = AsyncMock()
+        mock_vca_connection = Mock()
+        mock_get_connection.return_value = mock_vca_connection
+        mock_vca_connection.data.return_value = ConnectionData(
+            **{
+                "endpoints": ["1.2.3.4:17070"],
+                "user": "user",
+                "secret": "secret",
+                "cacert": "cacert",
+                "pubkey": "pubkey",
+                "lxd-cloud": "cloud",
+                "lxd-credentials": "credentials",
+                "k8s-cloud": "k8s_cloud",
+                "k8s-credentials": "k8s_credentials",
+                "model-config": {},
+                "api-proxy": "api_proxy",
+            }
+        )
+        logging.disable(logging.CRITICAL)
+
+        self.kdu_name = "kdu_name"
+        self.kdu_instance = "{}-{}".format(self.kdu_name, "id")
+        self.default_namespace = self.kdu_instance
+
+        self.k8s_juju_conn = K8sJujuConnector(
+            fs=fslocal.FsLocal(),
+            db=self.db,
+            log=None,
+            on_update_db=None,
+        )
+        self.k8s_juju_conn._store.get_vca_id.return_value = None
+        self.k8s_juju_conn.libjuju = Mock()
+        # Mock Kubectl
+        self.kubectl = Mock()
+        self.kubectl.get_secret_data = AsyncMock()
+        self.kubectl.get_secret_data.return_value = ("token", "cacert")
+        self.kubectl.get_services.return_value = [{}]
+        self.k8s_juju_conn._get_kubectl = Mock()
+        self.k8s_juju_conn._get_kubectl.return_value = self.kubectl
+        self.k8s_juju_conn._obtain_namespace_from_db = Mock(
+            return_value=self.default_namespace
+        )
+
+
+class InitEnvTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(InitEnvTest, self).setUp()
+        self.k8s_juju_conn.libjuju.add_k8s = AsyncMock()
+
+    def test_with_cluster_uuid(
+        self,
+    ):
+        reuse_cluster_uuid = "uuid"
+        uuid, created = self.loop.run_until_complete(
+            self.k8s_juju_conn.init_env(
+                k8s_creds=kubeconfig, reuse_cluster_uuid=reuse_cluster_uuid
+            )
+        )
+
+        self.assertTrue(created)
+        self.assertEqual(uuid, reuse_cluster_uuid)
+        self.kubectl.get_default_storage_class.assert_called_once()
+        self.k8s_juju_conn.libjuju.add_k8s.assert_called_once()
+
+    def test_with_no_cluster_uuid(
+        self,
+    ):
+        uuid, created = self.loop.run_until_complete(
+            self.k8s_juju_conn.init_env(k8s_creds=kubeconfig)
+        )
+
+        self.assertTrue(created)
+        self.assertTrue(isinstance(uuid, str))
+        self.kubectl.get_default_storage_class.assert_called_once()
+        self.k8s_juju_conn.libjuju.add_k8s.assert_called_once()
+
+    def test_init_env_exception(
+        self,
+    ):
+        self.k8s_juju_conn.libjuju.add_k8s.side_effect = Exception()
+        created = None
+        uuid = None
+        with self.assertRaises(Exception):
+            uuid, created = self.loop.run_until_complete(
+                self.k8s_juju_conn.init_env(k8s_creds=kubeconfig)
+            )
+        self.assertIsNone(created)
+        self.assertIsNone(uuid)
+        self.kubectl.create_cluster_role.assert_called_once()
+        self.kubectl.create_service_account.assert_called_once()
+        self.kubectl.create_cluster_role_binding.assert_called_once()
+        self.kubectl.get_default_storage_class.assert_called_once()
+        self.kubectl.delete_cluster_role.assert_called_once()
+        self.kubectl.delete_service_account.assert_called_once()
+        self.kubectl.delete_cluster_role_binding.assert_called_once()
+        self.k8s_juju_conn.libjuju.add_k8s.assert_called_once()
+
+
+class NotImplementedTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(NotImplementedTest, self).setUp()
+
+    def test_repo_add(self):
+        with self.assertRaises(MethodNotImplemented):
+            self.loop.run_until_complete(self.k8s_juju_conn.repo_add("", ""))
+
+    def test_repo_list(self):
+        with self.assertRaises(MethodNotImplemented):
+            self.loop.run_until_complete(self.k8s_juju_conn.repo_list())
+
+    def test_repo_remove(self):
+        with self.assertRaises(MethodNotImplemented):
+            self.loop.run_until_complete(self.k8s_juju_conn.repo_remove(""))
+
+    def test_synchronize_repos(self):
+        self.assertIsNone(
+            self.loop.run_until_complete(self.k8s_juju_conn.synchronize_repos("", ""))
+        )
+
+    def test_upgrade(self):
+        with self.assertRaises(MethodNotImplemented):
+            self.loop.run_until_complete(self.k8s_juju_conn.upgrade("", ""))
+
+    def test_rollback(self):
+        with self.assertRaises(MethodNotImplemented):
+            self.loop.run_until_complete(self.k8s_juju_conn.rollback("", ""))
+
+    def test_get_namespace(self):
+        self.assertIsNone(self.k8s_juju_conn.get_namespace(""))
+
+    def test_instances_list(self):
+        res = self.loop.run_until_complete(self.k8s_juju_conn.instances_list(""))
+        self.assertEqual(res, [])
+
+
+class ResetTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(ResetTest, self).setUp()
+        self.k8s_juju_conn.libjuju.remove_cloud = AsyncMock()
+        self.k8s_juju_conn.libjuju.get_cloud_credentials = AsyncMock()
+        cloud_creds = Mock()
+        cloud_creds.result = {"attrs": {RBAC_LABEL_KEY_NAME: "asd"}}
+        self.k8s_juju_conn.libjuju.get_cloud_credentials.return_value = [cloud_creds]
+        self.k8s_juju_conn.get_credentials = Mock()
+        self.k8s_juju_conn.get_credentials.return_value = kubeconfig
+
+    def test_success(self):
+        removed = self.loop.run_until_complete(self.k8s_juju_conn.reset("uuid"))
+        self.assertTrue(removed)
+        self.k8s_juju_conn.libjuju.remove_cloud.assert_called_once()
+
+    def test_exception(self):
+        removed = None
+        self.k8s_juju_conn.libjuju.remove_cloud.side_effect = Exception()
+        with self.assertRaises(Exception):
+            removed = self.loop.run_until_complete(self.k8s_juju_conn.reset("uuid"))
+        self.assertIsNone(removed)
+        self.k8s_juju_conn.libjuju.remove_cloud.assert_called_once()
+
+
+@asynctest.mock.patch("os.chdir")
+class InstallTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(InstallTest, self).setUp()
+        self.db_dict = {"filter": {"_id": "id"}}
+        self.local_bundle = "bundle"
+        self.cs_bundle = "cs:bundle"
+        self.http_bundle = "https://example.com/bundle.yaml"
+        self.cluster_uuid = "cluster"
+        self.k8s_juju_conn.libjuju.add_model = AsyncMock()
+        self.k8s_juju_conn.libjuju.deploy = AsyncMock()
+
+    def test_success_local(self, mock_chdir):
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.install(
+                self.cluster_uuid,
+                self.local_bundle,
+                self.kdu_instance,
+                atomic=True,
+                kdu_name=self.kdu_name,
+                db_dict=self.db_dict,
+                timeout=1800,
+                params=None,
+            )
+        )
+        self.assertEqual(mock_chdir.call_count, 2)
+        self.k8s_juju_conn.libjuju.add_model.assert_called_once()
+        self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
+            "local:{}".format(self.local_bundle),
+            model_name=self.default_namespace,
+            wait=True,
+            timeout=1800,
+            instantiation_params=None,
+        )
+
+    def test_success_cs(self, mock_chdir):
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.install(
+                self.cluster_uuid,
+                self.cs_bundle,
+                self.kdu_instance,
+                atomic=True,
+                kdu_name=self.kdu_name,
+                db_dict=self.db_dict,
+                timeout=1800,
+                params={},
+            )
+        )
+        self.k8s_juju_conn.libjuju.add_model.assert_called_once()
+        self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
+            self.cs_bundle,
+            model_name=self.default_namespace,
+            wait=True,
+            timeout=1800,
+            instantiation_params=None,
+        )
+
+    def test_success_http(self, mock_chdir):
+        params = {"overlay": {"applications": {"squid": {"scale": 2}}}}
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.install(
+                self.cluster_uuid,
+                self.http_bundle,
+                self.kdu_instance,
+                atomic=True,
+                kdu_name=self.kdu_name,
+                db_dict=self.db_dict,
+                timeout=1800,
+                params=params,
+            )
+        )
+        self.k8s_juju_conn.libjuju.add_model.assert_called_once()
+        self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
+            self.http_bundle,
+            model_name=self.default_namespace,
+            wait=True,
+            timeout=1800,
+            instantiation_params=params.get("overlay"),
+        )
+
+    def test_success_not_kdu_name(self, mock_chdir):
+        params = {"some_key": {"applications": {"squid": {"scale": 2}}}}
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.install(
+                self.cluster_uuid,
+                self.cs_bundle,
+                self.kdu_instance,
+                atomic=True,
+                db_dict=self.db_dict,
+                timeout=1800,
+                params=params,
+            )
+        )
+        self.k8s_juju_conn.libjuju.add_model.assert_called_once()
+        self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
+            self.cs_bundle,
+            model_name=self.default_namespace,
+            wait=True,
+            timeout=1800,
+            instantiation_params=None,
+        )
+
+    def test_missing_db_dict(self, mock_chdir):
+        kdu_instance = None
+        with self.assertRaises(K8sException):
+            self.loop.run_until_complete(
+                self.k8s_juju_conn.install(
+                    self.cluster_uuid,
+                    self.cs_bundle,
+                    self.kdu_instance,
+                    atomic=True,
+                    kdu_name=self.kdu_name,
+                    timeout=1800,
+                )
+            )
+        self.assertIsNone(kdu_instance)
+        self.k8s_juju_conn.libjuju.add_model.assert_not_called()
+        self.k8s_juju_conn.libjuju.deploy.assert_not_called()
+
+    @asynctest.mock.patch("os.getcwd")
+    def test_getcwd_exception(self, mock_getcwd, mock_chdir):
+        mock_getcwd.side_effect = FileNotFoundError()
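+        # A failing os.getcwd() must not abort the installation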
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.install(
+                self.cluster_uuid,
+                self.cs_bundle,
+                self.kdu_instance,
+                atomic=True,
+                kdu_name=self.kdu_name,
+                db_dict=self.db_dict,
+                timeout=1800,
+            )
+        )
+        self.k8s_juju_conn.libjuju.add_model.assert_called_once()
+        self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
+            self.cs_bundle,
+            model_name=self.default_namespace,
+            wait=True,
+            timeout=1800,
+            instantiation_params=None,
+        )
+
+    def test_missing_bundle(self, mock_chdir):
+        with self.assertRaises(K8sException):
+            self.loop.run_until_complete(
+                self.k8s_juju_conn.install(
+                    self.cluster_uuid,
+                    "",
+                    self.kdu_instance,
+                    atomic=True,
+                    kdu_name=self.kdu_name,
+                    timeout=1800,
+                    db_dict=self.db_dict,
+                )
+            )
+        self.k8s_juju_conn.libjuju.add_model.assert_not_called()
+        self.k8s_juju_conn.libjuju.deploy.assert_not_called()
+
+    def test_missing_exception(self, mock_chdir):
+        self.k8s_juju_conn.libjuju.deploy.side_effect = Exception()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.k8s_juju_conn.install(
+                    self.cluster_uuid,
+                    self.local_bundle,
+                    self.kdu_instance,
+                    atomic=True,
+                    kdu_name=self.kdu_name,
+                    db_dict=self.db_dict,
+                    timeout=1800,
+                )
+            )
+        self.k8s_juju_conn.libjuju.add_model.assert_called_once()
+        self.k8s_juju_conn.libjuju.deploy.assert_called_once_with(
+            "local:{}".format(self.local_bundle),
+            model_name=self.default_namespace,
+            wait=True,
+            timeout=1800,
+            instantiation_params=None,
+        )
+
+
+class UninstallTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(UninstallTest, self).setUp()
+        self.k8s_juju_conn.libjuju.destroy_model = AsyncMock()
+
+    def test_success(self):
+        destroyed = self.loop.run_until_complete(
+            self.k8s_juju_conn.uninstall("cluster_uuid", "model_name")
+        )
+        self.assertTrue(destroyed)
+        self.k8s_juju_conn.libjuju.destroy_model.assert_called_once()
+
+    def test_exception(self):
+        destroyed = None
+        self.k8s_juju_conn.libjuju.destroy_model.side_effect = Exception()
+        with self.assertRaises(Exception):
+            destroyed = self.loop.run_until_complete(
+                self.k8s_juju_conn.uninstall("cluster_uuid", "model_name")
+            )
+        self.assertIsNone(destroyed)
+        self.k8s_juju_conn.libjuju.destroy_model.assert_called_once()
+
+
+class ExecPrimitivesTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(ExecPrimitivesTest, self).setUp()
+        self.action_name = "touch"
+        self.application_name = "myapp"
+        self.k8s_juju_conn.libjuju.get_actions = AsyncMock()
+        self.k8s_juju_conn.libjuju.execute_action = AsyncMock()
+
+    def test_success(self):
+        params = {"application-name": self.application_name}
+        self.k8s_juju_conn.libjuju.get_actions.return_value = [self.action_name]
+        self.k8s_juju_conn.libjuju.execute_action.return_value = (
+            "success",
+            "completed",
+        )
+
+        output = self.loop.run_until_complete(
+            self.k8s_juju_conn.exec_primitive(
+                "cluster", self.kdu_instance, self.action_name, params=params
+            )
+        )
+
+        self.assertEqual(output, "success")
+        self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with(
+            kdu_instance=self.kdu_instance
+        )
+        self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with(
+            application_name=self.application_name, model_name=self.default_namespace
+        )
+        self.k8s_juju_conn.libjuju.execute_action.assert_called_once_with(
+            application_name=self.application_name,
+            model_name=self.default_namespace,
+            action_name=self.action_name,
+            **params
+        )
+
+    def test_exception(self):
+        params = {"application-name": self.application_name}
+        self.k8s_juju_conn.libjuju.get_actions.return_value = [self.action_name]
+        self.k8s_juju_conn.libjuju.execute_action.side_effect = Exception()
+        output = None
+
+        with self.assertRaises(Exception):
+            output = self.loop.run_until_complete(
+                self.k8s_juju_conn.exec_primitive(
+                    "cluster", self.kdu_instance, self.action_name, params=params
+                )
+            )
+
+        self.assertIsNone(output)
+        self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with(
+            kdu_instance=self.kdu_instance
+        )
+        self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with(
+            application_name=self.application_name, model_name=self.default_namespace
+        )
+        self.k8s_juju_conn.libjuju.execute_action.assert_called_once_with(
+            application_name=self.application_name,
+            model_name=self.default_namespace,
+            action_name=self.action_name,
+            **params
+        )
+
+    def test_missing_application_name_in_params(self):
+        params = {}
+        output = None
+
+        with self.assertRaises(K8sException):
+            output = self.loop.run_until_complete(
+                self.k8s_juju_conn.exec_primitive(
+                    "cluster", self.kdu_instance, self.action_name, params=params
+                )
+            )
+
+        self.assertIsNone(output)
+        self.k8s_juju_conn.libjuju.get_actions.assert_not_called()
+        self.k8s_juju_conn.libjuju.execute_action.assert_not_called()
+
+    def test_missing_params(self):
+        output = None
+        with self.assertRaises(K8sException):
+            output = self.loop.run_until_complete(
+                self.k8s_juju_conn.exec_primitive(
+                    "cluster", self.kdu_instance, self.action_name
+                )
+            )
+
+        self.assertIsNone(output)
+        self.k8s_juju_conn.libjuju.get_actions.assert_not_called()
+        self.k8s_juju_conn.libjuju.execute_action.assert_not_called()
+
+    def test_missing_action(self):
+        output = None
+        params = {"application-name": self.application_name}
+        self.k8s_juju_conn.libjuju.get_actions.return_value = [self.action_name]
+        self.k8s_juju_conn.libjuju.execute_action.return_value = (
+            "success",
+            "completed",
+        )
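+        # Requesting an action the charm does not expose must raise K8sException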
+        with self.assertRaises(K8sException):
+            output = self.loop.run_until_complete(
+                self.k8s_juju_conn.exec_primitive(
+                    "cluster", self.kdu_instance, "non-existing-action", params=params
+                )
+            )
+
+        self.assertIsNone(output)
+        self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with(
+            kdu_instance=self.kdu_instance
+        )
+        self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with(
+            application_name=self.application_name, model_name=self.default_namespace
+        )
+        self.k8s_juju_conn.libjuju.execute_action.assert_not_called()
+
+    def test_missing_not_completed(self):
+        output = None
+        params = {"application-name": self.application_name}
+        self.k8s_juju_conn.libjuju.get_actions.return_value = [self.action_name]
+        self.k8s_juju_conn.libjuju.execute_action.return_value = (None, "failed")
+        with self.assertRaises(K8sException):
+            output = self.loop.run_until_complete(
+                self.k8s_juju_conn.exec_primitive(
+                    "cluster", self.kdu_instance, self.action_name, params=params
+                )
+            )
+
+        self.assertIsNone(output)
+        self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with(
+            kdu_instance=self.kdu_instance
+        )
+        self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with(
+            application_name=self.application_name, model_name=self.default_namespace
+        )
+        self.k8s_juju_conn.libjuju.execute_action.assert_called_once_with(
+            application_name=self.application_name,
+            model_name=self.default_namespace,
+            action_name=self.action_name,
+            **params
+        )
+
+
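+# inspect_kdu() reads a deployed KDU's bundle file from disk and returns its
+# applications section, so these tests mock os.path.exists and builtins.open
+# rather than touching the filesystem.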
+class InspectKduTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(InspectKduTest, self).setUp()
+
+    @asynctest.mock.patch("builtins.open")
+    @asynctest.mock.patch("os.path.exists")
+    def test_existing_file(self, mock_exists, mock_open):
+        mock_exists.return_value = True
+        content = """{
+            'description': 'test bundle',
+            'bundle': 'kubernetes',
+            'applications': {'app':{ }, 'app2': { }}
+        }"""
+        mock_open.return_value = FakeFileWrapper(content=content)
+        kdu = self.loop.run_until_complete(self.k8s_juju_conn.inspect_kdu("model"))
+        self.assertEqual(kdu, {"app": {}, "app2": {}})
+        mock_exists.assert_called_once()
+        mock_open.assert_called_once()
+
+    @asynctest.mock.patch("builtins.open")
+    @asynctest.mock.patch("os.path.exists")
+    def test_not_existing_file(self, mock_exists, mock_open):
+        kdu = None
+        mock_exists.return_value = False
+        with self.assertRaises(K8sException):
+            kdu = self.loop.run_until_complete(self.k8s_juju_conn.inspect_kdu("model"))
+        self.assertIsNone(kdu)
+        mock_exists.assert_called_once_with("model")
+        mock_open.assert_not_called()
+
+
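+# help_kdu() looks for a README file (README, README.md or README.txt) in the
+# KDU directory and returns its contents; anything else yields None.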
+class HelpKduTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(HelpKduTest, self).setUp()
+
+    @asynctest.mock.patch("builtins.open")
+    @asynctest.mock.patch("os.listdir")
+    def test_existing_file(self, mock_listdir, mock_open):
+        content = "Readme file content"
+        mock_open.return_value = FakeFileWrapper(content=content)
+        for file in ["README.md", "README.txt", "README"]:
+            mock_listdir.return_value = [file]
+            help_text = self.loop.run_until_complete(
+                self.k8s_juju_conn.help_kdu("kdu_instance")
+            )
+            self.assertEqual(help_text, content)
+
+        self.assertEqual(mock_listdir.call_count, 3)
+        self.assertEqual(mock_open.call_count, 3)
+
+    @asynctest.mock.patch("builtins.open")
+    @asynctest.mock.patch("os.listdir")
+    def test_not_existing_file(self, mock_listdir, mock_open):
+        for file in ["src/charm.py", "tox.ini", "requirements.txt"]:
+            mock_listdir.return_value = [file]
+            help_text = self.loop.run_until_complete(
+                self.k8s_juju_conn.help_kdu("kdu_instance")
+            )
+            self.assertIsNone(help_text)
+
+        self.assertEqual(mock_listdir.call_count, 3)
+        self.assertEqual(mock_open.call_count, 0)
+
+
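+# status_kdu() reduces the model status returned by libjuju to a flat
+# per-application status mapping.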
+class StatusKduTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(StatusKduTest, self).setUp()
+        self.k8s_juju_conn.libjuju.get_model_status = AsyncMock()
+
+    def test_success(self):
+        applications = {"app": {"status": {"status": "active"}}}
+        model = FakeModel(applications=applications)
+        self.k8s_juju_conn.libjuju.get_model_status.return_value = model
+        status = self.loop.run_until_complete(
+            self.k8s_juju_conn.status_kdu("cluster", "kdu_instance")
+        )
+        self.assertEqual(status, {"app": {"status": "active"}})
+        self.k8s_juju_conn.libjuju.get_model_status.assert_called_once()
+
+    def test_exception(self):
+        self.k8s_juju_conn.libjuju.get_model_status.side_effect = Exception()
+        status = None
+        with self.assertRaises(Exception):
+            status = self.loop.run_until_complete(
+                self.k8s_juju_conn.status_kdu("cluster", "kdu_instance")
+            )
+        self.assertIsNone(status)
+        self.k8s_juju_conn.libjuju.get_model_status.assert_called_once()
+
+
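+# get_services() and get_service() (below) fetch the cluster kubeconfig via
+# get_credentials() and delegate the actual lookup to the Kubectl helper.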
+class GetServicesTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(GetServicesTest, self).setUp()
+
+    @asynctest.mock.patch("osm_lcm.n2vc.k8s_juju_conn.K8sJujuConnector.get_credentials")
+    def test_success(self, mock_get_credentials):
+        mock_get_credentials.return_value = kubeconfig
+        self.loop.run_until_complete(self.k8s_juju_conn.get_services("", "", ""))
+        mock_get_credentials.assert_called_once()
+        self.kubectl.get_services.assert_called_once()
+
+
+class GetServiceTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(GetServiceTest, self).setUp()
+
+    @asynctest.mock.patch("osm_lcm.n2vc.k8s_juju_conn.K8sJujuConnector.get_credentials")
+    def test_success(self, mock_get_credentials):
+        mock_get_credentials.return_value = kubeconfig
+        self.loop.run_until_complete(self.k8s_juju_conn.get_service("", "", ""))
+        mock_get_credentials.assert_called_once()
+        self.kubectl.get_services.assert_called_once()
+
+
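+# get_credentials() reads the cluster record from the database, decrypts the
+# credentials field and re-serializes it with yaml.safe_dump.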
+class GetCredentialsTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(GetCredentialsTest, self).setUp()
+
+    @asynctest.mock.patch("yaml.safe_dump")
+    def test_success(self, mock_safe_dump):
+        self.k8s_juju_conn.db.get_one.return_value = {
+            "_id": "id",
+            "credentials": "credentials",
+            "schema_version": "2",
+        }
+        self.k8s_juju_conn.get_credentials("cluster_uuid")
+        self.k8s_juju_conn.db.get_one.assert_called_once()
+        self.k8s_juju_conn.db.encrypt_decrypt_fields.assert_called_once()
+        mock_safe_dump.assert_called_once()
+
+
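+# update_vca_status() enriches the given VCA status dict with the executed
+# actions and application configs obtained through libjuju.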
+class UpdateVcaStatusTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(UpdateVcaStatusTest, self).setUp()
+        self.vcaStatus = {"model": {"applications": {"app": {"actions": {}}}}}
+        self.k8s_juju_conn.libjuju.get_executed_actions = AsyncMock()
+        self.k8s_juju_conn.libjuju.get_actions = AsyncMock()
+        self.k8s_juju_conn.libjuju.get_application_configs = AsyncMock()
+
+    def test_success(self):
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.update_vca_status(self.vcaStatus, self.kdu_instance)
+        )
+        self.k8s_juju_conn.libjuju.get_executed_actions.assert_called_once()
+        self.k8s_juju_conn.libjuju.get_application_configs.assert_called_once()
+
+    def test_exception(self):
+        self.k8s_juju_conn.libjuju.get_model.return_value = None
+        self.k8s_juju_conn.libjuju.get_executed_actions.side_effect = Exception()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.k8s_juju_conn.update_vca_status(self.vcaStatus, self.kdu_instance)
+            )
+            self.k8s_juju_conn.libjuju.get_executed_actions.assert_not_called()
+            self.k8s_juju_conn.libjuju.get_application_configs.assert_not_called()
+
+
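+# scale() is a thin proxy over libjuju.scale_application(); the tests cover
+# the happy path and error propagation.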
+class ScaleTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(ScaleTest, self).setUp()
+        self.application_name = "app"
+        self.kdu_name = "kdu-instance"
+        self._scale = 2
+        self.k8s_juju_conn.libjuju.scale_application = AsyncMock()
+
+    def test_success(self):
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.scale(self.kdu_name, self._scale, self.application_name)
+        )
+        self.k8s_juju_conn.libjuju.scale_application.assert_called_once()
+
+    def test_exception(self):
+        self.k8s_juju_conn.libjuju.scale_application.side_effect = Exception()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.k8s_juju_conn.scale(
+                    self.kdu_name, self._scale, self.application_name
+                )
+            )
+        self.k8s_juju_conn.libjuju.scale_application.assert_called_once()
+
+
+class GetScaleCount(K8sJujuConnTestCase):
+    def setUp(self):
+        super(GetScaleCount, self).setUp()
+        self.k8s_juju_conn.libjuju.get_model_status = AsyncMock()
+
+    def test_success(self):
+        applications = {"app": FakeApplication()}
+        model = FakeModel(applications=applications)
+        self.k8s_juju_conn.libjuju.get_model_status.return_value = model
+        status = self.loop.run_until_complete(
+            self.k8s_juju_conn.get_scale_count("app", "kdu_instance")
+        )
+        self.assertEqual(status, 2)
+        self.k8s_juju_conn.libjuju.get_model_status.assert_called_once()
+
+    def test_exception(self):
+        self.k8s_juju_conn.libjuju.get_model_status.side_effect = Exception()
+        status = None
+        with self.assertRaises(Exception):
+            status = self.loop.run_until_complete(
+                self.k8s_juju_conn.get_scale_count("app", "kdu_instance")
+            )
+        self.assertIsNone(status)
+        self.k8s_juju_conn.libjuju.get_model_status.assert_called_once()
+
+
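+# add_relation() has three paths: a plain relation inside a single model, a
+# cross-model relation (CMR) within one controller (offer + consume), and a
+# CMR across controllers, which additionally resolves the peer's libjuju
+# instance via _get_libjuju().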
+class AddRelationTest(K8sJujuConnTestCase):
+    def setUp(self):
+        super(AddRelationTest, self).setUp()
+        self.k8s_juju_conn.libjuju.add_relation = AsyncMock()
+        self.k8s_juju_conn.libjuju.offer = AsyncMock()
+        self.k8s_juju_conn.libjuju.get_controller = AsyncMock()
+        self.k8s_juju_conn.libjuju.consume = AsyncMock()
+
+    def test_standard_relation_same_model_and_controller(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint1")
+        relation_endpoint_2 = RelationEndpoint("model-1.app2.1", None, "endpoint2")
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.k8s_juju_conn.libjuju.add_relation.assert_called_once_with(
+            model_name="model-1",
+            endpoint_1="app1:endpoint1",
+            endpoint_2="app2:endpoint2",
+        )
+        self.k8s_juju_conn.libjuju.offer.assert_not_called()
+        self.k8s_juju_conn.libjuju.consume.assert_not_called()
+
+    def test_cmr_relation_same_controller(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint")
+        relation_endpoint_2 = RelationEndpoint("model-2.app2.1", None, "endpoint")
+        offer = Offer("admin/model-1.app1")
+        self.k8s_juju_conn.libjuju.offer.return_value = offer
+        self.k8s_juju_conn.libjuju.consume.return_value = "saas"
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.k8s_juju_conn.libjuju.offer.assert_called_once_with(relation_endpoint_1)
+        self.k8s_juju_conn.libjuju.consume.assert_called_once()
+        self.k8s_juju_conn.libjuju.add_relation.assert_called_once_with(
+            "model-2", "app2:endpoint", "saas"
+        )
+
+    def test_cmr_relation_different_controller(self):
+        self.k8s_juju_conn._get_libjuju = AsyncMock(
+            return_value=self.k8s_juju_conn.libjuju
+        )
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", "vca-id-1", "endpoint")
+        relation_endpoint_2 = RelationEndpoint("model-1.app2.1", "vca-id-2", "endpoint")
+        offer = Offer("admin/model-1.app1")
+        self.k8s_juju_conn.libjuju.offer.return_value = offer
+        self.k8s_juju_conn.libjuju.consume.return_value = "saas"
+        self.loop.run_until_complete(
+            self.k8s_juju_conn.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.k8s_juju_conn.libjuju.offer.assert_called_once_with(relation_endpoint_1)
+        self.k8s_juju_conn.libjuju.consume.assert_called_once()
+        self.k8s_juju_conn.libjuju.add_relation.assert_called_once_with(
+            "model-1", "app2:endpoint", "saas"
+        )
+
+    def test_relation_exception(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint")
+        relation_endpoint_2 = RelationEndpoint("model-2.app2.1", None, "endpoint")
+        self.k8s_juju_conn.libjuju.offer.side_effect = Exception()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.k8s_juju_conn.add_relation(
+                    relation_endpoint_1, relation_endpoint_2
+                )
+            )
diff --git a/osm_lcm/n2vc/tests/unit/test_kubectl.py b/osm_lcm/n2vc/tests/unit/test_kubectl.py
new file mode 100644 (file)
index 0000000..5776fa8
--- /dev/null
@@ -0,0 +1,854 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+import asynctest
+import yaml
+import os
+from unittest import TestCase, mock
+from osm_lcm.n2vc.kubectl import Kubectl, CORE_CLIENT, CUSTOM_OBJECT_CLIENT
+from osm_lcm.n2vc.utils import Dict
+from kubernetes.client.rest import ApiException
+from kubernetes.client import (
+    V1ObjectMeta,
+    V1Secret,
+    V1ServiceAccount,
+    V1SecretReference,
+    V1Role,
+    V1RoleBinding,
+    V1RoleRef,
+    RbacV1Subject,
+    V1PolicyRule,
+    V1Namespace,
+)
+
+
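+# Minimal stand-ins for kubernetes client response objects: they expose only
+# the attributes that Kubectl actually reads (metadata, items, version).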
+class FakeK8sResourceMetadata:
+    def __init__(
+        self,
+        name: str = None,
+        namespace: str = None,
+        annotations: dict = {},
+        labels: dict = {},
+    ):
+        self._annotations = annotations
+        self._name = name or "name"
+        self._namespace = namespace or "namespace"
+        self._labels = labels or {"juju-app": "squid"}
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def namespace(self):
+        return self._namespace
+
+    @property
+    def labels(self):
+        return self._labels
+
+    @property
+    def annotations(self):
+        return self._annotations
+
+
+class FakeK8sStorageClass:
+    def __init__(self, metadata=None):
+        self._metadata = metadata or FakeK8sResourceMetadata()
+
+    @property
+    def metadata(self):
+        return self._metadata
+
+
+class FakeK8sStorageClassesList:
+    def __init__(self, items=[]):
+        self._items = items
+
+    @property
+    def items(self):
+        return self._items
+
+
+class FakeK8sServiceAccountsList:
+    def __init__(self, items=[]):
+        self._items = items
+
+    @property
+    def items(self):
+        return self._items
+
+
+class FakeK8sSecretList:
+    def __init__(self, items=[]):
+        self._items = items
+
+    @property
+    def items(self):
+        return self._items
+
+
+class FakeK8sRoleList:
+    def __init__(self, items=[]):
+        self._items = items
+
+    @property
+    def items(self):
+        return self._items
+
+
+class FakeK8sRoleBindingList:
+    def __init__(self, items=[]):
+        self._items = items
+
+    @property
+    def items(self):
+        return self._items
+
+
+class FakeK8sVersionApiCode:
+    def __init__(self, major: str, minor: str):
+        self._major = major
+        self._minor = minor
+
+    @property
+    def major(self):
+        return self._major
+
+    @property
+    def minor(self):
+        return self._minor
+
+
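+# Canned response mimicking CoreV1Api.list_service_for_all_namespaces() for a
+# single LoadBalancer service, built with the attribute-access Dict helper.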
+fake_list_services = Dict(
+    {
+        "items": [
+            Dict(
+                {
+                    "metadata": Dict(
+                        {
+                            "name": "squid",
+                            "namespace": "test",
+                            "labels": {"juju-app": "squid"},
+                        }
+                    ),
+                    "spec": Dict(
+                        {
+                            "cluster_ip": "10.152.183.79",
+                            "type": "LoadBalancer",
+                            "ports": [
+                                Dict(
+                                    {
+                                        "name": None,
+                                        "node_port": None,
+                                        "port": 30666,
+                                        "protocol": "TCP",
+                                        "target_port": 30666,
+                                    }
+                                )
+                            ],
+                        }
+                    ),
+                    "status": Dict(
+                        {
+                            "load_balancer": Dict(
+                                {
+                                    "ingress": [
+                                        Dict({"hostname": None, "ip": "192.168.0.201"})
+                                    ]
+                                }
+                            )
+                        }
+                    ),
+                }
+            )
+        ]
+    }
+)
+
+
+class KubectlTestCase(TestCase):
+    def setUp(
+        self,
+    ):
+        pass
+
+
+class FakeCoreV1Api:
+    def list_service_for_all_namespaces(self, **kwargs):
+        return fake_list_services
+
+
+class GetServices(TestCase):
+    @mock.patch("osm_lcm.n2vc.kubectl.config.load_kube_config")
+    @mock.patch("osm_lcm.n2vc.kubectl.client.CoreV1Api")
+    def setUp(self, mock_core, mock_config):
+        mock_core.return_value = mock.MagicMock()
+        mock_config.return_value = mock.MagicMock()
+        self.kubectl = Kubectl()
+
+    @mock.patch("osm_lcm.n2vc.kubectl.client.CoreV1Api")
+    def test_get_service(self, mock_corev1api):
+        mock_corev1api.return_value = FakeCoreV1Api()
+        services = self.kubectl.get_services(
+            field_selector="metadata.namespace", label_selector="juju-operator=squid"
+        )
+        keys = ["name", "cluster_ip", "type", "ports", "external_ip"]
+        self.assertTrue(all(k in service for service in services for k in keys))
+
+    def test_get_service_exception(self):
+        self.kubectl.clients[
+            CORE_CLIENT
+        ].list_service_for_all_namespaces.side_effect = ApiException()
+        with self.assertRaises(ApiException):
+            self.kubectl.get_services()
+
+
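+# Accessing Kubectl.configuration should load the kubeconfig and initialize
+# the CoreV1, RbacAuthorizationV1 and StorageV1 API clients exactly once.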
+@mock.patch("osm_lcm.n2vc.kubectl.client")
+@mock.patch("osm_lcm.n2vc.kubectl.config.kube_config.Configuration.get_default_copy")
+@mock.patch("osm_lcm.n2vc.kubectl.config.load_kube_config")
+class GetConfiguration(KubectlTestCase):
+    def setUp(self):
+        super(GetConfiguration, self).setUp()
+
+    def test_get_configuration(
+        self,
+        mock_load_kube_config,
+        mock_configuration,
+        mock_client,
+    ):
+        kubectl = Kubectl()
+        kubectl.configuration
+        mock_configuration.assert_called_once()
+        mock_load_kube_config.assert_called_once()
+        mock_client.CoreV1Api.assert_called_once()
+        mock_client.RbacAuthorizationV1Api.assert_called_once()
+        mock_client.StorageV1Api.assert_called_once()
+
+
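+# Kubernetes marks the default StorageClass with the
+# "storageclass.kubernetes.io/is-default-class" annotation (or its legacy
+# "storageclass.beta.kubernetes.io" form); get_default_storage_class() must
+# honour both, and the tests also cover the fallback when no class carries
+# the annotation.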
+@mock.patch("kubernetes.client.StorageV1Api.list_storage_class")
+@mock.patch("kubernetes.config.load_kube_config")
+class GetDefaultStorageClass(KubectlTestCase):
+    def setUp(self):
+        super(GetDefaultStorageClass, self).setUp()
+
+        # Default Storage Class
+        self.default_sc_name = "default-sc"
+        default_sc_metadata = FakeK8sResourceMetadata(
+            name=self.default_sc_name,
+            annotations={"storageclass.kubernetes.io/is-default-class": "true"},
+        )
+        self.default_sc = FakeK8sStorageClass(metadata=default_sc_metadata)
+
+        # Default Storage Class with old annotation
+        self.default_sc_old_name = "default-sc-old"
+        default_sc_old_metadata = FakeK8sResourceMetadata(
+            name=self.default_sc_old_name,
+            annotations={"storageclass.beta.kubernetes.io/is-default-class": "true"},
+        )
+        self.default_sc_old = FakeK8sStorageClass(metadata=default_sc_old_metadata)
+
+        # Storage class - not default
+        self.sc_name = "non-default-sc"
+        self.sc = FakeK8sStorageClass(
+            metadata=FakeK8sResourceMetadata(name=self.sc_name)
+        )
+
+    def test_get_default_storage_class_exists_default(
+        self, mock_load_kube_config, mock_list_storage_class
+    ):
+        kubectl = Kubectl()
+        items = [self.default_sc]
+        mock_list_storage_class.return_value = FakeK8sStorageClassesList(items=items)
+        sc_name = kubectl.get_default_storage_class()
+        self.assertEqual(sc_name, self.default_sc_name)
+        mock_list_storage_class.assert_called_once()
+
+    def test_get_default_storage_class_exists_default_old(
+        self, mock_load_kube_config, mock_list_storage_class
+    ):
+        kubectl = Kubectl()
+        items = [self.default_sc_old]
+        mock_list_storage_class.return_value = FakeK8sStorageClassesList(items=items)
+        sc_name = kubectl.get_default_storage_class()
+        self.assertEqual(sc_name, self.default_sc_old_name)
+        mock_list_storage_class.assert_called_once()
+
+    def test_get_default_storage_class_none(
+        self, mock_load_kube_config, mock_list_storage_class
+    ):
+        kubectl = Kubectl()
+        mock_list_storage_class.return_value = FakeK8sStorageClassesList(items=[])
+        sc_name = kubectl.get_default_storage_class()
+        self.assertIsNone(sc_name)
+        mock_list_storage_class.assert_called_once()
+
+    def test_get_default_storage_class_exists_not_default(
+        self, mock_load_kube_config, mock_list_storage_class
+    ):
+        kubectl = Kubectl()
+        items = [self.sc]
+        mock_list_storage_class.return_value = FakeK8sStorageClassesList(items=items)
+        sc_name = kubectl.get_default_storage_class()
+        self.assertEqual(sc_name, self.sc_name)
+        mock_list_storage_class.assert_called_once()
+
+    def test_get_default_storage_class_choose(
+        self, mock_load_kube_config, mock_list_storage_class
+    ):
+        kubectl = Kubectl()
+        items = [self.sc, self.default_sc]
+        mock_list_storage_class.return_value = FakeK8sStorageClassesList(items=items)
+        sc_name = kubectl.get_default_storage_class()
+        self.assertEqual(sc_name, self.default_sc_name)
+        mock_list_storage_class.assert_called_once()
+
+
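+# From Kubernetes 1.24 onwards service account token secrets are no longer
+# auto-generated, so create_service_account() must create the token secret
+# itself; on 1.23 and earlier creating the account alone is enough.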
+@mock.patch("kubernetes.client.VersionApi.get_code")
+@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_secret")
+@mock.patch("kubernetes.client.CoreV1Api.create_namespaced_secret")
+@mock.patch("kubernetes.client.CoreV1Api.create_namespaced_service_account")
+@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_service_account")
+class CreateServiceAccountClass(KubectlTestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateServiceAccountClass, self).setUp()
+        self.service_account_name = "Service_account"
+        self.labels = {"Key1": "Value1", "Key2": "Value2"}
+        self.namespace = "kubernetes"
+        self.token_id = "abc12345"
+        self.kubectl = Kubectl()
+
+    def assert_create_secret(self, mock_create_secret, secret_name):
+        annotations = {"kubernetes.io/service-account.name": self.service_account_name}
+        secret_metadata = V1ObjectMeta(
+            name=secret_name, namespace=self.namespace, annotations=annotations
+        )
+        secret_type = "kubernetes.io/service-account-token"
+        secret = V1Secret(metadata=secret_metadata, type=secret_type)
+        mock_create_secret.assert_called_once_with(self.namespace, secret)
+
+    def assert_create_service_account_v_1_24(
+        self, mock_create_service_account, secret_name
+    ):
+        service_account_metadata = V1ObjectMeta(
+            name=self.service_account_name, labels=self.labels, namespace=self.namespace
+        )
+        secrets = [V1SecretReference(name=secret_name, namespace=self.namespace)]
+        service_account = V1ServiceAccount(
+            metadata=service_account_metadata, secrets=secrets
+        )
+        mock_create_service_account.assert_called_once_with(
+            self.namespace, service_account
+        )
+
+    def assert_create_service_account_v_1_23(self, mock_create_service_account):
+        metadata = V1ObjectMeta(
+            name=self.service_account_name, labels=self.labels, namespace=self.namespace
+        )
+        service_account = V1ServiceAccount(metadata=metadata)
+        mock_create_service_account.assert_called_once_with(
+            self.namespace, service_account
+        )
+
+    @mock.patch("osm_lcm.n2vc.kubectl.uuid.uuid4")
+    def test_secret_is_created_when_k8s_1_24(
+        self,
+        mock_uuid4,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[])
+        mock_list_secret.return_value = FakeK8sSecretList(items=[])
+        mock_version.return_value = FakeK8sVersionApiCode("1", "24")
+        mock_uuid4.return_value = self.token_id
+        self.kubectl.create_service_account(
+            self.service_account_name, self.labels, self.namespace
+        )
+        secret_name = "{}-token-{}".format(self.service_account_name, self.token_id[:5])
+        self.assert_create_service_account_v_1_24(
+            mock_create_service_account, secret_name
+        )
+        self.assert_create_secret(mock_create_secret, secret_name)
+
+    def test_secret_is_not_created_when_k8s_1_23(
+        self,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[])
+        mock_version.return_value = FakeK8sVersionApiCode("1", "23+")
+        self.kubectl.create_service_account(
+            self.service_account_name, self.labels, self.namespace
+        )
+        self.assert_create_service_account_v_1_23(mock_create_service_account)
+        mock_create_secret.assert_not_called()
+        mock_list_secret.assert_not_called()
+
+    def test_raise_exception_if_service_account_already_exists(
+        self,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[1])
+        with self.assertRaises(Exception) as context:
+            self.kubectl.create_service_account(
+                self.service_account_name, self.labels, self.namespace
+            )
+        self.assertTrue(
+            "Service account with metadata.name={} already exists".format(
+                self.service_account_name
+            )
+            in str(context.exception)
+        )
+        mock_create_service_account.assert_not_called()
+        mock_create_secret.assert_not_called()
+
+    @mock.patch("osm_lcm.n2vc.kubectl.uuid.uuid4")
+    def test_raise_exception_if_secret_already_exists(
+        self,
+        mock_uuid4,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[])
+        mock_list_secret.return_value = FakeK8sSecretList(items=[1])
+        mock_version.return_value = FakeK8sVersionApiCode("1", "24+")
+        mock_uuid4.return_value = self.token_id
+        with self.assertRaises(Exception) as context:
+            self.kubectl.create_service_account(
+                self.service_account_name, self.labels, self.namespace
+            )
+        self.assertTrue(
+            "Secret with metadata.name={}-token-{} already exists".format(
+                self.service_account_name, self.token_id[:5]
+            )
+            in str(context.exception)
+        )
+        mock_create_service_account.assert_called()
+        mock_create_secret.assert_not_called()
+
+
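+# create_certificate() posts a cert-manager Certificate custom object; an
+# "AlreadyExists" ApiException is tolerated, any other error propagates.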
+@mock.patch("kubernetes.client.CustomObjectsApi.create_namespaced_custom_object")
+class CreateCertificateClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateCertificateClass, self).setUp()
+        self.namespace = "osm"
+        self.name = "test-cert"
+        self.dns_prefix = "*"
+        self.secret_name = "test-cert-secret"
+        self.usages = ["server auth"]
+        self.issuer_name = "ca-issuer"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_certificate_is_created(
+        self,
+        mock_create_certificate,
+    ):
+        with open(
+            os.path.join(
+                os.path.dirname(__file__), "testdata", "test_certificate.yaml"
+            ),
+            "r",
+        ) as test_certificate:
+            certificate_body = yaml.safe_load(test_certificate.read())
+        await self.kubectl.create_certificate(
+            namespace=self.namespace,
+            name=self.name,
+            dns_prefix=self.dns_prefix,
+            secret_name=self.secret_name,
+            usages=self.usages,
+            issuer_name=self.issuer_name,
+        )
+        mock_create_certificate.assert_called_once_with(
+            group="cert-manager.io",
+            plural="certificates",
+            version="v1",
+            body=certificate_body,
+            namespace=self.namespace,
+        )
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_no_exception_if_alreadyexists(
+        self,
+        mock_create_certificate,
+    ):
+        api_exception = ApiException()
+        api_exception.body = '{"reason": "AlreadyExists"}'
+        self.kubectl.clients[
+            CUSTOM_OBJECT_CLIENT
+        ].create_namespaced_custom_object.side_effect = api_exception
+        raised = False
+        try:
+            await self.kubectl.create_certificate(
+                namespace=self.namespace,
+                name=self.name,
+                dns_prefix=self.dns_prefix,
+                secret_name=self.secret_name,
+                usages=self.usages,
+                issuer_name=self.issuer_name,
+            )
+        except Exception:
+            raised = True
+        self.assertFalse(raised, "An exception was raised")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_other_exceptions(
+        self,
+        mock_create_certificate,
+    ):
+        self.kubectl.clients[
+            CUSTOM_OBJECT_CLIENT
+        ].create_namespaced_custom_object.side_effect = Exception()
+        with self.assertRaises(Exception):
+            await self.kubectl.create_certificate(
+                namespace=self.namespace,
+                name=self.name,
+                dns_prefix=self.dns_prefix,
+                secret_name=self.secret_name,
+                usages=self.usages,
+                issuer_name=self.issuer_name,
+            )
+
+
+@mock.patch("kubernetes.client.CustomObjectsApi.delete_namespaced_custom_object")
+class DeleteCertificateClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(DeleteCertificateClass, self).setUp()
+        self.namespace = "osm"
+        self.object_name = "test-cert"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_no_exception_if_notfound(
+        self,
+        mock_create_certificate,
+    ):
+        api_exception = ApiException()
+        api_exception.body = '{"reason": "NotFound"}'
+        self.kubectl.clients[
+            CUSTOM_OBJECT_CLIENT
+        ].delete_namespaced_custom_object.side_effect = api_exception
+        raised = False
+        try:
+            await self.kubectl.delete_certificate(
+                namespace=self.namespace,
+                object_name=self.object_name,
+            )
+        except Exception:
+            raised = True
+        self.assertFalse(raised, "An exception was raised")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_other_exceptions(
+        self,
+        mock_create_certificate,
+    ):
+        self.kubectl.clients[
+            CUSTOM_OBJECT_CLIENT
+        ].delete_namespaced_custom_object.side_effect = Exception()
+        with self.assertRaises(Exception):
+            await self.kubectl.delete_certificate(
+                namespace=self.namespace,
+                object_name=self.object_name,
+            )
+
+
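+# create_role() builds a V1Role with a single V1PolicyRule and must refuse to
+# overwrite an existing role of the same name.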
+@mock.patch("kubernetes.client.RbacAuthorizationV1Api.create_namespaced_role")
+@mock.patch("kubernetes.client.RbacAuthorizationV1Api.list_namespaced_role")
+class CreateRoleClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateRoleClass, self).setUp()
+        self.name = "role"
+        self.namespace = "osm"
+        self.resources = ["*"]
+        self.api_groups = ["*"]
+        self.verbs = ["*"]
+        self.labels = {}
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def assert_create_role(self, mock_create_role):
+        metadata = V1ObjectMeta(
+            name=self.name, labels=self.labels, namespace=self.namespace
+        )
+        role = V1Role(
+            metadata=metadata,
+            rules=[
+                V1PolicyRule(
+                    api_groups=self.api_groups,
+                    resources=self.resources,
+                    verbs=self.verbs,
+                ),
+            ],
+        )
+        await self.kubectl.create_role(
+            namespace=self.namespace,
+            api_groups=self.api_groups,
+            name=self.name,
+            resources=self.resources,
+            verbs=self.verbs,
+            labels=self.labels,
+        )
+        mock_create_role.assert_called_once_with(self.namespace, role)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_raise_exception_if_role_already_exists(
+        self,
+        mock_list_role,
+        mock_create_role,
+    ):
+        mock_list_role.return_value = FakeK8sRoleList(items=[1])
+        with self.assertRaises(Exception) as context:
+            await self.kubectl.create_role(
+                self.name,
+                self.labels,
+                self.api_groups,
+                self.resources,
+                self.verbs,
+                self.namespace,
+            )
+        self.assertTrue(
+            "Role with metadata.name={} already exists".format(self.name)
+            in str(context.exception)
+        )
+        mock_create_role.assert_not_called()
+
+
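+# create_role_binding() binds a ServiceAccount subject to a Role and, like
+# create_role(), raises if a binding with that name already exists.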
+@mock.patch("kubernetes.client.RbacAuthorizationV1Api.create_namespaced_role_binding")
+@mock.patch("kubernetes.client.RbacAuthorizationV1Api.list_namespaced_role_binding")
+class CreateRoleBindingClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateRoleBindingClass, self).setUp()
+        self.name = "rolebinding"
+        self.namespace = "osm"
+        self.role_name = "role"
+        self.sa_name = "Default"
+        self.labels = {}
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def assert_create_role_binding(self, mock_create_role_binding):
+        role_binding = V1RoleBinding(
+            metadata=V1ObjectMeta(name=self.name, labels=self.labels),
+            role_ref=V1RoleRef(kind="Role", name=self.role_name, api_group=""),
+            subjects=[
+                RbacV1Subject(
+                    kind="ServiceAccount",
+                    name=self.sa_name,
+                    namespace=self.namespace,
+                )
+            ],
+        )
+        await self.kubectl.create_role_binding(
+            namespace=self.namespace,
+            role_name=self.role_name,
+            name=self.name,
+            sa_name=self.sa_name,
+            labels=self.labels,
+        )
+        mock_create_role_binding.assert_called_once_with(self.namespace, role_binding)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_raise_exception_if_role_binding_already_exists(
+        self,
+        mock_list_role_binding,
+        mock_create_role_binding,
+    ):
+        mock_list_role_binding.return_value = FakeK8sRoleBindingList(items=[1])
+        with self.assertRaises(Exception) as context:
+            await self.kubectl.create_role_binding(
+                self.name,
+                self.role_name,
+                self.sa_name,
+                self.labels,
+                self.namespace,
+            )
+        self.assertTrue(
+            "Role Binding with metadata.name={} already exists".format(self.name)
+            in str(context.exception)
+        )
+        mock_create_role_binding.assert_not_called()
+
+
+@mock.patch("kubernetes.client.CoreV1Api.create_namespaced_secret")
+class CreateSecretClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateSecretClass, self).setUp()
+        self.name = "secret"
+        self.namespace = "osm"
+        self.data = {"test": "1234"}
+        self.secret_type = "Opaque"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def assert_create_secret(self, mock_create_secret):
+        secret_metadata = V1ObjectMeta(name=self.name, namespace=self.namespace)
+        secret = V1Secret(
+            metadata=secret_metadata,
+            data=self.data,
+            type=self.secret_type,
+        )
+        await self.kubectl.create_secret(
+            namespace=self.namespace,
+            data=self.data,
+            name=self.name,
+            secret_type=self.secret_type,
+        )
+        mock_create_secret.assert_called_once_with(self.namespace, secret)
+
+
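+# create_namespace() tolerates an "AlreadyExists" ApiException, mirroring the
+# behaviour of create_certificate() above.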
+@mock.patch("kubernetes.client.CoreV1Api.create_namespace")
+class CreateNamespaceClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateNamespaceClass, self).setUp()
+        self.namespace = "osm"
+        self.labels = {"key": "value"}
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_namespace_is_created(
+        self,
+        mock_create_namespace,
+    ):
+        metadata = V1ObjectMeta(name=self.namespace, labels=self.labels)
+        namespace = V1Namespace(
+            metadata=metadata,
+        )
+        await self.kubectl.create_namespace(
+            name=self.namespace,
+            labels=self.labels,
+        )
+        mock_create_namespace.assert_called_once_with(namespace)
+
+    async def test_namespace_is_created_default_labels(
+        self,
+        mock_create_namespace,
+    ):
+        metadata = V1ObjectMeta(name=self.namespace, labels=None)
+        namespace = V1Namespace(
+            metadata=metadata,
+        )
+        await self.kubectl.create_namespace(
+            name=self.namespace,
+        )
+        mock_create_namespace.assert_called_once_with(namespace)
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_no_exception_if_alreadyexists(
+        self,
+        mock_create_namespace,
+    ):
+        api_exception = ApiException()
+        api_exception.body = '{"reason": "AlreadyExists"}'
+        self.kubectl.clients[CORE_CLIENT].create_namespace.side_effect = api_exception
+        raised = False
+        try:
+            await self.kubectl.create_namespace(
+                name=self.namespace,
+            )
+        except Exception:
+            raised = True
+        self.assertFalse(raised, "An exception was raised")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_other_exceptions(
+        self,
+        mock_create_namespace,
+    ):
+        self.kubectl.clients[CORE_CLIENT].create_namespace.side_effect = Exception()
+        with self.assertRaises(Exception):
+            await self.kubectl.create_namespace(
+                name=self.namespace,
+            )
+
+
+@mock.patch("kubernetes.client.CoreV1Api.delete_namespace")
+class DeleteNamespaceClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(DeleteNamespaceClass, self).setUp()
+        self.namespace = "osm"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_no_exception_if_notfound(
+        self,
+        mock_delete_namespace,
+    ):
+        api_exception = ApiException()
+        api_exception.body = '{"reason": "NotFound"}'
+        self.kubectl.clients[CORE_CLIENT].delete_namespace.side_effect = api_exception
+        raised = False
+        try:
+            await self.kubectl.delete_namespace(
+                name=self.namespace,
+            )
+        except Exception:
+            raised = True
+        self.assertFalse(raised, "An exception was raised")
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_other_exceptions(
+        self,
+        mock_delete_namespace,
+    ):
+        self.kubectl.clients[CORE_CLIENT].delete_namespace.side_effect = Exception()
+        with self.assertRaises(Exception):
+            await self.kubectl.delete_namespace(
+                name=self.namespace,
+            )
+
+
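+# get_secret_content() must return the secret's data as a plain dict.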
+@mock.patch("kubernetes.client.CoreV1Api.read_namespaced_secret")
+class GetSecretContentClass(asynctest.TestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(GetSecretContentClass, self).setUp()
+        self.name = "my_secret"
+        self.namespace = "osm"
+        self.data = {"my_key": "my_value"}
+        self.type = "Opaque"
+        self.kubectl = Kubectl()
+
+    @asynctest.fail_on(active_handles=True)
+    async def test_return_type_is_dict(
+        self,
+        mock_read_namespaced_secret,
+    ):
+        metadata = V1ObjectMeta(name=self.name, namespace=self.namespace)
+        secret = V1Secret(metadata=metadata, data=self.data, type=self.type)
+        mock_read_namespaced_secret.return_value = secret
+        content = await self.kubectl.get_secret_content(self.name, self.namespace)
+        self.assertIsInstance(content, dict)
diff --git a/osm_lcm/n2vc/tests/unit/test_libjuju.py b/osm_lcm/n2vc/tests/unit/test_libjuju.py
new file mode 100644 (file)
index 0000000..c96ee3f
--- /dev/null
@@ -0,0 +1,2797 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+import asyncio
+import asynctest
+import tempfile
+from unittest.mock import Mock, patch
+import juju
+import kubernetes
+from juju.errors import JujuAPIError
+import logging
+
+from osm_lcm.n2vc.definitions import Offer, RelationEndpoint
+from osm_lcm.n2vc.tests.unit.utils import (
+    FakeApplication,
+    FakeMachine,
+    FakeManualMachine,
+    FakeUnit,
+)
+from osm_lcm.n2vc.libjuju import Libjuju
+from osm_lcm.n2vc.exceptions import (
+    JujuControllerFailedConnecting,
+    JujuMachineNotFound,
+    JujuApplicationNotFound,
+    JujuActionNotFound,
+    JujuApplicationExists,
+    JujuInvalidK8sConfiguration,
+    JujuLeaderUnitNotFound,
+    JujuError,
+)
+from osm_lcm.n2vc.k8s_juju_conn import generate_rbac_id
+from osm_lcm.n2vc.tests.unit.utils import AsyncMock
+from osm_lcm.n2vc.vca.connection import Connection
+from osm_lcm.n2vc.vca.connection_data import ConnectionData
+
+
+cacert = """-----BEGIN CERTIFICATE-----
+SOMECERT
+-----END CERTIFICATE-----"""
+
+
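+# Base fixture for the Libjuju tests: the juju Controller class is patched at
+# module level and the VCA connection is built from canned ConnectionData, so
+# the tests never reach a real controller.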
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Controller")
+class LibjujuTestCase(asynctest.TestCase):
+    @asynctest.mock.patch("osm_lcm.n2vc.vca.connection_data.base64_to_cacert")
+    def setUp(
+        self,
+        mock_base64_to_cacert=None,
+    ):
+        self.loop = asyncio.get_event_loop()
+        self.db = Mock()
+        mock_base64_to_cacert.return_value = cacert
+        # Connection._load_vca_connection_data = Mock()
+        vca_connection = Connection(AsyncMock())
+        vca_connection._data = ConnectionData(
+            **{
+                "endpoints": ["1.2.3.4:17070"],
+                "user": "user",
+                "secret": "secret",
+                "cacert": "cacert",
+                "pubkey": "pubkey",
+                "lxd-cloud": "cloud",
+                "lxd-credentials": "credentials",
+                "k8s-cloud": "k8s_cloud",
+                "k8s-credentials": "k8s_credentials",
+                "model-config": {},
+                "api-proxy": "api_proxy",
+            }
+        )
+        logging.disable(logging.CRITICAL)
+        self.libjuju = Libjuju(vca_connection)
+        self.loop.run_until_complete(self.libjuju.disconnect())
+
+
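+# get_controller() connects to the configured endpoint; a connection failure
+# must be wrapped in JujuControllerFailedConnecting after disconnecting
+# cleanly.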
+@asynctest.mock.patch("juju.controller.Controller.connect")
+@asynctest.mock.patch(
+    "juju.controller.Controller.api_endpoints",
+    new_callable=asynctest.CoroutineMock(return_value=["127.0.0.1:17070"]),
+)
+class GetControllerTest(LibjujuTestCase):
+    def setUp(self):
+        super(GetControllerTest, self).setUp()
+
+    def test_diff_endpoint(self, mock_api_endpoints, mock_connect):
+        self.libjuju.endpoints = []
+        controller = self.loop.run_until_complete(self.libjuju.get_controller())
+        self.assertIsInstance(controller, juju.controller.Controller)
+
+    @asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+    def test_exception(
+        self,
+        mock_disconnect_controller,
+        mock_api_endpoints,
+        mock_connect,
+    ):
+        self.libjuju.endpoints = []
+
+        mock_connect.side_effect = Exception()
+        controller = None
+        with self.assertRaises(JujuControllerFailedConnecting):
+            controller = self.loop.run_until_complete(self.libjuju.get_controller())
+        self.assertIsNone(controller)
+        mock_disconnect_controller.assert_called()
+
+    def test_same_endpoint_get_controller(self, mock_api_endpoints, mock_connect):
+        self.libjuju.endpoints = ["127.0.0.1:17070"]
+        controller = self.loop.run_until_complete(self.libjuju.get_controller())
+        self.assertIsInstance(controller, juju.controller.Controller)
+
+
+class DisconnectTest(LibjujuTestCase):
+    def setUp(self):
+        super(DisconnectTest, self).setUp()
+
+    @asynctest.mock.patch("juju.model.Model.disconnect")
+    def test_disconnect_model(self, mock_disconnect):
+        self.loop.run_until_complete(self.libjuju.disconnect_model(juju.model.Model()))
+        mock_disconnect.assert_called_once()
+
+    @asynctest.mock.patch("juju.controller.Controller.disconnect")
+    def test_disconnect_controller(self, mock_disconnect):
+        self.loop.run_until_complete(
+            self.libjuju.disconnect_controller(juju.controller.Controller())
+        )
+        mock_disconnect.assert_called_once()
+
+
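+# add_model() is expected to be idempotent: an existing model is left
+# untouched, while a missing one is created and both model and controller are
+# disconnected afterwards.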
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.model_exists")
+@asynctest.mock.patch("juju.controller.Controller.add_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+class AddModelTest(LibjujuTestCase):
+    def setUp(self):
+        super(AddModelTest, self).setUp()
+
+    def test_existing_model(
+        self,
+        mock_disconnect_model,
+        mock_disconnect_controller,
+        mock_add_model,
+        mock_model_exists,
+        mock_get_controller,
+    ):
+        mock_model_exists.return_value = True
+
+        # This should not raise an exception
+        self.loop.run_until_complete(self.libjuju.add_model("existing_model", "cloud"))
+
+        mock_disconnect_controller.assert_called()
+
+    # TODO Check two job executing at the same time and one returning without doing anything.
+
+    def test_non_existing_model(
+        self,
+        mock_disconnect_model,
+        mock_disconnect_controller,
+        mock_add_model,
+        mock_model_exists,
+        mock_get_controller,
+    ):
+        mock_model_exists.return_value = False
+        mock_get_controller.return_value = juju.controller.Controller()
+
+        self.loop.run_until_complete(
+            self.libjuju.add_model("nonexisting_model", Mock())
+        )
+
+        mock_add_model.assert_called_once()
+        mock_disconnect_controller.assert_called()
+        mock_disconnect_model.assert_called()
+
+
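+# get_executed_actions() walks the model's applications and joins each action
+# status with its output into a flat list of dicts; a missing model raises
+# JujuError.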
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch(
+    "juju.model.Model.applications", new_callable=asynctest.PropertyMock
+)
+@asynctest.mock.patch("juju.model.Model.get_action_status")
+@asynctest.mock.patch("juju.model.Model.get_action_output")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_actions")
+class GetExecutedActionsTest(LibjujuTestCase):
+    def setUp(self):
+        super(GetExecutedActionsTest, self).setUp()
+
+    def test_exception(
+        self,
+        mock_get_actions,
+        mock_get_action_output,
+        mock_get_action_status,
+        mock_applications,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = None
+        with self.assertRaises(JujuError):
+            self.loop.run_until_complete(self.libjuju.get_executed_actions("model"))
+
+        mock_get_controller.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_get_model.assert_called_once()
+        mock_disconnect_model.assert_not_called()
+
+    def test_success(
+        self,
+        mock_get_actions,
+        mock_get_action_output,
+        mock_get_action_status,
+        mock_applications,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_applications.return_value = {"existing_app"}
+        mock_get_actions.return_value = {"action_name": "description"}
+        mock_get_action_status.return_value = {"id": "status"}
+        mock_get_action_output.return_value = {"output": "completed"}
+
+        executed_actions = self.loop.run_until_complete(
+            self.libjuju.get_executed_actions("model")
+        )
+        expected_result = [
+            {
+                "id": "id",
+                "action": "action_name",
+                "status": "status",
+                "output": "completed",
+            }
+        ]
+        self.assertListEqual(expected_result, executed_actions)
+        self.assertIsInstance(executed_actions, list)
+
+        mock_get_controller.assert_called_once()
+        mock_get_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._get_application")
+class GetApplicationConfigsTest(LibjujuTestCase):
+    def setUp(self):
+        super(GetApplicationConfigsTest, self).setUp()
+
+    def test_exception(
+        self,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = None
+        with self.assertRaises(JujuError):
+            self.loop.run_until_complete(
+                self.libjuju.get_application_configs("model", "app")
+            )
+
+        mock_get_controller.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_get_model.assert_called_once()
+        mock_disconnect_model.assert_not_called()
+
+    def test_success(
+        self,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_application.return_value = FakeApplication()
+        application_configs = self.loop.run_until_complete(
+            self.libjuju.get_application_configs("model", "app")
+        )
+
+        self.assertEqual(application_configs, ["app_config"])
+
+        mock_get_controller.assert_called_once()
+        mock_get_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+
+@asynctest.mock.patch("juju.controller.Controller.get_model")
+class GetModelTest(LibjujuTestCase):
+    def setUp(self):
+        super(GetModelTest, self).setUp()
+
+    def test_get_model(
+        self,
+        mock_get_model,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        model = self.loop.run_until_complete(
+            self.libjuju.get_model(juju.controller.Controller(), "model")
+        )
+        self.assertIsInstance(model, juju.model.Model)
+
+
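+# model_exists() checks the controller's model list; when no controller is
+# passed it obtains one itself and disconnects it afterwards.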
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("juju.controller.Controller.list_models")
+class ModelExistsTest(LibjujuTestCase):
+    def setUp(self):
+        super(ModelExistsTest, self).setUp()
+
+    async def test_existing_model(
+        self,
+        mock_list_models,
+        mock_get_controller,
+    ):
+        mock_list_models.return_value = ["existing_model"]
+        self.assertTrue(
+            await self.libjuju.model_exists(
+                "existing_model", juju.controller.Controller()
+            )
+        )
+
+    @asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+    async def test_no_controller(
+        self,
+        mock_disconnect_controller,
+        mock_list_models,
+        mock_get_controller,
+    ):
+        mock_list_models.return_value = ["existing_model"]
+        mock_get_controller.return_value = juju.controller.Controller()
+        self.assertTrue(await self.libjuju.model_exists("existing_model"))
+        mock_disconnect_controller.assert_called_once()
+
+    async def test_non_existing_model(
+        self,
+        mock_list_models,
+        mock_get_controller,
+    ):
+        mock_list_models.return_value = ["existing_model"]
+        self.assertFalse(
+            await self.libjuju.model_exists(
+                "not_existing_model", juju.controller.Controller()
+            )
+        )
+
+
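+# get_model_status() proxies Model.get_status() and must disconnect both the
+# model and the controller on success as well as on failure.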
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("juju.model.Model.get_status")
+class GetModelStatusTest(LibjujuTestCase):
+    def setUp(self):
+        super(GetModelStatusTest, self).setUp()
+
+    def test_success(
+        self,
+        mock_get_status,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_get_status.return_value = {"status"}
+
+        status = self.loop.run_until_complete(self.libjuju.get_model_status("model"))
+
+        mock_get_status.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+        self.assertEqual(status, {"status"})
+
+    def test_exception(
+        self,
+        mock_get_status,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_get_status.side_effect = Exception()
+        status = None
+        with self.assertRaises(Exception):
+            status = self.loop.run_until_complete(
+                self.libjuju.get_model_status("model")
+            )
+
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+        self.assertIsNone(status)
+
+
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("juju.model.Model.get_machines")
+@asynctest.mock.patch("juju.model.Model.add_machine")
+@asynctest.mock.patch("osm_lcm.n2vc.juju_watcher.JujuModelWatcher.wait_for")
+class CreateMachineTest(LibjujuTestCase):
+    def setUp(self):
+        super(CreateMachineTest, self).setUp()
+
+    def test_existing_machine(
+        self,
+        mock_wait_for,
+        mock_add_machine,
+        mock_get_machines,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_get_machines.return_value = {"existing_machine": FakeMachine()}
+        machine, bool_res = self.loop.run_until_complete(
+            self.libjuju.create_machine("model", "existing_machine")
+        )
+
+        self.assertIsInstance(machine, FakeMachine)
+        self.assertFalse(bool_res)
+
+        mock_disconnect_controller.assert_called()
+        mock_disconnect_model.assert_called()
+
+    def test_non_existing_machine(
+        self,
+        mock_wait_for,
+        mock_add_machine,
+        mock_get_machines,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        machine = None
+        bool_res = None
+        mock_get_model.return_value = juju.model.Model()
+        with self.assertRaises(JujuMachineNotFound):
+            machine, bool_res = self.loop.run_until_complete(
+                self.libjuju.create_machine("model", "non_existing_machine")
+            )
+        self.assertIsNone(machine)
+        self.assertIsNone(bool_res)
+
+        mock_disconnect_controller.assert_called()
+        mock_disconnect_model.assert_called()
+
+    def test_no_machine(
+        self,
+        mock_wait_for,
+        mock_add_machine,
+        mock_get_machines,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_add_machine.return_value = FakeMachine()
+
+        machine, bool_res = self.loop.run_until_complete(
+            self.libjuju.create_machine("model")
+        )
+
+        self.assertIsInstance(machine, FakeMachine)
+        self.assertTrue(bool_res)
+
+        mock_wait_for.assert_called_once()
+        mock_add_machine.assert_called_once()
+
+        mock_disconnect_controller.assert_called()
+        mock_disconnect_model.assert_called()
+
+
+# TODO test provision machine
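+# A possible shape for the missing test, kept commented out (like the
+# DestroyMachineTest further below) because the provision_machine()
+# arguments and mocks are assumptions, not verified against libjuju.py:
+#
+# @asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+# @asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+# @asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+# @asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+# class ProvisionMachineTest(LibjujuTestCase):
+#     def test_provision_machine(
+#         self,
+#         mock_disconnect_controller,
+#         mock_disconnect_model,
+#         mock_get_model,
+#         mock_get_controller,
+#     ):
+#         mock_get_model.return_value = juju.model.Model()
+#         ...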
+
+
+@asynctest.mock.patch("os.remove")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.yaml.dump")
+@asynctest.mock.patch("builtins.open", create=True)
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.juju_watcher.JujuModelWatcher.wait_for_model")
+@asynctest.mock.patch("juju.model.Model.deploy")
+@asynctest.mock.patch("juju.model.CharmhubDeployType.resolve")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.BundleHandler")
+@asynctest.mock.patch("juju.url.URL.parse")
+class DeployTest(LibjujuTestCase):
+    def setUp(self):
+        super(DeployTest, self).setUp()
+        self.instantiation_params = {"applications": {"squid": {"scale": 2}}}
+        self.architecture = "amd64"
+        self.uri = "cs:osm"
+        self.url = AsyncMock()
+        self.url.schema = juju.url.Schema.CHARM_HUB
+        self.bundle_instance = None
+
+    def setup_bundle_download_mocks(
+        self, mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+    ):
+        mock_url_parse.return_value = self.url
+        mock_bundle.return_value = AsyncMock()
+        mock_resolve.return_value = AsyncMock()
+        mock_resolve.origin = AsyncMock()
+        mock_get_model.return_value = juju.model.Model()
+        self.bundle_instance = mock_bundle.return_value
+        self.bundle_instance.applications = {"squid"}
+
+    def assert_overlay_file_is_written(self, filename, mocked_file, mock_yaml, mock_os):
+        mocked_file.assert_called_once_with(filename, "w")
+        mock_yaml.assert_called_once_with(
+            self.instantiation_params, mocked_file.return_value.__enter__.return_value
+        )
+        mock_os.assert_called_once_with(filename)
+
+    def assert_overlay_file_is_not_written(self, mocked_file, mock_yaml, mock_os):
+        mocked_file.assert_not_called()
+        mock_yaml.assert_not_called()
+        mock_os.assert_not_called()
+
+    def assert_bundle_is_downloaded(self, mock_resolve, mock_url_parse):
+        mock_resolve.assert_called_once_with(
+            self.url, self.architecture, entity_url=self.uri
+        )
+        mock_url_parse.assert_called_once_with(self.uri)
+        self.bundle_instance.fetch_plan.assert_called_once_with(
+            self.url, mock_resolve.origin
+        )
+
+    def assert_bundle_is_not_downloaded(self, mock_resolve, mock_url_parse):
+        mock_resolve.assert_not_called()
+        mock_url_parse.assert_not_called()
+        self.bundle_instance.fetch_plan.assert_not_called()
+
+    def test_deploy(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+        model_name = "model1"
+
+        self.loop.run_until_complete(
+            self.libjuju.deploy(
+                "cs:osm",
+                model_name,
+                wait=True,
+                timeout=0,
+                instantiation_params=None,
+            )
+        )
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with("cs:osm", trust=True, overlays=[])
+        mock_wait_for_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_no_wait(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+        self.loop.run_until_complete(
+            self.libjuju.deploy(
+                "cs:osm", "model", wait=False, timeout=0, instantiation_params={}
+            )
+        )
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with("cs:osm", trust=True, overlays=[])
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_exception(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+        mock_deploy.side_effect = Exception()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(self.libjuju.deploy("cs:osm", "model"))
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once()
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_with_instantiation_params(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+        model_name = "model1"
+        expected_filename = "{}-overlay.yaml".format(model_name)
+        self.loop.run_until_complete(
+            self.libjuju.deploy(
+                self.uri,
+                model_name,
+                wait=True,
+                timeout=0,
+                instantiation_params=self.instantiation_params,
+            )
+        )
+        self.assert_overlay_file_is_written(
+            expected_filename, mocked_file, mock_yaml, mock_os
+        )
+        self.assert_bundle_is_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with(
+            self.uri, trust=True, overlays=[expected_filename]
+        )
+        mock_wait_for_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_with_instantiation_params_no_applications(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.instantiation_params = {"applications": {}}
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+
+        model_name = "model3"
+        expected_filename = "{}-overlay.yaml".format(model_name)
+        self.loop.run_until_complete(
+            self.libjuju.deploy(
+                self.uri,
+                model_name,
+                wait=False,
+                timeout=0,
+                instantiation_params=self.instantiation_params,
+            )
+        )
+
+        self.assert_overlay_file_is_written(
+            expected_filename, mocked_file, mock_yaml, mock_os
+        )
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with(
+            self.uri, trust=True, overlays=[expected_filename]
+        )
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_with_instantiation_params_applications_not_found(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.instantiation_params = {"some_key": {"squid": {"scale": 2}}}
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+
+        with self.assertRaises(JujuError):
+            self.loop.run_until_complete(
+                self.libjuju.deploy(
+                    self.uri,
+                    "model1",
+                    wait=True,
+                    timeout=0,
+                    instantiation_params=self.instantiation_params,
+                )
+            )
+
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_not_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_not_called()
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_overlay_contains_invalid_app(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+        self.bundle_instance.applications = {"new_app"}
+
+        with self.assertRaises(JujuApplicationNotFound) as error:
+            self.loop.run_until_complete(
+                self.libjuju.deploy(
+                    self.uri,
+                    "model2",
+                    wait=True,
+                    timeout=0,
+                    instantiation_params=self.instantiation_params,
+                )
+            )
+        error_msg = "Cannot find application ['squid'] in original bundle {'new_app'}"
+        self.assertEqual(str(error.exception), error_msg)
+
+        self.assert_overlay_file_is_not_written(mocked_file, mock_yaml, mock_os)
+        self.assert_bundle_is_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_not_called()
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_deploy_exception_with_instantiation_params(
+        self,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+
+        mock_deploy.side_effect = Exception()
+        model_name = "model2"
+        expected_filename = "{}-overlay.yaml".format(model_name)
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.libjuju.deploy(
+                    self.uri,
+                    model_name,
+                    instantiation_params=self.instantiation_params,
+                )
+            )
+
+        self.assert_overlay_file_is_written(
+            expected_filename, mocked_file, mock_yaml, mock_os
+        )
+        self.assert_bundle_is_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with(
+            self.uri, trust=True, overlays=[expected_filename]
+        )
+        mock_wait_for_model.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    @asynctest.mock.patch("logging.Logger.warning")
+    def test_deploy_exception_when_deleting_file_is_not_propagated(
+        self,
+        mock_warning,
+        mock_url_parse,
+        mock_bundle,
+        mock_resolve,
+        mock_deploy,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+        mocked_file,
+        mock_yaml,
+        mock_os,
+    ):
+        self.setup_bundle_download_mocks(
+            mock_url_parse, mock_bundle, mock_resolve, mock_get_model
+        )
+
+        mock_os.side_effect = OSError("Error")
+        model_name = "model2"
+        expected_filename = "{}-overlay.yaml".format(model_name)
+        self.loop.run_until_complete(
+            self.libjuju.deploy(
+                self.uri,
+                model_name,
+                instantiation_params=self.instantiation_params,
+            )
+        )
+
+        self.assert_overlay_file_is_written(
+            expected_filename, mocked_file, mock_yaml, mock_os
+        )
+        self.assert_bundle_is_downloaded(mock_resolve, mock_url_parse)
+        mock_deploy.assert_called_once_with(
+            self.uri, trust=True, overlays=[expected_filename]
+        )
+        mock_wait_for_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+        mock_warning.assert_called_with(
+            "Overlay file {} could not be removed: Error".format(expected_filename)
+        )
+
+
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch(
+    "juju.model.Model.applications", new_callable=asynctest.PropertyMock
+)
+@asynctest.mock.patch("juju.model.Model.machines", new_callable=asynctest.PropertyMock)
+@asynctest.mock.patch("juju.model.Model.deploy")
+@asynctest.mock.patch("osm_lcm.n2vc.juju_watcher.JujuModelWatcher.wait_for")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.create_machine")
+class DeployCharmTest(LibjujuTestCase):
+    def setUp(self):
+        super(DeployCharmTest, self).setUp()
+
+    def test_existing_app(
+        self,
+        mock_create_machine,
+        mock_wait_for,
+        mock_deploy,
+        mock_machines,
+        mock_applications,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_applications.return_value = {"existing_app"}
+
+        application = None
+        with self.assertRaises(JujuApplicationExists):
+            application = self.loop.run_until_complete(
+                self.libjuju.deploy_charm(
+                    "existing_app",
+                    "path",
+                    "model",
+                    "machine",
+                )
+            )
+        self.assertIsNone(application)
+
+        mock_disconnect_controller.assert_called()
+        mock_disconnect_model.assert_called()
+
+    def test_non_existing_machine(
+        self,
+        mock_create_machine,
+        mock_wait_for,
+        mock_deploy,
+        mock_machines,
+        mock_applications,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_machines.return_value = {"existing_machine": FakeMachine()}
+        application = None
+        with self.assertRaises(JujuMachineNotFound):
+            application = self.loop.run_until_complete(
+                self.libjuju.deploy_charm(
+                    "app",
+                    "path",
+                    "model",
+                    "machine",
+                )
+            )
+
+        self.assertIsNone(application)
+
+        mock_disconnect_controller.assert_called()
+        mock_disconnect_model.assert_called()
+
+    def test_2_units(
+        self,
+        mock_create_machine,
+        mock_wait_for,
+        mock_deploy,
+        mock_machines,
+        mock_applications,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_machines.return_value = {"existing_machine": FakeMachine()}
+        mock_create_machine.return_value = (FakeMachine(), "other")
+        mock_deploy.return_value = FakeApplication()
+        application = self.loop.run_until_complete(
+            self.libjuju.deploy_charm(
+                "app",
+                "path",
+                "model",
+                "existing_machine",
+                num_units=2,
+            )
+        )
+
+        self.assertIsInstance(application, FakeApplication)
+
+        mock_deploy.assert_called_once()
+        mock_wait_for.assert_called_once()
+
+        mock_create_machine.assert_called_once()
+
+        mock_disconnect_controller.assert_called()
+        mock_disconnect_model.assert_called()
+
+    def test_1_unit(
+        self,
+        mock_create_machine,
+        mock_wait_for,
+        mock_deploy,
+        mock_machines,
+        mock_applications,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_machines.return_value = {"existing_machine": FakeMachine()}
+        mock_deploy.return_value = FakeApplication()
+        application = self.loop.run_until_complete(
+            self.libjuju.deploy_charm("app", "path", "model", "existing_machine")
+        )
+
+        self.assertIsInstance(application, FakeApplication)
+
+        mock_deploy.assert_called_once()
+        mock_wait_for.assert_called_once()
+
+        mock_disconnect_controller.assert_called()
+        mock_disconnect_model.assert_called()
+
+
+@asynctest.mock.patch(
+    "juju.model.Model.applications", new_callable=asynctest.PropertyMock
+)
+class GetApplicationTest(LibjujuTestCase):
+    def setUp(self):
+        super(GetApplicationTest, self).setUp()
+
+    def test_existing_application(
+        self,
+        mock_applications,
+    ):
+        mock_applications.return_value = {"existing_app": "exists"}
+        model = juju.model.Model()
+        result = self.libjuju._get_application(model, "existing_app")
+        self.assertEqual(result, "exists")
+
+    def test_non_existing_application(
+        self,
+        mock_applications,
+    ):
+        mock_applications.return_value = {"existing_app": "exists"}
+        model = juju.model.Model()
+        result = self.libjuju._get_application(model, "nonexisting_app")
+        self.assertIsNone(result)
+
+
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._get_application")
+@asynctest.mock.patch("osm_lcm.n2vc.juju_watcher.JujuModelWatcher.wait_for")
+@asynctest.mock.patch("juju.model.Model.get_action_output")
+@asynctest.mock.patch("juju.model.Model.get_action_status")
+class ExecuteActionTest(LibjujuTestCase):
+    def setUp(self):
+        super(ExecuteActionTest, self).setUp()
+
+    def test_no_application(
+        self,
+        mock_get_action_status,
+        mock_get_action_output,
+        mock_wait_for,
+        mock__get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock__get_application.return_value = None
+        mock_get_model.return_value = juju.model.Model()
+        output = None
+        status = None
+        with self.assertRaises(JujuApplicationNotFound):
+            output, status = self.loop.run_until_complete(
+                self.libjuju.execute_action(
+                    "app",
+                    "model",
+                    "action",
+                )
+            )
+        self.assertIsNone(output)
+        self.assertIsNone(status)
+
+        mock_disconnect_controller.assert_called()
+        mock_disconnect_model.assert_called()
+
+    def test_no_action(
+        self,
+        mock_get_action_status,
+        mock_get_action_output,
+        mock_wait_for,
+        mock__get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock__get_application.return_value = FakeApplication()
+        output = None
+        status = None
+        with self.assertRaises(JujuActionNotFound):
+            output, status = self.loop.run_until_complete(
+                self.libjuju.execute_action(
+                    "app",
+                    "model",
+                    "action",
+                )
+            )
+        self.assertIsNone(output)
+        self.assertIsNone(status)
+
+        mock_disconnect_controller.assert_called()
+        mock_disconnect_model.assert_called()
+
+    @asynctest.mock.patch("asyncio.sleep")
+    @asynctest.mock.patch(
+        "osm_lcm.n2vc.tests.unit.utils.FakeUnit.is_leader_from_status"
+    )
+    def test_no_leader(
+        self,
+        mock_is_leader_from_status,
+        mock_sleep,
+        mock_get_action_status,
+        mock_get_action_output,
+        mock_wait_for,
+        mock__get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock__get_application.return_value = FakeApplication()
+        mock_is_leader_from_status.return_value = False
+        output = None
+        status = None
+        with self.assertRaises(JujuLeaderUnitNotFound):
+            output, status = self.loop.run_until_complete(
+                self.libjuju.execute_action(
+                    "app",
+                    "model",
+                    "action",
+                )
+            )
+        self.assertIsNone(output)
+        self.assertIsNone(status)
+
+        mock_disconnect_controller.assert_called()
+        mock_disconnect_model.assert_called()
+
+    def test_successful_exec(
+        self,
+        mock_get_action_status,
+        mock_get_action_output,
+        mock_wait_for,
+        mock__get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock__get_application.return_value = FakeApplication()
+        mock_get_action_output.return_value = "output"
+        mock_get_action_status.return_value = {"id": "status"}
+        output, status = self.loop.run_until_complete(
+            self.libjuju.execute_action("app", "model", "existing_action")
+        )
+        self.assertEqual(output, "output")
+        self.assertEqual(status, "status")
+
+        mock_wait_for.assert_called_once()
+
+        mock_disconnect_controller.assert_called()
+        mock_disconnect_model.assert_called()
+
+
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._get_application")
+class GetActionTest(LibjujuTestCase):
+    def setUp(self):
+        super(GetActionTest, self).setUp()
+
+    def test_exception(
+        self,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_application.side_effect = Exception()
+        actions = None
+        with self.assertRaises(Exception):
+            actions = self.loop.run_until_complete(
+                self.libjuju.get_actions("app", "model")
+            )
+
+        self.assertIsNone(actions)
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_success(
+        self,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_application.return_value = FakeApplication()
+
+        actions = self.loop.run_until_complete(self.libjuju.get_actions("app", "model"))
+
+        self.assertEqual(actions, ["existing_action"])
+
+        mock_get_controller.assert_called_once()
+        mock_get_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("juju.application.Application.get_metrics")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._get_application")
+class GetMetricsTest(LibjujuTestCase):
+    def setUp(self):
+        super(GetMetricsTest, self).setUp()
+
+    def test_get_metrics_success(
+        self,
+        mock_get_application,
+        mock_get_metrics,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_application.return_value = FakeApplication()
+        mock_get_model.return_value = juju.model.Model()
+
+        self.loop.run_until_complete(self.libjuju.get_metrics("model", "app1"))
+
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_get_metrics_exception(
+        self,
+        mock_get_application,
+        mock_get_metrics,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_get_metrics.side_effect = Exception()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(self.libjuju.get_metrics("model", "app1"))
+
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_missing_args_exception(
+        self,
+        mock_get_application,
+        mock_get_metrics,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(self.libjuju.get_metrics("", ""))
+
+        mock_get_controller.assert_not_called()
+        mock_get_model.assert_not_called()
+        mock_disconnect_controller.assert_not_called()
+        mock_disconnect_model.assert_not_called()
+
+
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("juju.model.Model.add_relation")
+class AddRelationTest(LibjujuTestCase):
+    def setUp(self):
+        super(AddRelationTest, self).setUp()
+
+    @asynctest.mock.patch("logging.Logger.warning")
+    def test_not_found(
+        self,
+        mock_warning,
+        mock_add_relation,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        # TODO in libjuju.py should this fail only with a log message?
+        result = {"error": "not found", "response": "response", "request-id": 1}
+
+        mock_get_model.return_value = juju.model.Model()
+        mock_add_relation.side_effect = JujuAPIError(result)
+
+        self.loop.run_until_complete(
+            self.libjuju.add_relation(
+                "model",
+                "app1:relation1",
+                "app2:relation2",
+            )
+        )
+
+        mock_warning.assert_called_with("Relation not found: not found")
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    @asynctest.mock.patch("logging.Logger.warning")
+    def test_not_found_in_error_code(
+        self,
+        mock_warning,
+        mock_add_relation,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        result = {
+            "error": "relation cannot be added",
+            "error-code": "not found",
+            "response": "response",
+            "request-id": 1,
+        }
+
+        mock_get_model.return_value = juju.model.Model()
+        mock_add_relation.side_effect = JujuAPIError(result)
+
+        self.loop.run_until_complete(
+            self.libjuju.add_relation(
+                "model",
+                "app1:relation1",
+                "app2:relation2",
+            )
+        )
+
+        mock_warning.assert_called_with("Relation not found: relation cannot be added")
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    @asynctest.mock.patch("logging.Logger.warning")
+    def test_already_exists(
+        self,
+        mock_warning,
+        mock_add_relation,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        # TODO in libjuju.py should this fail silently?
+        result = {"error": "already exists", "response": "response", "request-id": 1}
+
+        mock_get_model.return_value = juju.model.Model()
+        mock_add_relation.side_effect = JujuAPIError(result)
+
+        self.loop.run_until_complete(
+            self.libjuju.add_relation(
+                "model",
+                "app1:relation1",
+                "app2:relation2",
+            )
+        )
+
+        mock_warning.assert_called_with("Relation already exists: already exists")
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    @asynctest.mock.patch("logging.Logger.warning")
+    def test_already_exists_error_code(
+        self,
+        mock_warning,
+        mock_add_relation,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        result = {
+            "error": "relation cannot be added",
+            "error-code": "already exists",
+            "response": "response",
+            "request-id": 1,
+        }
+
+        mock_get_model.return_value = juju.model.Model()
+        mock_add_relation.side_effect = JujuAPIError(result)
+
+        self.loop.run_until_complete(
+            self.libjuju.add_relation(
+                "model",
+                "app1:relation1",
+                "app2:relation2",
+            )
+        )
+
+        mock_warning.assert_called_with(
+            "Relation already exists: relation cannot be added"
+        )
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_exception(
+        self,
+        mock_add_relation,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        result = {"error": "", "response": "response", "request-id": 1}
+        mock_add_relation.side_effect = JujuAPIError(result)
+
+        with self.assertRaises(JujuAPIError):
+            self.loop.run_until_complete(
+                self.libjuju.add_relation(
+                    "model",
+                    "app1:relation1",
+                    "app2:relation2",
+                )
+            )
+
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_success(
+        self,
+        mock_add_relation,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+
+        self.loop.run_until_complete(
+            self.libjuju.add_relation(
+                "model",
+                "app1:relation1",
+                "app2:relation2",
+            )
+        )
+
+        mock_add_relation.assert_called_with("app1:relation1", "app2:relation2")
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_saas(
+        self,
+        mock_add_relation,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+
+        self.loop.run_until_complete(
+            self.libjuju.add_relation(
+                "model",
+                "app1:relation1",
+                "saas_name",
+            )
+        )
+
+        mock_add_relation.assert_called_with("app1:relation1", "saas_name")
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+
+# TODO destroy_model testcase
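+# A possible shape for the missing testcase, kept commented out because the
+# destroy_model() arguments are assumptions:
+#
+# @asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+# @asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+# @asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.model_exists")
+# class DestroyModelTest(LibjujuTestCase):
+#     def test_destroy_non_existing_model(
+#         self,
+#         mock_model_exists,
+#         mock_disconnect_controller,
+#         mock_get_controller,
+#     ):
+#         mock_get_controller.return_value = juju.controller.Controller()
+#         mock_model_exists.return_value = False
+#         self.loop.run_until_complete(
+#             self.libjuju.destroy_model("model", total_timeout=3600)
+#         )
+#         mock_disconnect_controller.assert_called_once()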
+
+
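+# Note: mock.patch decorators apply bottom-up, so the innermost (last)
+# decorator supplies the first mock argument of each test method below.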
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._get_application")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+class DestroyApplicationTest(LibjujuTestCase):
+    def setUp(self):
+        super(DestroyApplicationTest, self).setUp()
+
+    def test_success(
+        self,
+        mock_disconnect_model,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_application.return_value = FakeApplication()
+        mock_get_model.return_value = None
+        self.loop.run_until_complete(
+            self.libjuju.destroy_application(
+                "existing_model",
+                "existing_app",
+                3600,
+            )
+        )
+        mock_get_application.assert_called()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_no_application(
+        self,
+        mock_disconnect_model,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = None
+        mock_get_application.return_value = None
+
+        self.loop.run_until_complete(
+            self.libjuju.destroy_application(
+                "existing_model",
+                "existing_app",
+                3600,
+            )
+        )
+        mock_get_application.assert_called()
+
+    def test_exception(
+        self,
+        mock_disconnect_model,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_application.return_value = FakeApplication()
+        mock_get_model.return_value = None
+
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.libjuju.destroy_application(
+                    "existing_model",
+                    "existing_app",
+                    0,
+                )
+            )
+        mock_get_application.assert_called_once()
+
+
+# @asynctest.mock.patch("juju.model.Model.get_machines")
+# @asynctest.mock.patch("logging.Logger.debug")
+# class DestroyMachineTest(LibjujuTestCase):
+#     def setUp(self):
+#         super(DestroyMachineTest, self).setUp()
+
+#     def test_success_manual_machine(
+#         self, mock_debug, mock_get_machines,
+#     ):
+#         mock_get_machines.side_effect = [
+#             {"machine": FakeManualMachine()},
+#             {"machine": FakeManualMachine()},
+#             {},
+#         ]
+#         self.loop.run_until_complete(
+#             self.libjuju.destroy_machine(juju.model.Model(), "machine", 2,)
+#         )
+#         calls = [
+#             asynctest.call("Waiting for machine machine is destroyed"),
+#             asynctest.call("Machine destroyed: machine"),
+#         ]
+#         mock_debug.assert_has_calls(calls)
+
+#     def test_no_machine(
+#         self, mock_debug, mock_get_machines,
+#     ):
+#         mock_get_machines.return_value = {}
+#         self.loop.run_until_complete(
+#             self.libjuju.destroy_machine(juju.model.Model(), "machine", 2)
+#         )
+#         mock_debug.assert_called_with("Machine not found: machine")
+
+
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._get_application")
+class ConfigureApplicationTest(LibjujuTestCase):
+    def setUp(self):
+        super(ConfigureApplicationTest, self).setUp()
+
+    def test_success(
+        self,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_application.return_value = FakeApplication()
+
+        self.loop.run_until_complete(
+            self.libjuju.configure_application(
+                "model",
+                "app",
+                {"config"},
+            )
+        )
+        mock_get_application.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_exception(
+        self,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_application.side_effect = Exception()
+
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.libjuju.configure_application(
+                    "model",
+                    "app",
+                    {"config"},
+                )
+            )
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_controller_exception(
+        self,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        result = {"error": "not found", "response": "response", "request-id": 1}
+
+        mock_get_controller.side_effect = JujuAPIError(result)
+
+        with self.assertRaises(JujuAPIError):
+            self.loop.run_until_complete(
+                self.libjuju.configure_application(
+                    "model",
+                    "app",
+                    {"config"},
+                )
+            )
+        mock_get_model.assert_not_called()
+        mock_disconnect_controller.assert_not_called()
+        mock_disconnect_model.assert_not_called()
+
+    def test_get_model_exception(
+        self,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        result = {"error": "not found", "response": "response", "request-id": 1}
+        mock_get_model.side_effect = JujuAPIError(result)
+
+        with self.assertRaises(JujuAPIError):
+            self.loop.run_until_complete(
+                self.libjuju.configure_application(
+                    "model",
+                    "app",
+                    {"config"},
+                )
+            )
+        mock_get_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_not_called()
+
+
+# TODO _get_api_endpoints_db test case
+# TODO _update_api_endpoints_db test case
+# TODO healthcheck test case
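+# A possible shape for the missing healthcheck testcase, kept commented out
+# because the health_check() name and signature are assumptions:
+#
+# @asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+# @asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+# class HealthcheckTest(LibjujuTestCase):
+#     def test_healthcheck(
+#         self,
+#         mock_disconnect_controller,
+#         mock_get_controller,
+#     ):
+#         mock_get_controller.return_value = juju.controller.Controller()
+#         self.loop.run_until_complete(self.libjuju.health_check())
+#         mock_get_controller.assert_called_once()
+#         mock_disconnect_controller.assert_called_once()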
+
+
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("juju.controller.Controller.list_models")
+class ListModelsTest(LibjujuTestCase):
+    def setUp(self):
+        super(ListModelsTest, self).setUp()
+
+    def test_containing(
+        self,
+        mock_list_models,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        mock_list_models.return_value = ["existingmodel"]
+        models = self.loop.run_until_complete(self.libjuju.list_models("existing"))
+
+        mock_disconnect_controller.assert_called_once()
+        self.assertEquals(models, ["existingmodel"])
+
+    def test_not_containing(
+        self,
+        mock_list_models,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        mock_list_models.return_value = ["existingmodel", "model"]
+        models = self.loop.run_until_complete(self.libjuju.list_models("mdl"))
+
+        mock_disconnect_controller.assert_called_once()
+        self.assertEqual(models, [])
+
+    def test_no_contains_arg(
+        self,
+        mock_list_models,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        mock_list_models.return_value = ["existingmodel", "model"]
+        models = self.loop.run_until_complete(self.libjuju.list_models())
+
+        mock_disconnect_controller.assert_called_once()
+        self.assertEquals(models, ["existingmodel", "model"])
+
+
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.list_models")
+class ModelsExistTest(LibjujuTestCase):
+    def setUp(self):
+        super(ModelsExistTest, self).setUp()
+
+    def test_model_names_none(self, mock_list_models):
+        mock_list_models.return_value = []
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(self.libjuju.models_exist(None))
+
+    def test_model_names_empty(self, mock_list_models):
+        mock_list_models.return_value = []
+        with self.assertRaises(Exception):
+            (exist, non_existing_models) = self.loop.run_until_complete(
+                self.libjuju.models_exist([])
+            )
+
+    def test_model_names_not_existing(self, mock_list_models):
+        mock_list_models.return_value = ["prometheus", "grafana"]
+        (exist, non_existing_models) = self.loop.run_until_complete(
+            self.libjuju.models_exist(["prometheus2", "grafana"])
+        )
+        self.assertFalse(exist)
+        self.assertEqual(non_existing_models, ["prometheus2"])
+
+    def test_model_names_exist(self, mock_list_models):
+        mock_list_models.return_value = ["prometheus", "grafana"]
+        (exist, non_existing_models) = self.loop.run_until_complete(
+            self.libjuju.models_exist(["prometheus", "grafana"])
+        )
+        self.assertTrue(exist)
+        self.assertEqual(non_existing_models, [])
+
+
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("juju.controller.Controller.list_offers")
+class ListOffersTest(LibjujuTestCase):
+    def setUp(self):
+        super(ListOffersTest, self).setUp()
+
+    def test_disconnect_controller(
+        self,
+        mock_list_offers,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        mock_list_offers.side_effect = Exception()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(self.libjuju._list_offers("model"))
+        mock_disconnect_controller.assert_called_once()
+
+    def test_empty_list(
+        self,
+        mock_list_offers,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        offer_results = Mock()
+        offer_results.results = []
+        mock_list_offers.return_value = offer_results
+        offers = self.loop.run_until_complete(self.libjuju._list_offers("model"))
+        self.assertEqual(offers, [])
+        mock_disconnect_controller.assert_called_once()
+
+    def test_non_empty_list(
+        self,
+        mock_list_offers,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        offer = Mock()
+        offer_results = Mock()
+        offer_results.results = [offer]
+        mock_list_offers.return_value = offer_results
+        offers = self.loop.run_until_complete(self.libjuju._list_offers("model"))
+        self.assertEqual(offers, [offer])
+        mock_disconnect_controller.assert_called_once()
+
+    def test_matching_offer_name(
+        self,
+        mock_list_offers,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        offer_1 = Mock()
+        offer_1.offer_name = "offer1"
+        offer_2 = Mock()
+        offer_2.offer_name = "offer2"
+        offer_results = Mock()
+        offer_results.results = [offer_1, offer_2]
+        mock_list_offers.return_value = offer_results
+        offers = self.loop.run_until_complete(
+            self.libjuju._list_offers("model", offer_name="offer2")
+        )
+        self.assertEqual(offers, [offer_2])
+        mock_disconnect_controller.assert_called_once()
+
+    def test_not_matching_offer_name(
+        self,
+        mock_list_offers,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        offer_1 = Mock()
+        offer_1.offer_name = "offer1"
+        offer_2 = Mock()
+        offer_2.offer_name = "offer2"
+        offer_results = Mock()
+        offer_results.results = [offer_1, offer_2]
+        mock_list_offers.return_value = offer_results
+        offers = self.loop.run_until_complete(
+            self.libjuju._list_offers("model", offer_name="offer3")
+        )
+        self.assertEqual(offers, [])
+        mock_disconnect_controller.assert_called_once()
+
+
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("juju.controller.Controller.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._list_offers")
+@asynctest.mock.patch("juju.model.Model.create_offer")
+class OfferTest(LibjujuTestCase):
+    def setUp(self):
+        super(OfferTest, self).setUp()
+
+    def test_offer(
+        self,
+        mock_create_offer,
+        mock__list_offers,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        controller = juju.controller.Controller()
+        model = juju.model.Model()
+        mock_get_controller.return_value = controller
+        mock_get_model.return_value = model
+        endpoint = RelationEndpoint("model.app-name.0", "vca", "endpoint")
+        self.loop.run_until_complete(self.libjuju.offer(endpoint))
+        mock_create_offer.assert_called_with(
+            "app-name:endpoint", offer_name="app-name-endpoint"
+        )
+        mock_disconnect_model.assert_called_once_with(model)
+        mock_disconnect_controller.assert_called_once_with(controller)
+
+    def test_offer_exception(
+        self,
+        mock_create_offer,
+        mock__list_offers,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        controller = juju.controller.Controller()
+        model = juju.model.Model()
+        mock_get_controller.return_value = controller
+        mock_get_model.return_value = model
+        mock__list_offers.return_value = []
+        endpoint = RelationEndpoint("model.app-name.0", "vca", "endpoint")
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(self.libjuju.offer(endpoint))
+        mock_create_offer.assert_called_with(
+            "app-name:endpoint", offer_name="app-name-endpoint"
+        )
+        mock_disconnect_model.assert_called_once_with(model)
+        mock_disconnect_controller.assert_called_once_with(controller)
+
+
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("juju.controller.Controller.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("juju.model.Model.consume")
+class ConsumeTest(LibjujuTestCase):
+    def setUp(self):
+        super(ConsumeTest, self).setUp()
+        self.offer_url = "admin/model.offer_name"
+        self.provider_libjuju = self.libjuju
+
+    def test_consume(
+        self,
+        mock_consume,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        self_controller = juju.controller.Controller()
+        provider_controller = juju.controller.Controller()
+        mock_get_controller.side_effect = [self_controller, provider_controller]
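+        # consume() touches two controllers (the consumer's own and the offer
+        # provider's), hence the two side_effect values above and the
+        # call_count == 2 assertion on disconnect_controller below.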
+        mock_get_model.return_value = juju.model.Model()
+
+        self.loop.run_until_complete(
+            self.libjuju.consume(
+                "model_name",
+                Offer(self.offer_url, vca_id="vca-id"),
+                self.provider_libjuju,
+            )
+        )
+        mock_consume.assert_called_once_with(
+            "admin/model.offer_name",
+            application_alias="offer_name-model-vca-id",
+            controller=provider_controller,
+        )
+        mock_disconnect_model.assert_called_once()
+        self.assertEqual(mock_disconnect_controller.call_count, 2)
+
+    def test_parsing_error_exception(
+        self,
+        mock_consume,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        mock_get_model.return_value = juju.model.Model()
+        mock_consume.side_effect = juju.offerendpoints.ParseError("")
+
+        with self.assertRaises(juju.offerendpoints.ParseError):
+            self.loop.run_until_complete(
+                self.libjuju.consume(
+                    "model_name", Offer(self.offer_url), self.provider_libjuju
+                )
+            )
+        mock_consume.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+        self.assertEqual(mock_disconnect_controller.call_count, 2)
+
+    def test_juju_error_exception(
+        self,
+        mock_consume,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        mock_get_model.return_value = juju.model.Model()
+        mock_consume.side_effect = juju.errors.JujuError("")
+
+        with self.assertRaises(juju.errors.JujuError):
+            self.loop.run_until_complete(
+                self.libjuju.consume(
+                    "model_name", Offer(self.offer_url), self.provider_libjuju
+                )
+            )
+        mock_consume.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+        self.assertEqual(mock_disconnect_controller.call_count, 2)
+
+    def test_juju_api_error_exception(
+        self,
+        mock_consume,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        mock_get_model.return_value = juju.model.Model()
+        mock_consume.side_effect = juju.errors.JujuAPIError(
+            {"error": "", "response": "", "request-id": ""}
+        )
+
+        with self.assertRaises(juju.errors.JujuAPIError):
+            self.loop.run_until_complete(
+                self.libjuju.consume(
+                    "model_name", Offer(self.offer_url), self.provider_libjuju
+                )
+            )
+        mock_consume.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+        self.assertEqual(mock_disconnect_controller.call_count, 2)
+
+
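+# Tests for Libjuju.add_k8s. Both add_cloud and get_k8s_cloud_credential are
+# patched, so these cases only cover argument validation (name, storage
+# class, configuration) and the delegation to those two helpers.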
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_k8s_cloud_credential")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.add_cloud")
+class AddK8sTest(LibjujuTestCase):
+    def setUp(self):
+        super(AddK8sTest, self).setUp()
+        name = "cloud"
+        rbac_id = generate_rbac_id()
+        token = "token"
+        client_cert_data = "cert"
+        configuration = kubernetes.client.configuration.Configuration()
+        storage_class = "storage_class"
+        credential_name = name
+
+        self._add_k8s_args = {
+            "name": name,
+            "rbac_id": rbac_id,
+            "token": token,
+            "client_cert_data": client_cert_data,
+            "configuration": configuration,
+            "storage_class": storage_class,
+            "credential_name": credential_name,
+        }
+
+    def test_add_k8s(self, mock_add_cloud, mock_get_k8s_cloud_credential):
+        self.loop.run_until_complete(self.libjuju.add_k8s(**self._add_k8s_args))
+        mock_add_cloud.assert_called_once()
+        mock_get_k8s_cloud_credential.assert_called_once()
+
+    def test_add_k8s_exception(self, mock_add_cloud, mock_get_k8s_cloud_credential):
+        mock_add_cloud.side_effect = Exception()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(self.libjuju.add_k8s(**self._add_k8s_args))
+        mock_add_cloud.assert_called_once()
+        mock_get_k8s_cloud_credential.assert_called_once()
+
+    def test_add_k8s_missing_name(self, mock_add_cloud, mock_get_k8s_cloud_credential):
+        self._add_k8s_args["name"] = ""
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(self.libjuju.add_k8s(**self._add_k8s_args))
+        mock_add_cloud.assert_not_called()
+
+    def test_add_k8s_missing_storage_class(
+        self, mock_add_cloud, mock_get_k8s_cloud_credential
+    ):
+        self._add_k8s_args["storage_class"] = ""
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(self.libjuju.add_k8s(**self._add_k8s_args))
+        mock_add_cloud.assert_not_called()
+
+    def test_add_k8s_missing_configuration_keys(
+        self, mock_add_cloud, mock_get_k8s_cloud_credential
+    ):
+        self._add_k8s_args["configuration"] = None
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(self.libjuju.add_k8s(**self._add_k8s_args))
+        mock_add_cloud.assert_not_called()
+
+
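+# Tests for Libjuju.add_cloud: the credential argument is optional, and the
+# controller is disconnected even when add_cloud or add_credential raises.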
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("juju.controller.Controller.add_cloud")
+@asynctest.mock.patch("juju.controller.Controller.add_credential")
+class AddCloudTest(LibjujuTestCase):
+    def setUp(self):
+        super(AddCloudTest, self).setUp()
+        self.cloud = juju.client.client.Cloud()
+        self.credential = juju.client.client.CloudCredential()
+
+    def test_add_cloud_with_credential(
+        self,
+        mock_add_credential,
+        mock_add_cloud,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+
+        cloud = self.loop.run_until_complete(
+            self.libjuju.add_cloud("cloud", self.cloud, credential=self.credential)
+        )
+        self.assertEqual(cloud, self.cloud)
+        mock_add_cloud.assert_called_once_with("cloud", self.cloud)
+        mock_add_credential.assert_called_once_with(
+            "cloud", credential=self.credential, cloud="cloud"
+        )
+        mock_disconnect_controller.assert_called_once()
+
+    def test_add_cloud_no_credential(
+        self,
+        mock_add_credential,
+        mock_add_cloud,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+
+        cloud = self.loop.run_until_complete(
+            self.libjuju.add_cloud("cloud", self.cloud)
+        )
+        self.assertEqual(cloud, self.cloud)
+        mock_add_cloud.assert_called_once_with("cloud", self.cloud)
+        mock_add_credential.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+
+    def test_add_cloud_exception(
+        self,
+        mock_add_credential,
+        mock_add_cloud,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        mock_add_cloud.side_effect = Exception()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.libjuju.add_cloud("cloud", self.cloud, credential=self.credential)
+            )
+
+        mock_add_cloud.assert_called_once_with("cloud", self.cloud)
+        mock_add_credential.assert_not_called()
+        mock_disconnect_controller.assert_called_once()
+
+    def test_add_credential_exception(
+        self,
+        mock_add_credential,
+        mock_add_cloud,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        mock_add_credential.side_effect = Exception()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.libjuju.add_cloud("cloud", self.cloud, credential=self.credential)
+            )
+
+        mock_add_cloud.assert_called_once_with("cloud", self.cloud)
+        mock_add_credential.assert_called_once_with(
+            "cloud", credential=self.credential, cloud="cloud"
+        )
+        mock_disconnect_controller.assert_called_once()
+
+
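+# Tests for Libjuju.remove_cloud, checking that the controller is released on
+# both the success and the failure path.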
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("juju.controller.Controller.remove_cloud")
+class RemoveCloudTest(LibjujuTestCase):
+    def setUp(self):
+        super(RemoveCloudTest, self).setUp()
+
+    def test_remove_cloud(
+        self,
+        mock_remove_cloud,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+
+        self.loop.run_until_complete(self.libjuju.remove_cloud("cloud"))
+        mock_remove_cloud.assert_called_once_with("cloud")
+        mock_disconnect_controller.assert_called_once()
+
+    def test_remove_cloud_exception(
+        self,
+        mock_remove_cloud,
+        mock_disconnect_controller,
+        mock_get_controller,
+    ):
+        mock_get_controller.return_value = juju.controller.Controller()
+        mock_remove_cloud.side_effect = Exception()
+
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(self.libjuju.remove_cloud("cloud"))
+        mock_remove_cloud.assert_called_once_with("cloud")
+        mock_disconnect_controller.assert_called_once()
+
+
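+# Tests for Libjuju.get_k8s_cloud_credential, one case per supported
+# authentication type (userpass, userpasswithcert, certificate) plus the
+# combinations that must be rejected with JujuInvalidK8sConfiguration.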
+@asynctest.mock.patch("kubernetes.client.configuration.Configuration")
+class GetK8sCloudCredentials(LibjujuTestCase):
+    def setUp(self):
+        super(GetK8sCloudCredentials, self).setUp()
+        self.cert_data = "cert"
+        self.token = "token"
+
+    @asynctest.mock.patch("osm_lcm.n2vc.exceptions.JujuInvalidK8sConfiguration")
+    def test_not_supported(self, mock_exception, mock_configuration):
+        mock_configuration.username = ""
+        mock_configuration.password = ""
+        mock_configuration.ssl_ca_cert = None
+        mock_configuration.cert_file = None
+        mock_configuration.key_file = None
+        exception_raised = False
+        self.token = None
+        self.cert_data = None
+        try:
+            _ = self.libjuju.get_k8s_cloud_credential(
+                mock_configuration,
+                self.cert_data,
+                self.token,
+            )
+        except JujuInvalidK8sConfiguration as e:
+            exception_raised = True
+            self.assertEqual(
+                e.message,
+                "authentication method not supported",
+            )
+        self.assertTrue(exception_raised)
+
+    def test_user_pass(self, mock_configuration):
+        mock_configuration.username = "admin"
+        mock_configuration.password = "admin"
+        mock_configuration.ssl_ca_cert = None
+        mock_configuration.cert_file = None
+        mock_configuration.key_file = None
+        self.token = None
+        self.cert_data = None
+        credential = self.libjuju.get_k8s_cloud_credential(
+            mock_configuration,
+            self.cert_data,
+            self.token,
+        )
+        self.assertEqual(
+            credential,
+            juju.client._definitions.CloudCredential(
+                attrs={"username": "admin", "password": "admin"}, auth_type="userpass"
+            ),
+        )
+
+    def test_user_pass_with_cert(self, mock_configuration):
+        mock_configuration.username = "admin"
+        mock_configuration.password = "admin"
+        mock_configuration.ssl_ca_cert = None
+        mock_configuration.cert_file = None
+        mock_configuration.key_file = None
+        self.token = None
+        credential = self.libjuju.get_k8s_cloud_credential(
+            mock_configuration,
+            self.cert_data,
+            self.token,
+        )
+        self.assertEqual(
+            credential,
+            juju.client._definitions.CloudCredential(
+                attrs={
+                    "ClientCertificateData": self.cert_data,
+                    "username": "admin",
+                    "password": "admin",
+                },
+                auth_type="userpasswithcert",
+            ),
+        )
+
+    def test_user_no_pass(self, mock_configuration):
+        mock_configuration.username = "admin"
+        mock_configuration.password = ""
+        mock_configuration.ssl_ca_cert = None
+        mock_configuration.cert_file = None
+        mock_configuration.key_file = None
+        self.token = None
+        self.cert_data = None
+        with patch.object(self.libjuju.log, "debug") as mock_debug:
+            credential = self.libjuju.get_k8s_cloud_credential(
+                mock_configuration,
+                self.cert_data,
+                self.token,
+            )
+            self.assertEqual(
+                credential,
+                juju.client._definitions.CloudCredential(
+                    attrs={"username": "admin", "password": ""}, auth_type="userpass"
+                ),
+            )
+            mock_debug.assert_called_once_with(
+                "credential for user admin has empty password"
+            )
+
+    def test_cert(self, mock_configuration):
+        mock_configuration.username = ""
+        mock_configuration.password = ""
+        mock_configuration.api_key = {"authorization": "Bearer Token"}
+        ssl_ca_cert = tempfile.NamedTemporaryFile()
+        with open(ssl_ca_cert.name, "w") as ssl_ca_cert_file:
+            ssl_ca_cert_file.write("cacert")
+        mock_configuration.ssl_ca_cert = ssl_ca_cert.name
+        mock_configuration.cert_file = None
+        mock_configuration.key_file = None
+        credential = self.libjuju.get_k8s_cloud_credential(
+            mock_configuration,
+            self.cert_data,
+            self.token,
+        )
+        self.assertEqual(
+            credential,
+            juju.client._definitions.CloudCredential(
+                attrs={"ClientCertificateData": self.cert_data, "Token": self.token},
+                auth_type="certificate",
+            ),
+        )
+
+    # TODO: Fix this test when oauth authentication is supported
+    # def test_oauth2(self, mock_configuration):
+    #     mock_configuration.username = ""
+    #     mock_configuration.password = ""
+    #     mock_configuration.api_key = {"authorization": "Bearer Token"}
+    #     key = tempfile.NamedTemporaryFile()
+    #     with open(key.name, "w") as key_file:
+    #         key_file.write("key")
+    #     mock_configuration.ssl_ca_cert = None
+    #     mock_configuration.cert_file = None
+    #     mock_configuration.key_file = key.name
+    #     credential = self.libjuju.get_k8s_cloud_credential(
+    #         mock_configuration,
+    #         self.cert_data,
+    #         self.token,
+    #     )
+    #     self.assertEqual(
+    #         credential,
+    #         juju.client._definitions.CloudCredential(
+    #             attrs={"ClientKeyData": "key", "Token": "Token"},
+    #             auth_type="oauth2",
+    #         ),
+    #     )
+
+    # @asynctest.mock.patch("osm_lcm.n2vc.exceptions.JujuInvalidK8sConfiguration")
+    # def test_oauth2_missing_token(self, mock_exception, mock_configuration):
+    #     mock_configuration.username = ""
+    #     mock_configuration.password = ""
+    #     key = tempfile.NamedTemporaryFile()
+    #     with open(key.name, "w") as key_file:
+    #         key_file.write("key")
+    #     mock_configuration.ssl_ca_cert = None
+    #     mock_configuration.cert_file = None
+    #     mock_configuration.key_file = key.name
+    #     exception_raised = False
+    #     try:
+    #         _ = self.libjuju.get_k8s_cloud_credential(
+    #             mock_configuration,
+    #             self.cert_data,
+    #             self.token,
+    #         )
+    #     except JujuInvalidK8sConfiguration as e:
+    #         exception_raised = True
+    #         self.assertEqual(
+    #             e.message,
+    #             "missing token for auth type oauth2",
+    #         )
+    #     self.assertTrue(exception_raised)
+
+    def test_exception_cannot_set_token_and_userpass(self, mock_configuration):
+        mock_configuration.username = "admin"
+        mock_configuration.password = "pass"
+        mock_configuration.api_key = {"authorization": "No_bearer_token"}
+        mock_configuration.ssl_ca_cert = None
+        mock_configuration.cert_file = None
+        mock_configuration.key_file = None
+        exception_raised = False
+        try:
+            _ = self.libjuju.get_k8s_cloud_credential(
+                mock_configuration,
+                self.cert_data,
+                self.token,
+            )
+        except JujuInvalidK8sConfiguration as e:
+            exception_raised = True
+            self.assertEqual(
+                e.message,
+                "Cannot set both token and user/pass",
+            )
+        self.assertTrue(exception_raised)
+
+
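+# Tests for Libjuju.scale_application. JujuModelWatcher.wait_for_model is
+# patched so the scale operation is not actually awaited against a model.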
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._get_application")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.juju_watcher.JujuModelWatcher.wait_for_model")
+class ScaleApplicationTest(LibjujuTestCase):
+    def setUp(self):
+        super(ScaleApplicationTest, self).setUp()
+
+    @asynctest.mock.patch("asyncio.sleep")
+    def test_scale_application(
+        self,
+        mock_sleep,
+        mock_wait_for_model,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_application,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_get_application.return_value = FakeApplication()
+        self.loop.run_until_complete(self.libjuju.scale_application("model", "app", 2))
+        mock_wait_for_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_no_application(
+        self,
+        mock_wait_for,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_application,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_application.return_value = None
+        mock_get_model.return_value = juju.model.Model()
+        with self.assertRaises(JujuApplicationNotFound):
+            self.loop.run_until_complete(
+                self.libjuju.scale_application("model", "app", 2)
+            )
+        mock_disconnect_controller.assert_called()
+        mock_disconnect_model.assert_called()
+
+    def test_exception(
+        self,
+        mock_wait_for,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_application,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = None
+        mock_get_application.return_value = FakeApplication()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.libjuju.scale_application("model", "app", 2, total_timeout=0)
+            )
+        mock_disconnect_controller.assert_called_once()
+
+
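+# Tests for the _get_application_count helper; the FakeApplication fixture is
+# assumed to expose two units, hence the expected count of 2.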
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._get_application")
+class GetUnitNumberTest(LibjujuTestCase):
+    def setUp(self):
+        super(GetUnitNumberTest, self).setUp()
+
+    def test_successful_get_unit_number(
+        self,
+        mock_get_applications,
+    ):
+        mock_get_applications.return_value = FakeApplication()
+        model = juju.model.Model()
+        result = self.libjuju._get_application_count(model, "app")
+        self.assertEqual(result, 2)
+
+    def test_non_existing_application(
+        self,
+        mock_get_applications,
+    ):
+        mock_get_applications.return_value = None
+        model = juju.model.Model()
+        result = self.libjuju._get_application_count(model, "app")
+        self.assertEqual(result, None)
+
+
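+# Tests for the _get_machine_info helper: a known machine id resolves to a
+# (machine, series) pair, an unknown id raises JujuMachineNotFound.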
+@asynctest.mock.patch("juju.model.Model.machines", new_callable=asynctest.PropertyMock)
+class GetMachineInfoTest(LibjujuTestCase):
+    def setUp(self):
+        super(GetMachineInfoTest, self).setUp()
+
+    def test_successful(
+        self,
+        mock_machines,
+    ):
+        machine_id = "existing_machine"
+        model = juju.model.Model()
+        mock_machines.return_value = {"existing_machine": FakeManualMachine()}
+        machine, series = self.libjuju._get_machine_info(
+            machine_id=machine_id,
+            model=model,
+        )
+        self.assertIsNotNone(machine)
+        self.assertIsNotNone(series)
+
+    def test_exception(
+        self,
+        mock_machines,
+    ):
+        machine_id = "not_existing_machine"
+        machine = series = None
+        model = juju.model.Model()
+        mock_machines.return_value = {"existing_machine": FakeManualMachine()}
+        with self.assertRaises(JujuMachineNotFound):
+            machine, series = self.libjuju._get_machine_info(
+                machine_id=machine_id,
+                model=model,
+            )
+        self.assertIsNone(machine)
+        self.assertIsNone(series)
+
+
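+# Tests for the _get_unit helper, which resolves a unit by machine id and
+# returns None when no unit runs on that machine.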
+class GetUnitTest(LibjujuTestCase):
+    def setUp(self):
+        super(GetUnitTest, self).setUp()
+
+    def test_successful(self):
+        result = self.libjuju._get_unit(FakeApplication(), "existing_machine_id")
+        self.assertIsInstance(result, FakeUnit)
+
+    def test_return_none(self):
+        result = self.libjuju._get_unit(FakeApplication(), "not_existing_machine_id")
+        self.assertIsNone(result)
+
+
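+# Tests for Libjuju.check_application_exists: returns True or False depending
+# on whether _get_application finds the application, releasing the controller
+# and model in both cases.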
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._get_application")
+class CheckApplicationExists(LibjujuTestCase):
+    def setUp(self):
+        super(CheckApplicationExists, self).setUp()
+
+    def test_successful(
+        self,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_get_application.return_value = FakeApplication()
+        result = self.loop.run_until_complete(
+            self.libjuju.check_application_exists(
+                "model",
+                "app",
+            )
+        )
+        self.assertEqual(result, True)
+
+        mock_get_application.assert_called_once()
+        mock_get_controller.assert_called_once()
+        mock_get_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_no_application(
+        self,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_get_application.return_value = None
+        result = self.loop.run_until_complete(
+            self.libjuju.check_application_exists(
+                "model",
+                "app",
+            )
+        )
+        self.assertEqual(result, False)
+
+        mock_get_application.assert_called_once()
+        mock_get_controller.assert_called_once()
+        mock_get_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+
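+# Tests for Libjuju.add_unit: both the target application and the target
+# machine must exist; every path disconnects the controller and the model.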
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._get_application")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._get_machine_info")
+class AddUnitTest(LibjujuTestCase):
+    def setUp(self):
+        super(AddUnitTest, self).setUp()
+
+    @asynctest.mock.patch("osm_lcm.n2vc.juju_watcher.JujuModelWatcher.wait_for")
+    @asynctest.mock.patch("asyncio.sleep")
+    def test_successful(
+        self,
+        mock_sleep,
+        mock_wait_for,
+        mock_get_machine_info,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_get_application.return_value = FakeApplication()
+        mock_get_machine_info.return_value = FakeMachine(), "series"
+        self.loop.run_until_complete(
+            self.libjuju.add_unit(
+                "existing_app",
+                "model",
+                "machine",
+            )
+        )
+
+        mock_wait_for.assert_called_once()
+        mock_get_application.assert_called_once()
+        mock_get_controller.assert_called_once()
+        mock_get_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_no_app(
+        self,
+        mock_get_machine_info,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_get_application.return_value = None
+        with self.assertRaises(JujuApplicationNotFound):
+            self.loop.run_until_complete(
+                self.libjuju.add_unit(
+                    "existing_app",
+                    "model",
+                    "machine",
+                )
+            )
+
+        mock_get_application.assert_called_once()
+        mock_get_controller.assert_called_once()
+        mock_get_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_no_machine(
+        self,
+        mock_get_machine_info,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_get_application.return_value = FakeApplication()
+        mock_get_machine_info.side_effect = JujuMachineNotFound()
+        with self.assertRaises(JujuMachineNotFound):
+            self.loop.run_until_complete(
+                self.libjuju.add_unit(
+                    "existing_app",
+                    "model",
+                    "machine",
+                )
+            )
+
+        mock_get_application.assert_called_once()
+        mock_get_controller.assert_called_once()
+        mock_get_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+
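+# Tests for Libjuju.destroy_unit: a missing application raises
+# JujuApplicationNotFound, a missing unit raises JujuError.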
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.get_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_model")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju.disconnect_controller")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._get_application")
+@asynctest.mock.patch("osm_lcm.n2vc.libjuju.Libjuju._get_unit")
+class DestroyUnitTest(LibjujuTestCase):
+    def setUp(self):
+        super(DestroyUnitTest, self).setUp()
+
+    @asynctest.mock.patch("asyncio.sleep")
+    def test_successful(
+        self,
+        mock_sleep,
+        mock_get_unit,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_get_application.return_value = FakeApplication()
+
+        self.loop.run_until_complete(
+            self.libjuju.destroy_unit("app", "model", "machine", 0)
+        )
+
+        mock_get_unit.assert_called()
+        mock_get_application.assert_called_once()
+        mock_get_controller.assert_called_once()
+        mock_get_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_no_app(
+        self,
+        mock_get_unit,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_get_application.return_value = None
+
+        with self.assertRaises(JujuApplicationNotFound):
+            self.loop.run_until_complete(
+                self.libjuju.destroy_unit("app", "model", "machine")
+            )
+
+        mock_get_application.assert_called_once()
+        mock_get_controller.assert_called_once()
+        mock_get_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
+
+    def test_no_unit(
+        self,
+        mock_get_unit,
+        mock_get_application,
+        mock_disconnect_controller,
+        mock_disconnect_model,
+        mock_get_model,
+        mock_get_controller,
+    ):
+        mock_get_model.return_value = juju.model.Model()
+        mock_get_application.return_value = FakeApplication()
+        mock_get_unit.return_value = None
+
+        with self.assertRaises(JujuError):
+            self.loop.run_until_complete(
+                self.libjuju.destroy_unit("app", "model", "machine")
+            )
+
+        mock_get_unit.assert_called_once()
+        mock_get_application.assert_called_once()
+        mock_get_controller.assert_called_once()
+        mock_get_model.assert_called_once()
+        mock_disconnect_controller.assert_called_once()
+        mock_disconnect_model.assert_called_once()
diff --git a/osm_lcm/n2vc/tests/unit/test_n2vc_juju_conn.py b/osm_lcm/n2vc/tests/unit/test_n2vc_juju_conn.py
new file mode 100644 (file)
index 0000000..6d0363d
--- /dev/null
@@ -0,0 +1,1495 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+
+import asyncio
+import logging
+from unittest.mock import Mock, MagicMock
+from unittest.mock import patch
+
+
+import asynctest
+from osm_lcm.n2vc.definitions import Offer, RelationEndpoint
+from osm_lcm.n2vc.n2vc_juju_conn import N2VCJujuConnector
+from osm_common import fslocal
+from osm_common.dbmemory import DbMemory
+from osm_lcm.n2vc.exceptions import (
+    N2VCBadArgumentsException,
+    N2VCException,
+    JujuApplicationNotFound,
+)
+from osm_lcm.n2vc.tests.unit.utils import AsyncMock
+from osm_lcm.n2vc.vca.connection_data import ConnectionData
+from osm_lcm.n2vc.tests.unit.testdata import test_db_descriptors as descriptors
+import yaml
+
+
+class N2VCJujuConnTestCase(asynctest.TestCase):
+    @asynctest.mock.patch("osm_lcm.n2vc.n2vc_juju_conn.MotorStore")
+    @asynctest.mock.patch("osm_lcm.n2vc.n2vc_juju_conn.get_connection")
+    @asynctest.mock.patch("osm_lcm.n2vc.vca.connection_data.base64_to_cacert")
+    def setUp(
+        self, mock_base64_to_cacert=None, mock_get_connection=None, mock_store=None
+    ):
+        self.loop = asyncio.get_event_loop()
+        self.db = Mock()
+        mock_base64_to_cacert.return_value = """
+    -----BEGIN CERTIFICATE-----
+    SOMECERT
+    -----END CERTIFICATE-----"""
+        mock_store.return_value = AsyncMock()
+        mock_vca_connection = Mock()
+        mock_get_connection.return_value = mock_vca_connection
+        mock_vca_connection.data.return_value = ConnectionData(
+            **{
+                "endpoints": ["1.2.3.4:17070"],
+                "user": "user",
+                "secret": "secret",
+                "cacert": "cacert",
+                "pubkey": "pubkey",
+                "lxd-cloud": "cloud",
+                "lxd-credentials": "credentials",
+                "k8s-cloud": "k8s_cloud",
+                "k8s-credentials": "k8s_credentials",
+                "model-config": {},
+                "api-proxy": "api_proxy",
+            }
+        )
+        logging.disable(logging.CRITICAL)
+
+        N2VCJujuConnector.get_public_key = Mock()
+        self.n2vc = N2VCJujuConnector(
+            db=self.db,
+            fs=fslocal.FsLocal(),
+            log=None,
+            on_update_db=None,
+        )
+        N2VCJujuConnector.get_public_key.assert_not_called()
+        self.n2vc.libjuju = Mock()
+
+
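+# The remaining classes exercise N2VCJujuConnector on top of a mocked
+# Libjuju. get_metrics delegates to libjuju.get_metrics and propagates its
+# exceptions.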
+class GetMetricsTest(N2VCJujuConnTestCase):
+    def setUp(self):
+        super(GetMetricsTest, self).setUp()
+        self.n2vc.libjuju.get_metrics = AsyncMock()
+
+    def test_success(self):
+        _ = self.loop.run_until_complete(self.n2vc.get_metrics("model", "application"))
+        self.n2vc.libjuju.get_metrics.assert_called_once()
+
+    def test_except(self):
+        self.n2vc.libjuju.get_metrics.side_effect = Exception()
+        with self.assertRaises(Exception):
+            _ = self.loop.run_until_complete(
+                self.n2vc.get_metrics("model", "application")
+            )
+        self.n2vc.libjuju.get_metrics.assert_called_once()
+
+
+class UpdateVcaStatusTest(N2VCJujuConnTestCase):
+    def setUp(self):
+        super(UpdateVcaStatusTest, self).setUp()
+        self.n2vc.libjuju.get_controller = AsyncMock()
+        self.n2vc.libjuju.get_model = AsyncMock()
+        self.n2vc.libjuju.get_executed_actions = AsyncMock()
+        self.n2vc.libjuju.get_actions = AsyncMock()
+        self.n2vc.libjuju.get_application_configs = AsyncMock()
+        self.n2vc.libjuju._get_application = AsyncMock()
+
+    def test_success(
+        self,
+    ):
+        self.loop.run_until_complete(
+            self.n2vc.update_vca_status(
+                {"model": {"applications": {"app": {"actions": {}}}}}
+            )
+        )
+        self.n2vc.libjuju.get_executed_actions.assert_called_once()
+        self.n2vc.libjuju.get_actions.assert_called_once()
+        self.n2vc.libjuju.get_application_configs.assert_called_once()
+
+    def test_exception(self):
+        self.n2vc.libjuju.get_model.return_value = None
+        self.n2vc.libjuju.get_executed_actions.side_effect = Exception()
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(
+                self.n2vc.update_vca_status(
+                    {"model": {"applications": {"app": {"actions": {}}}}}
+                )
+            )
+            self.n2vc.libjuju.get_executed_actions.assert_not_called()
+            self.n2vc.libjuju.get_actions.assert_not_called()
+            self.n2vc.libjuju.get_application_configs.assert_not_called()
+
+
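+# Tests for install_k8s_proxy_charm, run against an in-memory database
+# preloaded with the NS and VNF records from test_db_descriptors.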
+class K8sProxyCharmsTest(N2VCJujuConnTestCase):
+    def setUp(self):
+        super(K8sProxyCharmsTest, self).setUp()
+        self.n2vc.libjuju.model_exists = AsyncMock()
+        self.n2vc.libjuju.add_model = AsyncMock()
+        self.n2vc.libjuju.deploy_charm = AsyncMock()
+        self.n2vc.libjuju.model_exists.return_value = False
+        self.db = DbMemory()
+        self.fs = fslocal.FsLocal()
+        self.fs.path = "/"
+        self.n2vc.fs = self.fs
+        self.n2vc.db = self.db
+        self.db.create_list("nsrs", yaml.safe_load(descriptors.db_nsrs_text))
+        self.db.create_list("vnfrs", yaml.safe_load(descriptors.db_vnfrs_text))
+
+    @patch(
+        "osm_lcm.n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_success(self, mock_generate_random_alfanum_string):
+        self.n2vc.fs.file_exists = MagicMock(create_autospec=True)
+        self.n2vc.fs.file_exists.return_value = True
+        ee_id = self.loop.run_until_complete(
+            self.n2vc.install_k8s_proxy_charm(
+                "simple",
+                ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
+                "path",
+                {},
+            )
+        )
+
+        self.n2vc.libjuju.add_model.assert_called_once()
+        self.n2vc.libjuju.deploy_charm.assert_called_once_with(
+            model_name="dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s",
+            application_name="simple-ee-z0-vnf1-vnf",
+            path="//path",
+            machine_id=None,
+            db_dict={},
+            progress_timeout=None,
+            total_timeout=None,
+            config=None,
+        )
+        self.assertEqual(
+            ee_id, "dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s.simple-ee-z0-vnf1-vnf.k8s"
+        )
+
+    def test_no_artifact_path(
+        self,
+    ):
+        with self.assertRaises(N2VCBadArgumentsException):
+            ee_id = self.loop.run_until_complete(
+                self.n2vc.install_k8s_proxy_charm(
+                    "simple",
+                    ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
+                    "",
+                    {},
+                )
+            )
+            self.assertIsNone(ee_id)
+
+    def test_no_db(
+        self,
+    ):
+        with self.assertRaises(N2VCBadArgumentsException):
+            ee_id = self.loop.run_until_complete(
+                self.n2vc.install_k8s_proxy_charm(
+                    "simple",
+                    ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
+                    "path",
+                    None,
+                )
+            )
+            self.assertIsNone(ee_id)
+
+    def test_file_not_exists(
+        self,
+    ):
+        self.n2vc.fs.file_exists = MagicMock(create_autospec=True)
+        self.n2vc.fs.file_exists.return_value = False
+        with self.assertRaises(N2VCBadArgumentsException):
+            ee_id = self.loop.run_until_complete(
+                self.n2vc.install_k8s_proxy_charm(
+                    "simple",
+                    ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
+                    "path",
+                    {},
+                )
+            )
+            self.assertIsNone(ee_id)
+
+    def test_exception(
+        self,
+    ):
+        self.n2vc.fs.file_exists = MagicMock(create_autospec=True)
+        self.n2vc.fs.file_exists.return_value = True
+        self.n2vc.fs.path = MagicMock(create_autospec=True)
+        self.n2vc.fs.path.return_value = "path"
+        self.n2vc.libjuju.deploy_charm.side_effect = Exception()
+        with self.assertRaises(N2VCException):
+            ee_id = self.loop.run_until_complete(
+                self.n2vc.install_k8s_proxy_charm(
+                    "simple",
+                    ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0",
+                    "path",
+                    {},
+                )
+            )
+            self.assertIsNone(ee_id)
+
+
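+# Tests for add_relation: endpoints in the same model use a direct relation,
+# while endpoints in different models (or on different VCAs) go through the
+# offer/consume cross-model-relation flow first.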
+class AddRelationTest(N2VCJujuConnTestCase):
+    def setUp(self):
+        super(AddRelationTest, self).setUp()
+        self.n2vc.libjuju.add_relation = AsyncMock()
+        self.n2vc.libjuju.offer = AsyncMock()
+        self.n2vc.libjuju.get_controller = AsyncMock()
+        self.n2vc.libjuju.consume = AsyncMock()
+
+    def test_standard_relation_same_model_and_controller(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint1")
+        relation_endpoint_2 = RelationEndpoint("model-1.app2.1", None, "endpoint2")
+        self.loop.run_until_complete(
+            self.n2vc.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.n2vc.libjuju.add_relation.assert_called_once_with(
+            model_name="model-1",
+            endpoint_1="app1:endpoint1",
+            endpoint_2="app2:endpoint2",
+        )
+        self.n2vc.libjuju.offer.assert_not_called()
+        self.n2vc.libjuju.consume.assert_not_called()
+
+    def test_cmr_relation_same_controller(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint")
+        relation_endpoint_2 = RelationEndpoint("model-2.app2.1", None, "endpoint")
+        offer = Offer("admin/model-1.app1")
+        self.n2vc.libjuju.offer.return_value = offer
+        self.n2vc.libjuju.consume.return_value = "saas"
+        self.loop.run_until_complete(
+            self.n2vc.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.n2vc.libjuju.offer.assert_called_once_with(relation_endpoint_1)
+        self.n2vc.libjuju.consume.assert_called_once()
+        self.n2vc.libjuju.add_relation.assert_called_once_with(
+            "model-2", "app2:endpoint", "saas"
+        )
+
+    def test_cmr_relation_different_controller(self):
+        self.n2vc._get_libjuju = AsyncMock(return_value=self.n2vc.libjuju)
+        relation_endpoint_1 = RelationEndpoint(
+            "model-1.app1.0", "vca-id-1", "endpoint1"
+        )
+        relation_endpoint_2 = RelationEndpoint(
+            "model-1.app2.1", "vca-id-2", "endpoint2"
+        )
+        offer = Offer("admin/model-1.app1")
+        self.n2vc.libjuju.offer.return_value = offer
+        self.n2vc.libjuju.consume.return_value = "saas"
+        self.loop.run_until_complete(
+            self.n2vc.add_relation(relation_endpoint_1, relation_endpoint_2)
+        )
+        self.n2vc.libjuju.offer.assert_called_once_with(relation_endpoint_1)
+        self.n2vc.libjuju.consume.assert_called_once()
+        self.n2vc.libjuju.add_relation.assert_called_once_with(
+            "model-1", "app2:endpoint2", "saas"
+        )
+
+    def test_relation_exception(self):
+        relation_endpoint_1 = RelationEndpoint("model-1.app1.0", None, "endpoint")
+        relation_endpoint_2 = RelationEndpoint("model-2.app2.1", None, "endpoint")
+        self.n2vc.libjuju.offer.side_effect = Exception()
+        with self.assertRaises(N2VCException):
+            self.loop.run_until_complete(
+                self.n2vc.add_relation(relation_endpoint_1, relation_endpoint_2)
+            )
+
+
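+# Tests for upgrade_charm: a missing or unparsable ee_id maps to
+# N2VCBadArgumentsException, and failures from libjuju.upgrade_charm are
+# re-raised as N2VCException.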
+class UpgradeCharmTest(N2VCJujuConnTestCase):
+    def setUp(self):
+        super(UpgradeCharmTest, self).setUp()
+        self.n2vc._get_libjuju = AsyncMock(return_value=self.n2vc.libjuju)
+        N2VCJujuConnector._get_ee_id_components = Mock()
+        self.n2vc.libjuju.upgrade_charm = AsyncMock()
+
+    def test_empty_ee_id(self):
+        with self.assertRaises(N2VCBadArgumentsException):
+            self.loop.run_until_complete(
+                self.n2vc.upgrade_charm(
+                    "", "/sample_charm_path", "sample_charm_id", "native-charm", None
+                )
+            )
+        self.n2vc._get_libjuju.assert_called()
+        self.n2vc._get_ee_id_components.assert_not_called()
+        self.n2vc.libjuju.upgrade_charm.assert_not_called()
+
+    def test_wrong_ee_id(self):
+        N2VCJujuConnector._get_ee_id_components.side_effect = Exception
+        with self.assertRaises(N2VCBadArgumentsException):
+            self.loop.run_until_complete(
+                self.n2vc.upgrade_charm(
+                    "ns-id-k8s.app-vnf-vnf-id-vdu-vdu-random.k8s",
+                    "/sample_charm_path",
+                    "sample_charm_id",
+                    "native-charm",
+                    500,
+                )
+            )
+        self.n2vc._get_libjuju.assert_called()
+        self.n2vc._get_ee_id_components.assert_called()
+        self.n2vc.libjuju.upgrade_charm.assert_not_called()
+
+    def test_charm_upgrade_succeded(self):
+        N2VCJujuConnector._get_ee_id_components.return_value = (
+            "sample_model",
+            "sample_app",
+            "sample_machine_id",
+        )
+        self.loop.run_until_complete(
+            self.n2vc.upgrade_charm(
+                "ns-id-k8s.app-vnf-vnf-id-vdu-vdu-random.k8s",
+                "/sample_charm_path",
+                "sample_charm_id",
+                "native-charm",
+                500,
+            )
+        )
+        self.n2vc._get_libjuju.assert_called()
+        self.n2vc._get_ee_id_components.assert_called()
+        self.n2vc.libjuju.upgrade_charm.assert_called_with(
+            application_name="sample_app",
+            path="/sample_charm_path",
+            model_name="sample_model",
+            total_timeout=500,
+        )
+
+    def test_charm_upgrade_failed(self):
+        N2VCJujuConnector._get_ee_id_components.return_value = (
+            "sample_model",
+            "sample_app",
+            "sample_machine_id",
+        )
+        self.n2vc.libjuju.upgrade_charm.side_effect = JujuApplicationNotFound
+        with self.assertRaises(N2VCException):
+            self.loop.run_until_complete(
+                self.n2vc.upgrade_charm(
+                    "ns-id-k8s.app-vnf-vnf-id-vdu-vdu-random.k8s",
+                    "/sample_charm_path",
+                    "sample_charm_id",
+                    "native-charm",
+                    None,
+                )
+            )
+        self.n2vc._get_libjuju.assert_called()
+        self.n2vc._get_ee_id_components.assert_called()
+        self.n2vc.libjuju.upgrade_charm.assert_called_with(
+            application_name="sample_app",
+            path="/sample_charm_path",
+            model_name="sample_model",
+            total_timeout=None,
+        )
+
+
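+# Tests for the application-name helpers. Generated names are asserted to
+# stay under 50 characters, and the charm level (ns/vnf/vdu) is derived from
+# which of vnf_id and vdu_id are present.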
+class GenerateApplicationNameTest(N2VCJujuConnTestCase):
+    vnf_id = "dbfbd751-3de4-4e68-bd40-ec5ae0a53898"
+
+    def setUp(self):
+        super(GenerateApplicationNameTest, self).setUp()
+        self.db = MagicMock(DbMemory)
+
+    @patch(
+        "osm_lcm.n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_generate_backward_compatible_application_name(
+        self, mock_generate_random_alfanum
+    ):
+        vdu_id = "mgmtVM"
+        vdu_count = "0"
+        expected_result = "app-vnf-ec5ae0a53898-vdu-mgmtVM-cnt-0-random"
+
+        application_name = self.n2vc._generate_backward_compatible_application_name(
+            GenerateApplicationNameTest.vnf_id, vdu_id, vdu_count
+        )
+        self.assertEqual(application_name, expected_result)
+
+    @patch(
+        "osm_lcm.n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_generate_backward_compatible_application_name_without_vnf_id_vdu_id(
+        self, mock_generate_random_alfanum
+    ):
+        vnf_id = None
+        vdu_id = ""
+        vdu_count = None
+        expected_result = "app--random"
+        application_name = self.n2vc._generate_backward_compatible_application_name(
+            vnf_id, vdu_id, vdu_count
+        )
+
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_find_charm_level_with_vnf_id(self):
+        vdu_id = ""
+        expected_result = "vnf-level"
+        charm_level = self.n2vc._find_charm_level(
+            GenerateApplicationNameTest.vnf_id, vdu_id
+        )
+        self.assertEqual(charm_level, expected_result)
+
+    def test_find_charm_level_with_vdu_id(self):
+        vnf_id = ""
+        vdu_id = "mgmtVM"
+        with self.assertRaises(N2VCException):
+            self.n2vc._find_charm_level(vnf_id, vdu_id)
+
+    def test_find_charm_level_with_vnf_id_and_vdu_id(self):
+        vdu_id = "mgmtVM"
+        expected_result = "vdu-level"
+        charm_level = self.n2vc._find_charm_level(
+            GenerateApplicationNameTest.vnf_id, vdu_id
+        )
+        self.assertEqual(charm_level, expected_result)
+
+    def test_find_charm_level_without_vnf_id_and_vdu_id(self):
+        vnf_id = ""
+        vdu_id = ""
+        expected_result = "ns-level"
+        charm_level = self.n2vc._find_charm_level(vnf_id, vdu_id)
+        self.assertEqual(charm_level, expected_result)
+
+    def test_generate_application_name_ns_charm(self):
+        charm_level = "ns-level"
+        vnfrs = {}
+        vca_records = [
+            {
+                "target_element": "ns",
+                "member-vnf-index": "",
+                "vdu_id": None,
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": None,
+                "vdu_name": None,
+                "type": "proxy_charm",
+                "ee_descriptor_id": None,
+                "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh",
+                "ee_id": None,
+                "application": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = ""
+        vdu_count = ""
+        vdu_id = None
+        expected_result = "simple-ns-charm-abc-000-rrrr-nnnn-4444-h-ns"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_generate_application_name_ns_charm_empty_vca_records(self):
+        charm_level = "ns-level"
+        vnfrs = {}
+        vca_records = []
+        vnf_count = ""
+        vdu_count = ""
+        vdu_id = None
+        with self.assertRaises(N2VCException):
+            self.n2vc._generate_application_name(
+                charm_level,
+                vnfrs,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+
+    def test_generate_application_name_vnf_charm(self):
+        charm_level = "vnf-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "1"
+        vdu_count = ""
+        vdu_id = None
+        expected_result = "simple-ee-ab-1-vnf111-xxx-y-vnf"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_generate_application_name_vdu_charm_kdu_name_in_vca_record_is_none(self):
+        charm_level = "vdu-level"
+        vnfrs = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtvm",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "mgmtVM",
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            },
+            {
+                "target_element": "vnf/vnf1/dataVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "dataVM",
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "datavm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            },
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtVM"
+        expected_result = "simple-ee-ab-2-vnf111-xxx-y-mgmtVM-0-vdu"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_generate_application_name_vdu_charm_vdu_id_kdu_name_in_vca_record_are_both_set(
+        self,
+    ):
+        charm_level = "vdu-level"
+        vnfrs = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "mgmtVM",
+                "kdu_name": "mgmtVM",
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            },
+            {
+                "target_element": "vnf/vnf1/dataVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "dataVM",
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "datavm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            },
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtVM"
+        expected_result = "simple-ee-ab-2-vnf111-xxx-y-mgmtVM-0-vdu"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_generate_application_name_vdu_charm_both_vdu_id_kdu_name_in_vca_record_are_none(
+        self,
+    ):
+        charm_level = "vdu-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": None,
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtVM"
+        with self.assertRaises(KeyError):
+            self.n2vc._generate_application_name(
+                charm_level,
+                vnfrs,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+
+    def test_generate_application_name_vdu_charm_given_vdu_id_is_none(self):
+        charm_level = "vdu-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtvVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": None,
+                "kdu_name": "mgmtVM",
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = None
+        with self.assertRaises(N2VCException):
+            self.n2vc._generate_application_name(
+                charm_level,
+                vnfrs,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+
+    def test_generate_application_name_vdu_charm_vdu_id_does_not_match_with_the_key_in_vca_record(
+        self,
+    ):
+        charm_level = "vdu-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": None,
+                "kdu_name": "mgmtVM",
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtvm"
+        with self.assertRaises(KeyError):
+            self.n2vc._generate_application_name(
+                charm_level,
+                vnfrs,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_id=vdu_id,
+                vdu_count=vdu_count,
+            )
+
+    def test_generate_application_name_vdu_charm_vdu_id_in_vca_record_is_none(self):
+        charm_level = "vdu-level"
+        vnfrs = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vca_records = [
+            {
+                "target_element": "vnf/vnf1/mgmtVM",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": None,
+                "kdu_name": "mgmtVM",
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vnf_count = "2"
+        vdu_count = "0"
+        vdu_id = "mgmtVM"
+        expected_result = "simple-ee-ab-2-vnf111-xxx-y-mgmtVM-0-vdu"
+        application_name = self.n2vc._generate_application_name(
+            charm_level,
+            vnfrs,
+            vca_records,
+            vnf_count=vnf_count,
+            vdu_id=vdu_id,
+            vdu_count=vdu_count,
+        )
+        self.assertEqual(application_name, expected_result)
+        self.assertLess(len(application_name), 50)
+
+    def test_get_vnf_count_db_vnfr_ns_charm(self):
+        self.db.get_one.return_value = {"member-vnf-index-ref": "sample-ref"}
+        charm_level = "ns-level"
+        vnf_id_and_count = "m7fbd751-3de4-4e68-bd40-ec5ae0a53898-4"
+        with patch.object(self.n2vc, "db", self.db):
+            vnf_count, db_vnfr = self.n2vc._get_vnf_count_and_record(
+                charm_level, vnf_id_and_count
+            )
+        self.assertEqual(vnf_count, "")
+        self.assertEqual(db_vnfr, {})
+
+    def test_get_vnf_count_db_vnfr_vnf_charm(self):
+        self.db.get_one.return_value = {"member-vnf-index-ref": "sample-ref"}
+        charm_level = "vnf-level"
+        vnf_id_and_count = "m7fbd751-3de4-4e68-bd40-ec5ae0a53898-4"
+        with patch.object(self.n2vc, "db", self.db):
+            vnf_count, db_vnfr = self.n2vc._get_vnf_count_and_record(
+                charm_level, vnf_id_and_count
+            )
+        self.assertEqual(vnf_count, "4")
+        self.assertEqual(db_vnfr, {"member-vnf-index-ref": "sample-ref"})
+
+    def test_get_vnf_count_db_vnfr_vdu_charm(self):
+        self.db.get_one.return_value = {"member-vnf-index-ref": "sample-ref"}
+        charm_level = "vdu-level"
+        vnf_id_and_count = "m7fbd751-3de4-4e68-bd40-ec5ae0a53898-2"
+        with patch.object(self.n2vc, "db", self.db):
+            vnf_count, db_vnfr = self.n2vc._get_vnf_count_and_record(
+                charm_level, vnf_id_and_count
+            )
+        self.assertEqual(vnf_count, "2")
+        self.assertEqual(db_vnfr, {"member-vnf-index-ref": "sample-ref"})
+
+    def test_get_vca_records_vdu_charm(self):
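+        # Only VCA records whose member-vnf-index matches db_vnfr and whose
+        # vdu_id appears in its vdur list should be returned.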
+        charm_level = "vdu-level"
+        db_vnfr = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        db_nsr = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "vnf/vnf2/datavm",
+                            "member-vnf-index": "vnf222-xxx-yyy-zzz",
+                            "vdu_id": "45512ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "datavm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        expected_result = [
+            {
+                "target_element": "vnf/vnf1/mgmtvm",
+                "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                "vdu_name": "mgmtvm",
+                "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr)
+        self.assertEqual(vca_records, expected_result)
+
+    def test_get_vca_records_vnf_charm_member_vnf_index_mismatch(self):
+        charm_level = "vnf-level"
+        db_vnfr = {"member-vnf-index-ref": "vnf222-xxx-yyy-zzz"}
+        db_nsr = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "45512ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "datavm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        expected_result = []
+        vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr)
+        self.assertEqual(vca_records, expected_result)
+
+    def test_get_vca_records_ns_charm(self):
+        charm_level = "ns-level"
+        db_vnfr = {"member-vnf-index-ref": "vnf222-xxx-yyy-zzz"}
+        db_nsr = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        expected_result = [
+            {
+                "target_element": "ns",
+                "member-vnf-index": None,
+                "vdu_id": None,
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "",
+                "vdu_name": "",
+                "ee_descriptor_id": "",
+                "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr)
+        self.assertEqual(vca_records, expected_result)
+
+    def test_get_vca_records_ns_charm_empty_charm_name(self):
+        charm_level = "ns-level"
+        db_vnfr = {"member-vnf-index-ref": "vnf222-xxx-yyy-zzz"}
+        db_nsr = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        expected_result = [
+            {
+                "target_element": "ns",
+                "member-vnf-index": None,
+                "vdu_id": None,
+                "kdu_name": None,
+                "vdu_count_index": None,
+                "vnfd_id": "",
+                "vdu_name": "",
+                "ee_descriptor_id": "",
+                "charm_name": "",
+                "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+            }
+        ]
+        vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr)
+        self.assertEqual(vca_records, expected_result)
+
+    def test_get_application_name_vnf_charm(self):
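+        # Namespace format: .<ns-id>.<vnf-id>-<vnf-count>; the second segment
+        # is forwarded to _get_vnf_count_and_record (asserted below).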
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "simple-ee-ab-z0-vnf111-xxx-y-vnf"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vnf-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    @patch(
+        "osm_lcm.n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_get_application_name_vnf_charm_old_naming(
+        self, mock_generate_random_alfanum
+    ):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {"member-vnf-index-ref": "vnf111-xxx-yyy-zzz"}
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "app-vnf-eb3161eec0-z0-random"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vnf-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_vnf_charm_vnf_index_ref_mismatch(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {"member-vnf-index-ref": "vnf222-xxx-yyy-zzz"}
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            with self.assertRaises(N2VCException):
+                self.n2vc._get_application_name(namespace)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vnf-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_vdu_charm(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0.mgmtVM-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtvm",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "mgmtVM",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "simple-ee-ab-z0-vnf111-xxx-y-mgmtvm-z0-vdu"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vdu-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_kdu_charm(self):
+        namespace = ".82b11965-e580-47c0-9ee0-329f318a305b.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0.ldap"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/openldap/kdu/ldap",
+                            "member-vnf-index": "openldap",
+                            "vdu_id": None,
+                            "kdu_name": "ldap",
+                            "vdu_count_index": 0,
+                            "operational-status": "init",
+                            "detailed-status": "",
+                            "step": "initial-deploy",
+                            "vnfd_id": "openldap_knf",
+                            "vdu_name": None,
+                            "type": "lxc_proxy_charm",
+                            "ee_descriptor_id": "openldap-ee",
+                            "charm_name": "",
+                            "ee_id": "",
+                            "application": "openldap-ee-z0-openldap-vdu",
+                            "model": "82b11965-e580-47c0-9ee0-329f318a305b",
+                            "config_sw_installed": True,
+                        }
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {"member-vnf-index-ref": "openldap", "vdur": {}}
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "openldap-ee-z0-openldap-ldap-vdu"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vdu-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    @patch(
+        "osm_lcm.n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_get_application_name_vdu_charm_old_naming(
+        self, mock_generate_random_alfanum
+    ):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0.mgmtVM-0"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "vnf/vnf1/mgmtVM",
+                            "member-vnf-index": "vnf111-xxx-yyy-zzz",
+                            "vdu_id": "mgmtVM",
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898",
+                            "vdu_name": "mgmtvm",
+                            "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        },
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {
+            "member-vnf-index-ref": "vnf111-xxx-yyy-zzz",
+            "vdur": [
+                {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"},
+                {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"},
+            ],
+        }
+        vnf_count = "0"
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "app-vnf-eb3161eec0-z0-vdu-mgmtvm-cnt-z0-random"
+
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with(
+                "vdu-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0"
+            )
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_ns_charm(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        }
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {}
+        vnf_count = ""
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "simple-ns-charm-abc-z000-rrrr-nnnn-z4444-h-ns"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with("ns-level", None)
+            self.db.get_one.assert_called_once()
+
+    def test_get_application_name_ns_charm_empty_charm_name(self):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "charm_name": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        }
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {}
+        vnf_count = ""
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            with self.assertRaises(N2VCException):
+                self.n2vc._get_application_name(namespace)
+            mock_vnf_count_and_record.assert_called_once_with("ns-level", None)
+            self.db.get_one.assert_called_once()
+
+    @patch(
+        "osm_lcm.n2vc.n2vc_juju_conn.generate_random_alfanum_string",
+        **{"return_value": "random"}
+    )
+    def test_get_application_name_ns_charm_old_naming(
+        self, mock_generate_random_alfanum
+    ):
+        namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898"
+        self.db.get_one.return_value = {
+            "_admin": {
+                "deployed": {
+                    "VCA": [
+                        {
+                            "target_element": "ns",
+                            "member-vnf-index": None,
+                            "vdu_id": None,
+                            "kdu_name": None,
+                            "vdu_count_index": None,
+                            "vnfd_id": "",
+                            "vdu_name": "",
+                            "ee_descriptor_id": "",
+                            "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898",
+                        }
+                    ]
+                }
+            }
+        }
+        mock_vnf_count_and_record = MagicMock()
+        db_vnfr = {}
+        vnf_count = ""
+        mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr)
+        expected_result = "app-random"
+        with patch.object(self.n2vc, "db", self.db), patch.object(
+            self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record
+        ):
+            application_name = self.n2vc._get_application_name(namespace)
+            self.assertEqual(application_name, expected_result)
+            self.assertLess(len(application_name), 50)
+            mock_vnf_count_and_record.assert_called_once_with("ns-level", None)
+            self.db.get_one.assert_called_once()
+
+
+class DeleteExecutionEnvironmentTest(N2VCJujuConnTestCase):
+    def setUp(self):
+        super(DeleteExecutionEnvironmentTest, self).setUp()
+        self.n2vc.libjuju.get_controller = AsyncMock()
+        self.n2vc.libjuju.destroy_model = AsyncMock()
+        self.n2vc.libjuju.destroy_application = AsyncMock()
+
+    def test_remove_ee__target_application_exists__model_is_deleted(self):
+        get_ee_id_components = MagicMock()
+        get_ee_id_components.return_value = ("my_model", "my_app", None)
+        model = MagicMock(create_autospec=True)
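+        # An empty applications map means the deleted application was the last
+        # one, so the model itself is expected to be destroyed as well.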
+        model.applications = {}
+        self.n2vc.libjuju.get_model = AsyncMock()
+        self.n2vc.libjuju.get_model.return_value = model
+        with patch.object(self.n2vc, "_get_ee_id_components", get_ee_id_components):
+            self.loop.run_until_complete(
+                self.n2vc.delete_execution_environment(
+                    "my_ee", application_to_delete="my_app"
+                )
+            )
+        self.n2vc.libjuju.destroy_application.assert_called_with(
+            model_name="my_model",
+            application_name="my_app",
+            total_timeout=None,
+        )
+        self.n2vc.libjuju.destroy_model.assert_called_with(
+            model_name="my_model",
+            total_timeout=None,
+        )
+
+    def test_remove_ee__multiple_applications_exist__model_is_not_deleted(self):
+        get_ee_id_components = MagicMock()
+        get_ee_id_components.return_value = ("my_model", "my_app", None)
+        model = MagicMock(create_autospec=True)
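+        # A non-empty applications collection simulates another application
+        # still deployed in the model, so the model must be kept.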
+        model.applications = {MagicMock(create_autospec=True)}
+        self.n2vc.libjuju.get_model = AsyncMock()
+        self.n2vc.libjuju.get_model.return_value = model
+        with patch.object(self.n2vc, "_get_ee_id_components", get_ee_id_components):
+            self.loop.run_until_complete(
+                self.n2vc.delete_execution_environment(
+                    "my_ee", application_to_delete="my_app"
+                )
+            )
+        self.n2vc.libjuju.destroy_application.assert_called_with(
+            model_name="my_model",
+            application_name="my_app",
+            total_timeout=None,
+        )
+        self.n2vc.libjuju.destroy_model.assert_not_called()
+
+    def test_remove_ee__target_application_does_not_exist__model_is_deleted(self):
+        get_ee_id_components = MagicMock()
+        get_ee_id_components.return_value = ("my_model", "my_app", None)
+        with patch.object(self.n2vc, "_get_ee_id_components", get_ee_id_components):
+            self.loop.run_until_complete(
+                self.n2vc.delete_execution_environment("my_ee")
+            )
+        self.n2vc.libjuju.destroy_model.assert_called_with(
+            model_name="my_model",
+            total_timeout=None,
+        )
diff --git a/osm_lcm/n2vc/tests/unit/test_provisioner.py b/osm_lcm/n2vc/tests/unit/test_provisioner.py
new file mode 100644 (file)
index 0000000..a4572b3
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+from unittest import TestCase
+
+
+class ProvisionerTest(TestCase):
+    def setUp(self):
+        pass
diff --git a/osm_lcm/n2vc/tests/unit/test_store.py b/osm_lcm/n2vc/tests/unit/test_store.py
new file mode 100644 (file)
index 0000000..56fb836
--- /dev/null
@@ -0,0 +1,295 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+import asyncio
+from base64 import b64decode
+from unittest import TestCase
+from unittest.mock import Mock, patch
+
+
+from osm_lcm.n2vc.store import DbMongoStore, MotorStore
+from osm_lcm.n2vc.vca.connection_data import ConnectionData
+from osm_lcm.n2vc.tests.unit.utils import AsyncMock
+from osm_common.dbmongo import DbException
+
+
+class TestDbMongoStore(TestCase):
+    def setUp(self):
+        self.store = DbMongoStore(Mock())
+        self.loop = asyncio.get_event_loop()
+
+    @patch("osm_lcm.n2vc.vca.connection_data.base64_to_cacert")
+    def test_get_vca_connection_data(self, mock_base64_to_cacert):
+        mock_base64_to_cacert.return_value = "cacert"
+        conn_data = {
+            "endpoints": ["1.2.3.4:17070"],
+            "user": "admin",
+            "secret": "1234",
+            "cacert": "cacert",
+            "pubkey": "pubkey",
+            "lxd-cloud": "lxd-cloud",
+            "lxd-credentials": "lxd-credentials",
+            "k8s-cloud": "k8s-cloud",
+            "k8s-credentials": "k8s-credentials",
+            "model-config": {},
+            "api-proxy": None,
+        }
+        db_get_one = conn_data.copy()
+        db_get_one.update({"schema_version": "1.1", "_id": "id"})
+        self.store.db.get_one.return_value = db_get_one
+        connection_data = self.loop.run_until_complete(
+            self.store.get_vca_connection_data("vca_id")
+        )
+        self.assertTrue(
+            all(
+                connection_data.__dict__[k.replace("-", "_")] == v
+                for k, v in conn_data.items()
+            )
+        )
+
+    def test_update_vca_endpoints(self):
+        endpoints = ["1.2.3.4:17070"]
+        self.store.db.get_one.side_effect = [None, {"api_endpoints": []}]
+        self.store.db.create.side_effect = DbException("already exists")
+        self.loop.run_until_complete(self.store.update_vca_endpoints(endpoints))
+        self.assertEqual(self.store.db.get_one.call_count, 2)
+        self.store.db.set_one.assert_called_once_with(
+            "vca", {"_id": "juju"}, {"api_endpoints": endpoints}
+        )
+
+    def test_update_vca_endpoints_exception(self):
+        endpoints = ["1.2.3.4:17070"]
+        self.store.db.get_one.side_effect = [None, None]
+        self.store.db.create.side_effect = DbException("already exists")
+        with self.assertRaises(DbException):
+            self.loop.run_until_complete(self.store.update_vca_endpoints(endpoints))
+        self.assertEqual(self.store.db.get_one.call_count, 2)
+        self.store.db.set_one.assert_not_called()
+
+    def test_update_vca_endpoints_with_vca_id(self):
+        endpoints = ["1.2.3.4:17070"]
+        self.store.db.get_one.return_value = {}
+        self.loop.run_until_complete(
+            self.store.update_vca_endpoints(endpoints, "vca_id")
+        )
+        self.store.db.get_one.assert_called_once_with("vca", q_filter={"_id": "vca_id"})
+        self.store.db.replace.assert_called_once_with(
+            "vca", "vca_id", {"endpoints": endpoints}
+        )
+
+    def test_get_vca_endpoints(self):
+        endpoints = ["1.2.3.4:17070"]
+        db_data = {"api_endpoints": endpoints}
+        db_returns = [db_data, None]
+        expected_returns = [endpoints, []]
+        returns = []
+        self.store._get_juju_info = Mock()
+        self.store._get_juju_info.side_effect = db_returns
+        for _ in db_returns:
+            e = self.loop.run_until_complete(self.store.get_vca_endpoints())
+            returns.append(e)
+        self.assertEqual(expected_returns, returns)
+
+    @patch("osm_lcm.n2vc.vca.connection_data.base64_to_cacert")
+    def test_get_vca_endpoints_with_vca_id(self, mock_base64_to_cacert):
+        expected_endpoints = ["1.2.3.4:17070"]
+        mock_base64_to_cacert.return_value = "cacert"
+        self.store.get_vca_connection_data = Mock()
+        self.store.get_vca_connection_data.return_value = ConnectionData(
+            **{
+                "endpoints": expected_endpoints,
+                "user": "admin",
+                "secret": "1234",
+                "cacert": "cacert",
+            }
+        )
+        endpoints = self.loop.run_until_complete(self.store.get_vca_endpoints("vca_id"))
+        self.store.get_vca_connection_data.assert_called_with("vca_id")
+        self.assertEqual(expected_endpoints, endpoints)
+
+    def test_get_vca_id(self):
+        self.assertIsNone(self.loop.run_until_complete(self.store.get_vca_id()))
+
+    def test_get_vca_id_with_vim_id(self):
+        self.store.db.get_one.return_value = {"vca": "vca_id"}
+        vca_id = self.loop.run_until_complete(self.store.get_vca_id("vim_id"))
+        self.store.db.get_one.assert_called_once_with(
+            "vim_accounts", q_filter={"_id": "vim_id"}, fail_on_empty=False
+        )
+        self.assertEqual(vca_id, "vca_id")
+
+
+class TestMotorStore(TestCase):
+    def setUp(self):
+        self.store = MotorStore("uri")
+        self.vca_collection = Mock()
+        self.vca_collection.find_one = AsyncMock()
+        self.vca_collection.insert_one = AsyncMock()
+        self.vca_collection.replace_one = AsyncMock()
+        self.encryption = Mock()
+        self.encryption.admin_collection = Mock()
+        self.encryption.admin_collection.find_one = AsyncMock()
+        self.admin_collection = Mock()
+        self.admin_collection.find_one = AsyncMock()
+        self.admin_collection.insert_one = AsyncMock()
+        self.admin_collection.replace_one = AsyncMock()
+        self.vim_accounts_collection = Mock()
+        self.vim_accounts_collection.find_one = AsyncMock()
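+        # Replace the Motor client with plain nested dicts so that
+        # client[database][collection] lookups resolve to the mocks above.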
+        self.store.encryption._client = {
+            "osm": {
+                "admin": self.encryption.admin_collection,
+            }
+        }
+        self.store._client = {
+            "osm": {
+                "vca": self.vca_collection,
+                "admin": self.admin_collection,
+                "vim_accounts": self.vim_accounts_collection,
+            }
+        }
+        self.store._config = {"database_commonkey": "osm"}
+        self.store.encryption._config = {"database_commonkey": "osm"}
+        self.loop = asyncio.get_event_loop()
+
+    @patch("osm_lcm.n2vc.vca.connection_data.base64_to_cacert")
+    def test_get_vca_connection_data(self, mock_base64_to_cacert):
+        mock_base64_to_cacert.return_value = "cacert"
+        conn_data = {
+            "endpoints": ["1.2.3.4:17070"],
+            "user": "admin",
+            "secret": "1234",
+            "cacert": "cacert",
+            "pubkey": "pubkey",
+            "lxd-cloud": "lxd-cloud",
+            "lxd-credentials": "lxd-credentials",
+            "k8s-cloud": "k8s-cloud",
+            "k8s-credentials": "k8s-credentials",
+            "model-config": {},
+            "api-proxy": None,
+        }
+        db_find_one = conn_data.copy()
+        db_find_one.update({"schema_version": "1.1", "_id": "id"})
+        self.vca_collection.find_one.return_value = db_find_one
+        self.store.encryption.decrypt_fields = AsyncMock()
+        connection_data = self.loop.run_until_complete(
+            self.store.get_vca_connection_data("vca_id")
+        )
+        self.assertTrue(
+            all(
+                connection_data.__dict__[k.replace("-", "_")] == v
+                for k, v in conn_data.items()
+            )
+        )
+
+    @patch("osm_lcm.n2vc.vca.connection_data.base64_to_cacert")
+    def test_get_vca_connection_data_exception(self, mock_base64_to_cacert):
+        mock_base64_to_cacert.return_value = "cacert"
+        self.vca_collection.find_one.return_value = None
+        with self.assertRaises(Exception):
+            self.loop.run_until_complete(self.store.get_vca_connection_data("vca_id"))
+
+    def test_update_vca_endpoints(self):
+        endpoints = ["1.2.3.4:17070"]
+        self.admin_collection.find_one.side_effect = [None, {"api_endpoints": []}]
+        self.admin_collection.insert_one.side_effect = DbException("already exists")
+        self.loop.run_until_complete(self.store.update_vca_endpoints(endpoints))
+        self.assertEqual(self.admin_collection.find_one.call_count, 2)
+        self.admin_collection.replace_one.assert_called_once_with(
+            {"_id": "juju"}, {"api_endpoints": ["1.2.3.4:17070"]}
+        )
+
+    def test_get_vca_connection_data_with_id(self):
+        secret = "e7b253af37785045d1ca08b8d929e556"
+        encrypted_secret = "kI46kRJh828ExSNpr16OG/q5a5/qTsE0bsHrv/W/2/g="
+        cacert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ4ekNDQWx1Z0F3SUJBZ0lVRWlzTTBoQWxiYzQ0Z1ZhZWh6bS80ZUsyNnRZd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0lURU5NQXNHQTFVRUNoTUVTblZxZFRFUU1BNEdBMVVFQXhNSGFuVnFkUzFqWVRBZUZ3MHlNVEEwTWpNeApNRFV3TXpSYUZ3MHpNVEEwTWpNeE1EVTFNelJhTUNFeERUQUxCZ05WQkFvVEJFcDFhblV4RURBT0JnTlZCQU1UCkIycDFhblV0WTJFd2dnR2lNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJqd0F3Z2dHS0FvSUJnUUNhTmFvNGZab2gKTDJWYThtdy9LdCs3RG9tMHBYTlIvbEUxSHJyVmZvbmZqZFVQV01zSHpTSjJZZXlXcUNSd3BiaHlLaE82N1c1dgpUY2RsV3Y3WGFLTGtsdVkraDBZY3BQT3BFTmZZYmxrNGk0QkV1L0wzYVY5MFFkUFFrMG94S01CS2R5QlBNZVNNCkJmS2pPWXdyOGgzM0ZWUWhmVkJnMXVGZ2tGaDdTamNuNHczUFdvc1BCMjNiVHBCbGR3VE9zemN4Qm9TaDNSVTkKTzZjb3lQdDdEN0drOCtHRlA3RGRUQTdoV1RkaUM4cDBkeHp2RUNmY0psMXNFeFEyZVprS1QvVzZyelNtVDhUTApCM0ErM1FDRDhEOEVsQU1IVy9zS25SeHphYU8welpNVmVlQnRnNlFGZ1F3M0dJMGo2ZTY0K2w3VExoOW8wSkZVCjdpUitPY01xUzVDY0NROGpWV3JPSk9Xc2dEbDZ4T2FFREczYnR5SVJHY29jbVcvcEZFQjNZd1A2S1BRTUIrNXkKWDdnZExEWmFGRFBVakZmblhkMnhHdUZlMnpRTDNVbXZEUkZuUlBBaW02QlpQbWo1OFh2emFhZXROa3lyaUZLZwp4Z0Z1dVpTcDUwV2JWdjF0MkdzOTMrRE53NlhFZHRFYnlWWUNBa28xTTY0MkozczFnN3NoQnRFQ0F3RUFBYU1qCk1DRXdEZ1lEVlIwUEFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUwKQlFBRGdnR0JBRXYxM2o2ZGFVbDBqeERPSnNTV1ZJZS9JdXNXVTRpN2ZXSWlqMHAwRU1GNS9LTE8yemRndTR5SQoreVd2T3N5aVFPanEzMlRYVlo2bTRDSnBkR1dGVE5HK2lLdXVOU3M0N3g3Q3dmVUNBWm5VVzhyamd3ZWJyS3BmCkJMNEVQcTZTcW0rSmltN0VPankyMWJkY2cyUXdZb3A3eUhvaHcveWEvL0l6RTMzVzZxNHlJeEFvNDBVYUhPTEMKTGtGbnNVYitjcFZBeFlPZGp6bjFzNWhnclpuWXlETEl3WmtIdFdEWm94alUzeC9jdnZzZ1FzLytzTWYrRFU4RgpZMkJKRHJjQ1VQM2xzclc0QVpFMFplZkEwOTlncFEvb3dSN0REYnMwSjZUeFM4NGt6Tldjc1FuWnRraXZheHJNClkyVHNnaWVndFExVFdGRWpxLy9sUFV4emJCdmpnd1FBZm5CQXZGeVNKejdTa0VuVm5rUXJGaUlUQVArTHljQVIKMlg4UFI2ZGI1bEt0SitBSENDM3kvZmNQS2k0ZzNTL3djeXRRdmdvOXJ6ODRFalp5YUNTaGJXNG9jNzNrMS9RcAowQWtHRDU0ZGVDWWVPYVJNbW96c0w3ZzdxWkpFekhtODdOcVBYSy9EZFoweWNxaVFhMXY2T3QxNjdXNUlzMUkzCjBWb0IzUzloSlE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCgo="  # noqa: E501
+        encrypted_cacert = "QeV4evTLXzcKwZZvmXQ/OvSHToXH3ISwfoLmU+Q9JlQWAFUHSJ9IhO0ewaQrJmx3NkfFb7NCxsQhh+wE57zDW4rWgn4w/SWkzvwSi1h2xYOO3ECEHzzVqgUm15Sk0xaj1Fv9Ed4hipf6PRijeOZ7A1G9zekr1w9WIvebMyJZrK+f6QJ8AP20NUZqG/3k+MeJr3kjrl+8uwU5aPOrHAexSQGAqSKTkWzW7glmlyMWTjwkuSgNVgFg0ctdWTZ5JnNwxXbpjwIKrC4E4sIHcxko2vsTeLF8pZFPk+3QUZIg8BrgtyM3lJC2kO1g3emPQhCIk3VDb5GBgssc/GyFyRXNS651d5BNgcABOKZ4Rv/gGnprB35zP7TKJKkST44XJTEBiugWMkSZg+T9H98/l3eE34O6thfTZXgIyG+ZM6uGlW2XOce0OoEIyJiEL039WJe3izjbD3b9sCCdgQc0MgS+hTaayJI6oCUWPsJLmRji19jLi/wjOsU5gPItCFWw3pBye/A4Zf8Hxm+hShvqBnk8R2yx1fPTiyw/Zx4Jn8m49XQJyjDSZnhIck0PVHR9xWzKCr++PKljLMLdkdFxVRVPFQk/FBbesqofjSXsq9DASY6ACTL3Jmignx2OXD6ac4SlBqCTjV2dIM0yEgZF7zwMNCtppRdXTV8S29JP4W2mfaiqXCUSRTggv8EYU+9diCE+8sPB6HjuLrsfiySbFlYR2m4ysDGXjsVx5CDAf0Nh4IRfcSceYnnBGIQ2sfgGcJFOZoJqr/QeE2NWz6jlWYbWT7MjS/0decpKxP7L88qrR+F48WXQvfsvjWgKjlMKw7lHmFF8FeY836VWWICTRZx+y6IlY1Ys2ML4kySF27Hal4OPhOOoBljMNMVwUEvBulOnKUWw4BGz8eGCl8Hw6tlyJdC7kcBj/aCyNCR/NnuDk4Wck6e//He8L6mS83OJi/hIFc8vYQxnCJMXj9Ou7wr5hxtBnvxXzZM3kFHxCDO24Cd5UyBV9GD8TiQJfBGAy7a2BCBMb5ESVX8NOkyyv2hXMHOjpnKhUM9yP3Ke4CBImO7mCKJNHdFVtAmuyVKJ+jT6ooAAArkX2xwEAvBEpvGNmW2jgs6wxSuKY0h5aUm0rA4v/s8fqSZhzdInB54sMldyAnt9G+9e+g933DfyA/tkc56Ed0vZ/XEvTkThVHyUbfYR/Gjsoab1RpnDBi4aZ2E7iceoBshy+L6NXdL0jlWEs4ZubiWlbVNWlN/MqJcjV/quLU7q4HtkG0MDEFm6To3o48x7xpv8otih6YBduNqBFnwQ6Qz9rM2chFgOR4IgNSZKPxHO0AGCi1gnK/CeCvrSfWYAMn+2rmw0hMZybqKMStG28+rXsKDdqmy6vAwL/+dJwkAW+ix68rWRXpeqHlWidu4SkIBELuwEkFIC/GJU/DRvcN2GG9uP1m+VFifCIS2UdiO4OVrP6PVoW1O+jBJvFH3K1YT7CRqevb9OzjS9fO1wjkOff0W8zZyJK9Mp25aynpf0k3oMpZDpjnlOsFXFUb3N6SvXD1Yi95szIlmsr5yRYaeGUJH7/SAmMr8R6RqsCR0ANptL2dtRoGPi/qcDQE15vnjJ+QMYCg9KbCdV+Qq5di93XAjmwPj6tKZv0aXQuaTZgYR7bdLmAnJaFLbHWcQG1k6F/vdKNEb7llLsoAD9KuKXPZT/LErIyKcI0RZySy9yvhTZb4jQWn17b83yfvqfd5/2NpcyaY4gNERhDRJHw7VhoS5Leai5ZnFaO3C1vU9tIJ85XgCUASTsBLoQWVCKPSQZGxzF7PVLnHui3YA5OsOQpVqAPtgGZ12tP9XkEKj+u2/Atj2bgYrqBF7zUL64X/AQpwr/UElWDhJLSD/KStVeDOUx3AwAVVi9eTUJr6NiNMutCE1sqUf9XVIddgZ/BaG5t3NV2L+T+11QzAl+Xrh8wH/XeUCTmnU3NGkvCz/9Y7PMS+qQL7T7WeGdYmEhb5s/5p/yjSYeqybr5sANOHs83OdeSXbop9cLWW+JksHmS//rHHcrrJhZgCb3P0EOpEoEMCarT6sJq0V1Hwf/YNFdJ9V7Ac654ALS+a9ffNthMUEJeY21QMtNOrEg3QH5RWBPn+yOYN/f38tzwlT1k6Ec94y/sBmeQVv8rRzkkiMSXeAL5ATdJntq8NQq5JbvLQDNnZnHQthZt+uhcUf08mWlRrxxBUaE6xLppgMqFdYSjLGvgn/d8FZ9y7UCg5ZBhgP1rrRQL1COpNKKlJLf5laqwiGAucIDmzSbhO+MidSauDLWuv+fsdd2QYk98PHxqNrPYLrlAlABFi3JEApBm4IlrGbHxKg6dRiy7L1c9xWnAD7E3XrZrSc6DXvGRsjMXWoQdlp4CX5H3cdH9sjIE6akWqiwwrOP6QTbJcxmJGv/MVhsDVrVKmrKSn2H0/Us1fyYCHCOyCSc2L96uId8i9wQO1NXj+1PJmUq3tJ8U0TUwTblOEQdYej99xEI8EzsXLjNJHCgbDygtHBYd/SHToXH3ISwfoLmU+Q9JlS1woaUpVa5sdvbsr4BXR6J"  # noqa: E501
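+        # The store must decrypt both encrypted fields with the commonkey
+        # serial served by the admin collection below before returning them.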
+        self.vca_collection.find_one.return_value = {
+            "_id": "2ade7f0e-9b58-4dbd-93a3-4ec076185d39",
+            "schema_version": "1.11",
+            "endpoints": [],
+            "user": "admin",
+            "secret": encrypted_secret,
+            "cacert": encrypted_cacert,
+        }
+        self.encryption.admin_collection.find_one.return_value = {
+            "serial": b"l+U3HDp9td+UjQ+AN+Ypj/Uh7n3C+rMJueQNNxkIpWI="
+        }
+        connection_data = self.loop.run_until_complete(
+            self.store.get_vca_connection_data("vca_id")
+        )
+        self.assertEqual(connection_data.endpoints, [])
+        self.assertEqual(connection_data.user, "admin")
+        self.assertEqual(connection_data.secret, secret)
+        self.assertEqual(
+            connection_data.cacert, b64decode(cacert.encode("utf-8")).decode("utf-8")
+        )
+
+    def test_update_vca_endpoints_exception(self):
+        endpoints = ["1.2.3.4:17070"]
+        self.admin_collection.find_one.side_effect = [None, None]
+        self.admin_collection.insert_one.side_effect = DbException("already exists")
+        with self.assertRaises(DbException):
+            self.loop.run_until_complete(self.store.update_vca_endpoints(endpoints))
+        self.assertEqual(self.admin_collection.find_one.call_count, 2)
+        self.admin_collection.replace_one.assert_not_called()
+
+    def test_update_vca_endpoints_with_vca_id(self):
+        endpoints = ["1.2.3.4:17070"]
+        self.vca_collection.find_one.return_value = {}
+        self.loop.run_until_complete(
+            self.store.update_vca_endpoints(endpoints, "vca_id")
+        )
+        self.vca_collection.find_one.assert_called_once_with({"_id": "vca_id"})
+        self.vca_collection.replace_one.assert_called_once_with(
+            {"_id": "vca_id"}, {"endpoints": endpoints}
+        )
+
+    def test_get_vca_endpoints(self):
+        endpoints = ["1.2.3.4:17070"]
+        db_data = {"api_endpoints": endpoints}
+        db_returns = [db_data, None]
+        expected_returns = [endpoints, []]
+        returns = []
+        self.admin_collection.find_one.side_effect = db_returns
+        for _ in db_returns:
+            e = self.loop.run_until_complete(self.store.get_vca_endpoints())
+            returns.append(e)
+        self.assertEqual(expected_returns, returns)
+
+    @patch("osm_lcm.n2vc.vca.connection_data.base64_to_cacert")
+    def test_get_vca_endpoints_with_vca_id(self, mock_base64_to_cacert):
+        expected_endpoints = ["1.2.3.4:17070"]
+        mock_base64_to_cacert.return_value = "cacert"
+        self.store.get_vca_connection_data = AsyncMock()
+        self.store.get_vca_connection_data.return_value = ConnectionData(
+            **{
+                "endpoints": expected_endpoints,
+                "user": "admin",
+                "secret": "1234",
+                "cacert": "cacert",
+            }
+        )
+        endpoints = self.loop.run_until_complete(self.store.get_vca_endpoints("vca_id"))
+        self.store.get_vca_connection_data.assert_called_with("vca_id")
+        self.assertEqual(expected_endpoints, endpoints)
+
+    def test_get_vca_id(self):
+        self.assertIsNone(self.loop.run_until_complete(self.store.get_vca_id()))
+
+    def test_get_vca_id_with_vim_id(self):
+        self.vim_accounts_collection.find_one.return_value = {"vca": "vca_id"}
+        vca_id = self.loop.run_until_complete(self.store.get_vca_id("vim_id"))
+        self.vim_accounts_collection.find_one.assert_called_once_with({"_id": "vim_id"})
+        self.assertEqual(vca_id, "vca_id")
diff --git a/osm_lcm/n2vc/tests/unit/test_utils.py b/osm_lcm/n2vc/tests/unit/test_utils.py
new file mode 100644 (file)
index 0000000..a99d2ee
--- /dev/null
@@ -0,0 +1,106 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+from unittest import TestCase
+
+from osm_lcm.n2vc.utils import (
+    Dict,
+    EntityType,
+    JujuStatusToOSM,
+    N2VCDeploymentStatus,
+    get_ee_id_components,
+)
+from juju.machine import Machine
+from juju.application import Application
+from juju.action import Action
+from juju.unit import Unit
+
+
+class UtilsTest(TestCase):
+    def test_dict(self):
+        example = Dict({"key": "value"})
+        self.assertEqual(example["key"], example.key)
+
+    def test_entity_type(self):
+        self.assertFalse(EntityType.has_value("machine2"))
+        values = [Machine, Application, Action, Unit]
+        for value in values:
+            self.assertTrue(EntityType.has_value(value))
+
+        self.assertEqual(EntityType.MACHINE, EntityType.get_entity(Machine))
+        self.assertEqual(EntityType.APPLICATION, EntityType.get_entity(Application))
+        self.assertEqual(EntityType.UNIT, EntityType.get_entity(Unit))
+        self.assertEqual(EntityType.ACTION, EntityType.get_entity(Action))
+
+    def test_juju_status_to_osm(self):
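+        # Table-driven check of the JujuStatusToOSM translation for every
+        # entity type and each Juju status it may report.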
+        tests = [
+            {
+                "entity_type": "machine",
+                "status": [
+                    {"juju": "pending", "osm": N2VCDeploymentStatus.PENDING},
+                    {"juju": "started", "osm": N2VCDeploymentStatus.COMPLETED},
+                ],
+            },
+            {
+                "entity_type": "application",
+                "status": [
+                    {"juju": "waiting", "osm": N2VCDeploymentStatus.RUNNING},
+                    {"juju": "maintenance", "osm": N2VCDeploymentStatus.RUNNING},
+                    {"juju": "blocked", "osm": N2VCDeploymentStatus.RUNNING},
+                    {"juju": "error", "osm": N2VCDeploymentStatus.FAILED},
+                    {"juju": "active", "osm": N2VCDeploymentStatus.COMPLETED},
+                ],
+            },
+            {
+                "entity_type": "unit",
+                "status": [
+                    {"juju": "waiting", "osm": N2VCDeploymentStatus.RUNNING},
+                    {"juju": "maintenance", "osm": N2VCDeploymentStatus.RUNNING},
+                    {"juju": "blocked", "osm": N2VCDeploymentStatus.RUNNING},
+                    {"juju": "error", "osm": N2VCDeploymentStatus.FAILED},
+                    {"juju": "active", "osm": N2VCDeploymentStatus.COMPLETED},
+                ],
+            },
+            {
+                "entity_type": "action",
+                "status": [
+                    {"juju": "running", "osm": N2VCDeploymentStatus.RUNNING},
+                    {"juju": "completed", "osm": N2VCDeploymentStatus.COMPLETED},
+                ],
+            },
+        ]
+
+        for test in tests:
+            entity_type = test["entity_type"]
+            self.assertTrue(entity_type in JujuStatusToOSM)
+
+            for status in test["status"]:
+                juju_status = status["juju"]
+                osm_status = status["osm"]
+                self.assertTrue(juju_status in JujuStatusToOSM[entity_type])
+                self.assertEqual(osm_status, JujuStatusToOSM[entity_type][juju_status])
+
+
+class GetEEComponentTest(TestCase):
+    def test_valid(self):
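+        # ee_id components are dot-separated: "<model>.<application>.<machine>".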
+        model, application, machine = get_ee_id_components("model.application.machine")
+        self.assertEqual(model, "model")
+        self.assertEqual(application, "application")
+        self.assertEqual(machine, "machine")
+
+    def test_invalid(self):
+        with self.assertRaises(Exception):
+            get_ee_id_components("model.application.machine.1")
+        with self.assertRaises(Exception):
+            get_ee_id_components("model.application")
diff --git a/osm_lcm/n2vc/tests/unit/testdata/__init__.py b/osm_lcm/n2vc/tests/unit/testdata/__init__.py
new file mode 100644 (file)
index 0000000..5aa2ec8
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
diff --git a/osm_lcm/n2vc/tests/unit/testdata/test_certificate.yaml b/osm_lcm/n2vc/tests/unit/testdata/test_certificate.yaml
new file mode 100644 (file)
index 0000000..d21e022
--- /dev/null
@@ -0,0 +1,42 @@
+# Copyright 2022 Whitestack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#
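+# Sample cert-manager Certificate manifest used as unit-test data.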
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: test-cert
+  namespace: osm
+spec:
+  secretName: test-cert-secret
+  privateKey:
+    rotationPolicy: Always
+    algorithm: ECDSA
+    size: 256
+  duration: 8760h
+  renewBefore: 2208h
+  subject:
+    organizations:
+      - osm
+  commonName: osm
+  isCA: false
+  usages:
+    - server auth
+  dnsNames:
+    - "*.osm"
+    - "*.osm.svc"
+    - "*.osm.svc.cluster"
+    - "*.osm.svc.cluster.local"
+  issuerRef:
+    name: ca-issuer
+    kind: ClusterIssuer
diff --git a/osm_lcm/n2vc/tests/unit/testdata/test_db_descriptors.py b/osm_lcm/n2vc/tests/unit/testdata/test_db_descriptors.py
new file mode 100644 (file)
index 0000000..c6f3670
--- /dev/null
@@ -0,0 +1,414 @@
+# Copyright 2022 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
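+# YAML dumps of "nsrs" and "vnfrs" database records, used as fixtures by the
+# unit tests (an NS with two proxy-charm VNFs connected by mgmtnet and datanet).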
+db_nsrs_text = """
+---
+-   _id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+    name: k8s-ns
+    name-ref: k8s-ns
+    short-name: k8s-ns
+    admin-status: ENABLED
+    nsState: READY
+    currentOperation: IDLE
+    currentOperationID: null
+    errorDescription: null
+    errorDetail: null
+    deploymentStatus: null
+    configurationStatus:
+      - elementType: VNF
+        elementUnderConfiguration: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+        status: READY
+      - elementType: VNF
+        elementUnderConfiguration: 17892d73-aa19-4b87-9a00-1d094f07a6b3
+        status: READY
+    vcaStatus: null
+    nsd:
+      _id: 12f320b5-2a57-40f4-82b5-020a6b1171d7
+      id: k8s_proxy_charm-ns
+      version: '1.0'
+      name: k8s_proxy_charm-ns
+      vnfd-id:
+        - k8s_proxy_charm-vnf
+      virtual-link-desc:
+        - id: mgmtnet
+          mgmt-network: true
+        - id: datanet
+      df:
+        - id: default-df
+          vnf-profile:
+            - id: vnf1
+              virtual-link-connectivity:
+                - constituent-cpd-id:
+                    - constituent-base-element-id: vnf1
+                      constituent-cpd-id: vnf-mgmt-ext
+                  virtual-link-profile-id: mgmtnet
+                - constituent-cpd-id:
+                    - constituent-base-element-id: vnf1
+                      constituent-cpd-id: vnf-data-ext
+                  virtual-link-profile-id: datanet
+              vnfd-id: k8s_proxy_charm-vnf
+            - id: vnf2
+              virtual-link-connectivity:
+                - constituent-cpd-id:
+                    - constituent-base-element-id: vnf2
+                      constituent-cpd-id: vnf-mgmt-ext
+                  virtual-link-profile-id: mgmtnet
+                - constituent-cpd-id:
+                    - constituent-base-element-id: vnf2
+                      constituent-cpd-id: vnf-data-ext
+                  virtual-link-profile-id: datanet
+              vnfd-id: k8s_proxy_charm-vnf
+      description: NS with 2 VNFs with cloudinit connected by datanet and mgmtnet VLs
+      _admin:
+        userDefinedData: {}
+        revision: 1
+        created: 1658990740.88281
+        modified: 1658990741.09266
+        projects_read:
+          - 51e0e80fe533469d98766caa16552a3e
+        projects_write:
+          - 51e0e80fe533469d98766caa16552a3e
+        onboardingState: ONBOARDED
+        operationalState: ENABLED
+        usageState: NOT_IN_USE
+        storage:
+          fs: mongo
+          path: /app/storage/
+          folder: '12f320b5-2a57-40f4-82b5-020a6b1171d7:1'
+          pkg-dir: k8s_proxy_charm_ns
+          descriptor: k8s_proxy_charm_ns/k8s_proxy_charm_nsd.yaml
+          zipfile: k8s_proxy_charm_ns.tar.gz
+    datacenter: bad7338b-ae46-43d4-a434-c3337a8054ac
+    resource-orchestrator: osmopenmano
+    description: default description
+    constituent-vnfr-ref:
+      - 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+      - 17892d73-aa19-4b87-9a00-1d094f07a6b3
+    operational-status: running
+    config-status: configured
+    detailed-status: Done
+    orchestration-progress: {}
+    create-time: 1658998097.57611
+    nsd-name-ref: k8s_proxy_charm-ns
+    operational-events: []
+    nsd-ref: k8s_proxy_charm-ns
+    nsd-id: 12f320b5-2a57-40f4-82b5-020a6b1171d7
+    vnfd-id:
+      - 6d9e1ca1-f387-4d01-9876-066fc7311e0f
+    instantiate_params:
+      nsdId: 12f320b5-2a57-40f4-82b5-020a6b1171d7
+      nsName: k8s-ns
+      nsDescription: default description
+      vimAccountId: bad7338b-ae46-43d4-a434-c3337a8054ac
+      vld:
+        - name: mgmtnet
+          vim-network-name: osm-ext
+    additionalParamsForNs: null
+    ns-instance-config-ref: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+    id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+    ssh-authorized-key: null
+    flavor:
+      - id: '0'
+        memory-mb: 1024
+        name: mgmtVM-flv
+        storage-gb: '10'
+        vcpu-count: 1
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            vim_details: null
+            vim_id: 17a9ba76-beb7-4ad4-a481-97de37174866
+            vim_status: DONE
+      - vcpu-count: 1
+        memory-mb: 1024
+        storage-gb: '10'
+        name: mgmtVM-flv
+        id: '1'
+    image:
+      - id: '0'
+        image: ubuntu18.04
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            vim_details: null
+            vim_id: 919fc71a-6acd-4ee3-8123-739a9abbc2e7
+            vim_status: DONE
+      - image: 'Canonical:UbuntuServer:18.04-LTS:latest'
+        vim-type: azure
+        id: '1'
+      - image: 'ubuntu-os-cloud:image-family:ubuntu-1804-lts'
+        vim-type: gcp
+        id: '2'
+      - image: ubuntu/images/hvm-ssd/ubuntu-artful-17.10-amd64-server-20180509
+        vim-type: aws
+        id: '3'
+    affinity-or-anti-affinity-group: []
+    revision: 1
+    vld:
+      - id: mgmtnet
+        mgmt-network: true
+        name: mgmtnet
+        type: null
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            vim_account_id: bad7338b-ae46-43d4-a434-c3337a8054ac
+            vim_network_name: osm-ext
+            vim_details: >
+              {admin_state_up: true, availability_zone_hints: [],
+              availability_zones: [nova], created_at: '2019-10-17T23:44:03Z',
+              description: '', encapsulation: vlan, encapsulation_id: 2148,
+              encapsulation_type: vlan, id: 21ea5d92-24f1-40ab-8d28-83230e277a49,
+              ipv4_address_scope: null,
+                ipv6_address_scope: null, is_default: false, mtu: 1500, name: osm-ext, port_security_enabled: true, project_id: 456b6471010b4737b47a0dd599c920c5, 'provider:network_type': vlan, 'provider:physical_network': physnet1, 'provider:segmentation_id': 2148, revision_number: 1009,
+                'router:external': true, segmentation_id: 2148, shared: true, status: ACTIVE, subnets: [{subnet: {allocation_pools: [{end: 172.21.249.255, start: 172.21.248.1}], cidr: 172.21.248.0/22, created_at: '2019-10-17T23:44:07Z', description: '', dns_nameservers: [],
+                      enable_dhcp: true, gateway_ip: 172.21.251.254, host_routes: [], id: d14f68b7-8287-41fe-b533-dafb2240680a, ip_version: 4, ipv6_address_mode: null, ipv6_ra_mode: null, name: osm-ext-subnet, network_id: 21ea5d92-24f1-40ab-8d28-83230e277a49, project_id: 456b6471010b4737b47a0dd599c920c5,
+                      revision_number: 5, service_types: [], subnetpool_id: null, tags: [], tenant_id: 456b6471010b4737b47a0dd599c920c5, updated_at: '2020-09-14T15:15:06Z'}}], tags: [], tenant_id: 456b6471010b4737b47a0dd599c920c5, type: data, updated_at: '2022-07-05T18:39:02Z'}
+            vim_id: 21ea5d92-24f1-40ab-8d28-83230e277a49
+            vim_status: ACTIVE
+      - id: datanet
+        mgmt-network: false
+        name: datanet
+        type: null
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            vim_account_id: bad7338b-ae46-43d4-a434-c3337a8054ac
+            vim_network_name: null
+            vim_details: >
+              {admin_state_up: true, availability_zone_hints: [],
+              availability_zones: [nova], created_at: '2022-07-28T08:41:59Z',
+              description: '', encapsulation: vxlan, encapsulation_id: 27,
+              encapsulation_type: vxlan, id: 34056287-3cd5-42cb-92d3-413382b50813,
+              ipv4_address_scope: null,
+                ipv6_address_scope: null, mtu: 1450, name: k8s-ns-datanet, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, 'provider:network_type': vxlan, 'provider:physical_network': null, 'provider:segmentation_id': 27, revision_number: 2, 'router:external': false,
+                segmentation_id: 27, shared: false, status: ACTIVE, subnets: [{subnet: {allocation_pools: [{end: 192.168.181.254, start: 192.168.181.1}], cidr: 192.168.181.0/24, created_at: '2022-07-28T08:41:59Z', description: '', dns_nameservers: [], enable_dhcp: true, gateway_ip: null,
+                      host_routes: [], id: ab2920f8-881b-4bef-82a5-9582a7930786, ip_version: 4, ipv6_address_mode: null, ipv6_ra_mode: null, name: k8s-ns-datanet-subnet, network_id: 34056287-3cd5-42cb-92d3-413382b50813, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 0,
+                      service_types: [], subnetpool_id: null, tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2022-07-28T08:41:59Z'}}], tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, type: bridge, updated_at: '2022-07-28T08:41:59Z'}
+            vim_id: 34056287-3cd5-42cb-92d3-413382b50813
+            vim_status: ACTIVE
+    _admin:
+      created: 1658998097.58182
+      modified: 1658998193.42562
+      projects_read:
+        - 51e0e80fe533469d98766caa16552a3e
+      projects_write:
+        - 51e0e80fe533469d98766caa16552a3e
+      nsState: INSTANTIATED
+      current-operation: null
+      nslcmop: null
+      operation-type: null
+      deployed:
+        RO:
+          vnfd: []
+          operational-status: running
+        VCA:
+          - target_element: vnf/vnf1
+            member-vnf-index: vnf1
+            vdu_id: null
+            kdu_name: null
+            vdu_count_index: 0
+            operational-status: init
+            detailed-status: ''
+            step: initial-deploy
+            vnfd_id: k8s_proxy_charm-vnf
+            vdu_name: null
+            type: k8s_proxy_charm
+            ee_descriptor_id: simple-ee
+            charm_name: ''
+            ee_id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s.simple-ee-z0-vnf1-vnf.k8s
+            application: simple-ee-z0-vnf1-vnf
+            model: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s
+            config_sw_installed: true
+          - target_element: vnf/vnf2
+            member-vnf-index: vnf2
+            vdu_id: null
+            kdu_name: null
+            vdu_count_index: 0
+            operational-status: init
+            detailed-status: ''
+            step: initial-deploy
+            vnfd_id: k8s_proxy_charm-vnf
+            vdu_name: null
+            type: k8s_proxy_charm
+            ee_descriptor_id: simple-ee
+            charm_name: ''
+            ee_id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s.simple-ee-z0-vnf2-vnf.k8s
+            application: simple-ee-z0-vnf2-vnf
+            model: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s
+            config_sw_installed: true
+        K8s: []
+"""
+
+db_vnfrs_text = """
+-   _id: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+    id: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+    nsr-id-ref: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+    member-vnf-index-ref: vnf1
+    additionalParamsForVnf: null
+    created-time: 1658998097.58036
+    vnfd-ref: k8s_proxy_charm-vnf
+    vnfd-id: 6d9e1ca1-f387-4d01-9876-066fc7311e0f
+    vim-account-id: bad7338b-ae46-43d4-a434-c3337a8054ac
+    vca-id: null
+    vdur:
+      - _id: 38912ff7-5bdd-4228-911f-c2bee259c44a
+        additionalParams:
+          OSM:
+            count_index: 0
+            member_vnf_index: vnf1
+            ns_id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898
+            vdu:
+              mgmtVM-0:
+                count_index: 0
+                interfaces:
+                  dataVM-xe0:
+                    name: dataVM-xe0
+                  mgmtVM-eth0:
+                    name: mgmtVM-eth0
+                vdu_id: mgmtVM
+            vdu_id: mgmtVM
+            vim_account_id: bad7338b-ae46-43d4-a434-c3337a8054ac
+            vnf_id: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0
+            vnfd_id: 6d9e1ca1-f387-4d01-9876-066fc7311e0f
+            vnfd_ref: k8s_proxy_charm-vnf
+        affinity-or-anti-affinity-group-id: []
+        alt-image-ids:
+          - '1'
+          - '2'
+          - '3'
+        cloud-init: '6d9e1ca1-f387-4d01-9876-066fc7311e0f:file:cloud-config.txt'
+        count-index: 0
+        id: 38912ff7-5bdd-4228-911f-c2bee259c44a
+        interfaces:
+          - external-connection-point-ref: vnf-mgmt-ext
+            internal-connection-point-ref: mgmtVM-eth0-int
+            mgmt-interface: true
+            mgmt-vnf: true
+            name: mgmtVM-eth0
+            ns-vld-id: mgmtnet
+            position: 1
+            type: PARAVIRT
+            compute_node: nfvisrv11
+            ip-address: 172.21.248.199
+            mac-address: 'fa:16:3e:4d:65:e9'
+            pci: null
+            vlan: 2148
+          - external-connection-point-ref: vnf-data-ext
+            internal-connection-point-ref: dataVM-xe0-int
+            name: dataVM-xe0
+            ns-vld-id: datanet
+            position: 2
+            type: PARAVIRT
+            compute_node: nfvisrv11
+            ip-address: 192.168.181.179
+            mac-address: 'fa:16:3e:ca:b5:d3'
+            pci: null
+            vlan: null
+        internal-connection-point:
+          - connection-point-id: mgmtVM-eth0-int
+            id: mgmtVM-eth0-int
+            name: mgmtVM-eth0-int
+          - connection-point-id: dataVM-xe0-int
+            id: dataVM-xe0-int
+            name: dataVM-xe0-int
+        ip-address: 172.21.248.199
+        ns-flavor-id: '0'
+        ns-image-id: '0'
+        ssh-access-required: true
+        ssh-keys:
+          - >
+            ssh-rsa
+            AAAAB3NzaC1yc2EAAAADAQABAAACAQDW3dtEDKfwZL0WZp6LeJUZFlZzYAHP7M4AsJwl2YFO/wmblfrTpWZ8tRyGwyjQacB7Zb7J07wD5AZACE71A3Nc9zjI22/gWN7N8X+ZxH6ywcr1GdXBqZDBeOdzD4pRb11E9mydGZ9l++KtFRtlF4G7IFYuxkOiSCJrkgiKuVDGodtQ/6VUKwxuI8U6N7MxtIBN2L3IfvMwuNyTo1daiUabQMwQKt/Q8Zpp78zsZ6SoxU+eYAHzbeTjAfNwhA88nRzRZn7tQW+gWl9wbSINbr2+JetTN+BTot/CMPmKzzul9tZrzhSzck1QSM3UDrD36ctRdaLABnWCoxpm0wJthNt693xVrFP+bMgK2BR0fyu9WwVEcHkC9CZ8yoi37k5rGVtoDw6sW6lxQ5QKS+Plv/YjGKqK3Ro/UoIEhgxcW53uz4PveyMBss4geB9ad/1T8dtugd288qfCWJRBpJBrE497EalhHolF3L/2bEu3uCKN0TY4POzqP/5cuAUc/uTJ2mjZewJdlJtrn7IyFtSUypeuVmXRx5LwByQw9EwPhUZlKVjYEHYmu5YTKlFSWyorWgRLBBIK7LLPj+bCGgLeT+fXmip6eFquAyVtoQfDofQ/gc0OXEA1uKfK2VFKg1le+joz1WA/XieGSvKRQ4aZorYgi/FzbpxKj2a60cZubJMq5w==
+            root@lcm-7b6bcf7cdd-5h2ql
+          - >-
+            ssh-rsa
+            AAAAB3NzaC1yc2EAAAADAQABAAABAQDtg65/Jh3KDWC9+YzkTz8Md/uhalkjPo15DSxlUNWzYQNFUzaG5Pt0trDwQ29UOQIUy1CB9HpWSZMTA1ESet/+cyXWkZ9MznAmGLQBdnwqWU792UQf6rv74Zpned8MbnKQXfs8gog1ZFFKRMcwitNRqs8xs8XsPLE/l1Jo2QemhM0fIRofjJiLKYaKeGP59Fb8UlIeGDaxmIFgLs8bAZvrmjbae3o4b1fZDNboqlQbHb9rakxI9uCnsaBrCmelXpP9EFmENx85vdHEwCAfCRvSWKnbXuOojJJzFM5odoWFZo8AuIhEb5ZiLkGet3CvCfWZZPpQc4TuNDaY0t1XUegH
+            juju-client-key
+        vdu-id-ref: mgmtVM
+        vdu-name: mgmtVM
+        vim_info:
+          'vim:bad7338b-ae46-43d4-a434-c3337a8054ac':
+            interfaces:
+              - vim_info: >
+                  {admin_state_up: true, allowed_address_pairs: [],
+                  'binding:host_id': nfvisrv11, 'binding:profile': {},
+                  'binding:vif_details': {bridge_name: br-int, connectivity: l2,
+                  datapath_type: system, ovs_hybrid_plug: true, port_filter: true},
+                  'binding:vif_type': ovs, 'binding:vnic_type': normal,
+                    created_at: '2022-07-28T08:42:04Z', description: '', device_id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7, device_owner: 'compute:nova', extra_dhcp_opts: [], fixed_ips: [{ip_address: 172.21.248.199, subnet_id: d14f68b7-8287-41fe-b533-dafb2240680a}], id: e053d44f-1d67-4274-b85d-1cef243353d6,
+                    mac_address: 'fa:16:3e:4d:65:e9', name: mgmtVM-eth0, network_id: 21ea5d92-24f1-40ab-8d28-83230e277a49, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 4, security_groups: [1de4b2c2-e4be-4e91-985c-d887e2715949], status: ACTIVE,
+                    tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2022-07-28T08:42:16Z'}
+                mac_address: 'fa:16:3e:4d:65:e9'
+                vim_net_id: 21ea5d92-24f1-40ab-8d28-83230e277a49
+                vim_interface_id: e053d44f-1d67-4274-b85d-1cef243353d6
+                compute_node: nfvisrv11
+                pci: null
+                vlan: 2148
+                ip_address: 172.21.248.199
+                mgmt_vnf_interface: true
+                mgmt_vdu_interface: true
+              - vim_info: >
+                  {admin_state_up: true, allowed_address_pairs: [],
+                  'binding:host_id': nfvisrv11, 'binding:profile': {},
+                  'binding:vif_details': {bridge_name: br-int, connectivity: l2,
+                  datapath_type: system, ovs_hybrid_plug: true, port_filter: true},
+                  'binding:vif_type': ovs, 'binding:vnic_type': normal,
+                    created_at: '2022-07-28T08:42:04Z', description: '', device_id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7, device_owner: 'compute:nova', extra_dhcp_opts: [], fixed_ips: [{ip_address: 192.168.181.179, subnet_id: ab2920f8-881b-4bef-82a5-9582a7930786}], id: 8a34c944-0fc1-41ae-9dbc-9743e5988162,
+                    mac_address: 'fa:16:3e:ca:b5:d3', name: dataVM-xe0, network_id: 34056287-3cd5-42cb-92d3-413382b50813, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 4, security_groups: [1de4b2c2-e4be-4e91-985c-d887e2715949], status: ACTIVE,
+                    tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2022-07-28T08:42:15Z'}
+                mac_address: 'fa:16:3e:ca:b5:d3'
+                vim_net_id: 34056287-3cd5-42cb-92d3-413382b50813
+                vim_interface_id: 8a34c944-0fc1-41ae-9dbc-9743e5988162
+                compute_node: nfvisrv11
+                pci: null
+                vlan: null
+                ip_address: 192.168.181.179
+            vim_details: >
+              {'OS-DCF:diskConfig': MANUAL, 'OS-EXT-AZ:availability_zone': nova,
+              'OS-EXT-SRV-ATTR:host': nfvisrv11,
+              'OS-EXT-SRV-ATTR:hypervisor_hostname': nfvisrv11,
+              'OS-EXT-SRV-ATTR:instance_name': instance-0002967a,
+              'OS-EXT-STS:power_state': 1, 'OS-EXT-STS:task_state': null,
+                'OS-EXT-STS:vm_state': active, 'OS-SRV-USG:launched_at': '2022-07-28T08:42:17.000000', 'OS-SRV-USG:terminated_at': null, accessIPv4: '', accessIPv6: '', addresses: {k8s-ns-datanet: [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ca:b5:d3', 'OS-EXT-IPS:type': fixed,
+                      addr: 192.168.181.179, version: 4}], osm-ext: [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:4d:65:e9', 'OS-EXT-IPS:type': fixed, addr: 172.21.248.199, version: 4}]}, config_drive: '', created: '2022-07-28T08:42:06Z', flavor: {id: 17a9ba76-beb7-4ad4-a481-97de37174866,
+                  links: [{href: 'http://172.21.247.1:8774/flavors/17a9ba76-beb7-4ad4-a481-97de37174866', rel: bookmark}]}, hostId: 2aa7155bd281bd308d8e3776af56d428210c21aab788a8cbdf5ef500, id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7, image: {id: 919fc71a-6acd-4ee3-8123-739a9abbc2e7,
+                  links: [{href: 'http://172.21.247.1:8774/images/919fc71a-6acd-4ee3-8123-739a9abbc2e7', rel: bookmark}]}, key_name: null, links: [{href: 'http://172.21.247.1:8774/v2.1/servers/1fabddca-0dcf-4702-a5f3-5cc028c2aba7', rel: self}, {href: 'http://172.21.247.1:8774/servers/1fabddca-0dcf-4702-a5f3-5cc028c2aba7',
+                    rel: bookmark}], metadata: {}, name: k8s-ns-vnf1-mgmtVM-0, 'os-extended-volumes:volumes_attached': [], progress: 0, security_groups: [{name: default}, {name: default}], status: ACTIVE, tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated: '2022-07-28T08:42:17Z',
+                user_id: f043c84f940b4fc8a01a98714ea97c80}
+            vim_id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7
+            vim_status: ACTIVE
+            vim_name: k8s-ns-vnf1-mgmtVM-0
+        virtual-storages:
+          - id: mgmtVM-storage
+            size-of-storage: '10'
+        status: ACTIVE
+        vim-id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7
+        name: k8s-ns-vnf1-mgmtVM-0
+    connection-point:
+      - name: vnf-mgmt-ext
+        connection-point-id: mgmtVM-eth0-int
+        connection-point-vdu-id: mgmtVM
+        id: vnf-mgmt-ext
+      - name: vnf-data-ext
+        connection-point-id: dataVM-xe0-int
+        connection-point-vdu-id: mgmtVM
+        id: vnf-data-ext
+    ip-address: 172.21.248.199
+    revision: 1
+    _admin:
+      created: 1658998097.58048
+      modified: 1658998097.58048
+      projects_read:
+        - 51e0e80fe533469d98766caa16552a3e
+      projects_write:
+        - 51e0e80fe533469d98766caa16552a3e
+      nsState: INSTANTIATED
+"""
diff --git a/osm_lcm/n2vc/tests/unit/testdata/upgrade-machine.log b/osm_lcm/n2vc/tests/unit/testdata/upgrade-machine.log
new file mode 100644 (file)
index 0000000..6311432
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#
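+# Recorded Juju watcher delta events from a machine charm upgrade
+# (local:bionic/simple-ha-proxy-28 -> -29), used as unit-test data.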
+["charm", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "charm-url": "local:bionic/simple-ha-proxy-29", "charm-version": "", "life": "alive", "profile": null, "config": {"ssh-hostname": "", "ssh-key-bits": 4096, "ssh-key-type": "rsa", "ssh-password": "", "ssh-public-key": "", "ssh-username": ""}}]
+["application", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0", "exposed": false, "charm-url": "local:bionic/simple-ha-proxy-29", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"ssh-hostname": "172.21.249.28", "ssh-password": "osm4u", "ssh-username": "ubuntu"}, "subordinate": false, "status": {"current": "unset", "message": "", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/0", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.54", "private-address": "10.37.209.54", "machine-id": "1", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.56175336Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T16:21:56.481875662Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.579802723Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T16:20:44.69125318Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.563068618Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T16:20:48.695716332Z", "version": "2.9.22"}}]
+["charm", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "charm-url": "local:bionic/simple-ha-proxy-28", "charm-version": "", "life": "dying", "profile": null, "config": {"ssh-hostname": "", "ssh-key-bits": 4096, "ssh-key-type": "rsa", "ssh-password": "", "ssh-public-key": "", "ssh-username": ""}}]
+["charm", "remove", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "charm-url": "local:bionic/simple-ha-proxy-28", "charm-version": "", "life": "dying", "profile": null, "config": {"ssh-hostname": "", "ssh-key-bits": 4096, "ssh-key-type": "rsa", "ssh-password": "", "ssh-public-key": "", "ssh-username": ""}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/0", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.54", "private-address": "10.37.209.54", "machine-id": "1", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.56175336Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T16:22:54.354997486Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.579802723Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T16:22:54.400387228Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.563068618Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T16:22:54.523797611Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/0", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.54", "private-address": "10.37.209.54", "machine-id": "1", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.56175336Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:54.934760959Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.579802723Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:54.982259225Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/0", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.54", "private-address": "10.37.209.54", "machine-id": "1", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Waiting for SSH credentials", "since": "2022-04-27T16:22:55.091278959Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:54.934760959Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-26T18:50:27.563068618Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:55.091697191Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Waiting for SSH credentials", "since": "2022-04-27T16:22:55.153254035Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:54.982259225Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Waiting for SSH credentials", "since": "2022-04-27T16:22:55.307204975Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:55.091697191Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:22:58.698041924Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:55.091697191Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:22:58.698041924Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-settings-changed hook", "since": "2022-04-27T16:22:59.098429743Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/1", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.4", "private-address": "10.37.209.4", "machine-id": "2", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:22:58.698041924Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T16:22:59.636191881Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/0", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.54", "private-address": "10.37.209.54", "machine-id": "1", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:23:00.173022824Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:54.934760959Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:23:00.5376781Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T16:22:54.982259225Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/0", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.54", "private-address": "10.37.209.54", "machine-id": "1", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:23:00.173022824Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T16:23:00.529675913Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:23:00.5376781Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-settings-changed hook", "since": "2022-04-27T16:23:00.948967357Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "1ee18c0b-bd69-4e85-8ebc-01eec76c964d", "name": "app-vnf-7a49ace2b6-z0/2", "application": "app-vnf-7a49ace2b6-z0", "series": "bionic", "charm-url": "local:bionic/simple-ha-proxy-29", "life": "alive", "public-address": "10.37.209.93", "private-address": "10.37.209.93", "machine-id": "3", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T16:23:00.5376781Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T16:23:01.449283589Z", "version": "2.9.22"}}]
\ No newline at end of file
diff --git a/osm_lcm/n2vc/tests/unit/testdata/upgrade-operator.log b/osm_lcm/n2vc/tests/unit/testdata/upgrade-operator.log
new file mode 100644 (file)
index 0000000..66a5895
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#
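+# Recorded Juju watcher delta events from an operator charm upgrade
+# (local:kubernetes/sshproxy-0 -> -1), used as unit-test data.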
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/sshproxy-1", "charm-version": "", "life": "alive", "profile": null}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/sshproxy-1", "charm-version": "", "life": "alive", "profile": null, "config": {"apt-mirror": null, "security-apt-mirror": null, "ssh-hostname": "", "ssh-key-bits": 4096, "ssh-key-type": "rsa", "ssh-password": "", "ssh-public-key": "", "ssh-username": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy", "exposed": false, "charm-url": "local:kubernetes/sshproxy-1", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"ssh-hostname": "127.0.0.1", "ssh-password": "osm4u", "ssh-username": "ubuntu"}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:06:37.951722352Z", "version": ""}, "workload-version": ""}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy", "exposed": false, "charm-url": "local:kubernetes/sshproxy-1", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"ssh-hostname": "127.0.0.1", "ssh-password": "osm4u", "ssh-username": "ubuntu"}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:06:37.951722352Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy/0", "application": "sshproxy", "series": "kubernetes", "charm-url": "local:kubernetes/sshproxy-1", "life": "alive", "public-address": "", "private-address": "10.152.183.24", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:08:40.533982098Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:08:41.574108719Z", "version": ""}}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/sshproxy-0", "charm-version": "", "life": "dying", "profile": null, "config": {"apt-mirror": null, "security-apt-mirror": null, "ssh-hostname": "", "ssh-key-bits": 4096, "ssh-key-type": "rsa", "ssh-password": "", "ssh-public-key": "", "ssh-username": ""}}]
+["charm", "remove", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/sshproxy-0", "charm-version": "", "life": "dying", "profile": null, "config": {"apt-mirror": null, "security-apt-mirror": null, "ssh-hostname": "", "ssh-key-bits": 4096, "ssh-key-type": "rsa", "ssh-password": "", "ssh-public-key": "", "ssh-username": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy", "exposed": false, "charm-url": "local:kubernetes/sshproxy-1", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"ssh-hostname": "127.0.0.1", "ssh-password": "osm4u", "ssh-username": "ubuntu"}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:06:37.951722352Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy/0", "application": "sshproxy", "series": "kubernetes", "charm-url": "local:kubernetes/sshproxy-1", "life": "alive", "public-address": "", "private-address": "10.152.183.24", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "Active", "since": "2022-04-27T18:09:49.713279872Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T18:09:48.529774773Z", "version": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy", "exposed": false, "charm-url": "local:kubernetes/sshproxy-1", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"ssh-hostname": "127.0.0.1", "ssh-password": "osm4u", "ssh-username": "ubuntu"}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:06:37.951722352Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy/0", "application": "sshproxy", "series": "kubernetes", "charm-url": "local:kubernetes/sshproxy-1", "life": "alive", "public-address": "", "private-address": "10.152.183.24", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "Active", "since": "2022-04-27T18:09:49.713279872Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:09:50.760612389Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy/0", "application": "sshproxy", "series": "kubernetes", "charm-url": "local:kubernetes/sshproxy-1", "life": "alive", "public-address": "", "private-address": "10.152.183.24", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:09:51.90389784Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:09:50.760612389Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "sshproxy/0", "application": "sshproxy", "series": "kubernetes", "charm-url": "local:kubernetes/sshproxy-1", "life": "alive", "public-address": "", "private-address": "10.152.183.24", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:09:51.90389784Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:09:52.859465812Z", "version": ""}}]
\ No newline at end of file
diff --git a/osm_lcm/n2vc/tests/unit/testdata/upgrade-podspec-stateful.log b/osm_lcm/n2vc/tests/unit/testdata/upgrade-podspec-stateful.log
new file mode 100644 (file)
index 0000000..7d671b8
--- /dev/null
@@ -0,0 +1,41 @@
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#
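+# Recorded Juju watcher delta events from a stateful podspec charm upgrade
+# (mongodb-k8s), used as unit-test data.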
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/mongodb-k8s-0", "charm-version": "", "life": "alive", "profile": null}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/mongodb-k8s-0", "charm-version": "", "life": "alive", "profile": null, "config": {"replica_set_name": "rs0"}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-25T15:03:59.520286015Z", "version": ""}, "workload-version": ""}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-25T15:03:59.520286015Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T17:36:42.739482369Z", "version": ""}}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "charm-version": "", "life": "dying", "profile": null, "config": {"replica_set_name": "rs0"}}]
+["charm", "remove", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "charm-version": "", "life": "dying", "profile": null, "config": {"replica_set_name": "rs0"}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-25T15:03:59.520286015Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T18:23:25.164370911Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Fetching image information", "since": "2022-04-27T18:23:26.17972471Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T18:23:25.164370911Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Assembling pod spec", "since": "2022-04-27T18:23:26.4876642Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T18:23:25.164370911Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:26.747039555Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T18:23:25.164370911Z", "version": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-25T15:03:59.520286015Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:26.747039555Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:23:27.665397171Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Fetching image information", "since": "2022-04-27T18:23:28.405317887Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:23:27.665397171Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "Assembling pod spec", "since": "2022-04-27T18:23:28.701544881Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:23:27.665397171Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:29.040857644Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:23:27.665397171Z", "version": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-25T15:03:59.520286015Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:29.040857644Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:23:29.956508325Z", "version": ""}}]
+##########################################################################################################################################################################################################################################################
+# These next events are visible on stateful charm upgrade, but so far there is no method to link them to the overall upgrade change
+#["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:23:30.879168477Z", "version": ""}, "workload-version": ""}]
+#["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "waiting", "message": "", "since": "2022-04-27T18:23:33.296232835Z", "version": ""}, "workload-version": ""}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:29.040857644Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:23:29.956508325Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:29.040857644Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:23:29.956508325Z", "version": ""}}]
+#["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb", "exposed": false, "charm-url": "local:kubernetes/mongodb-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:23:53.480017079Z", "version": ""}, "workload-version": ""}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "local:kubernetes/mongodb-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:23:29.040857644Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:23:54.070335385Z", "version": ""}}]
\ No newline at end of file
diff --git a/osm_lcm/n2vc/tests/unit/testdata/upgrade-podspec-stateless.log b/osm_lcm/n2vc/tests/unit/testdata/upgrade-podspec-stateless.log
new file mode 100644 (file)
index 0000000..0cfe2f5
--- /dev/null
@@ -0,0 +1,102 @@
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/osm-lcm-0", "charm-version": "", "life": "alive", "profile": null}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:kubernetes/osm-lcm-0", "charm-version": "", "life": "alive", "profile": null, "config": {"database_commonkey": "osm", "debug_common_local_path": null, "debug_lcm_local_path": null, "debug_mode": false, "debug_n2vc_local_path": null, "debug_pubkey": null, "image_pull_policy": "always", "log_level": "INFO", "mongodb_uri": null, "security_context": false, "vca_apiproxy": null, "vca_cacert": null, "vca_cloud": null, "vca_helm_ca_certs": "", "vca_host": null, "vca_k8s_cloud": null, "vca_model_config_agent_metadata_url": null, "vca_model_config_agent_stream": null, "vca_model_config_apt_ftp_proxy": null, "vca_model_config_apt_http_proxy": null, "vca_model_config_apt_https_proxy": null, "vca_model_config_apt_mirror": null, "vca_model_config_apt_no_proxy": null, "vca_model_config_automatically_retry_hooks": null, "vca_model_config_backup_dir": null, "vca_model_config_cloudinit_userdata": null, "vca_model_config_container_image_metadata_url": null, "vca_model_config_container_image_stream": null, "vca_model_config_container_inherit_properties": null, "vca_model_config_container_networking_method": null, "vca_model_config_default_series": null, "vca_model_config_default_space": null, "vca_model_config_development": null, "vca_model_config_disable_network_management": null, "vca_model_config_egress_subnets": null, "vca_model_config_enable_os_refresh_update": null, "vca_model_config_enable_os_upgrade": null, "vca_model_config_fan_config": null, "vca_model_config_firewall_mode": null, "vca_model_config_ftp_proxy": null, "vca_model_config_http_proxy": null, "vca_model_config_https_proxy": null, "vca_model_config_ignore_machine_addresses": null, "vca_model_config_image_metadata_url": null, "vca_model_config_image_stream": null, "vca_model_config_juju_ftp_proxy": null, "vca_model_config_juju_http_proxy": null, "vca_model_config_juju_https_proxy": null, "vca_model_config_juju_no_proxy": null, "vca_model_config_logforward_enabled": null, "vca_model_config_logging_config": null, "vca_model_config_lxd_snap_channel": null, "vca_model_config_max_action_results_age": null, "vca_model_config_max_action_results_size": null, "vca_model_config_max_status_history_age": null, "vca_model_config_max_status_history_size": null, "vca_model_config_net_bond_reconfigure_delay": null, "vca_model_config_no_proxy": null, "vca_model_config_provisioner_harvest_mode": null, "vca_model_config_proxy_ssh": null, "vca_model_config_snap_http_proxy": null, "vca_model_config_snap_https_proxy": null, "vca_model_config_snap_store_assertions": null, "vca_model_config_snap_store_proxy": null, "vca_model_config_snap_store_proxy_url": null, "vca_model_config_ssl_hostname_verification": null, "vca_model_config_test_mode": null, "vca_model_config_transmit_vendor_metrics": null, "vca_model_config_update_status_hook_interval": null, "vca_port": null, "vca_pubkey": null, "vca_secret": null, "vca_stablerepourl": "https://charts.helm.sh/stable", "vca_user": null}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm", "exposed": false, "charm-url": "local:kubernetes/osm-lcm-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"database_commonkey": "osm", "debug_common_local_path": "/home/ubuntu/mark/git/osm/branches/master/common", "debug_lcm_local_path": "/home/ubuntu/mark/git/osm/branches/master/LCM", "debug_mode": true, "debug_n2vc_local_path": "/home/ubuntu/mark/git/osm/branches/master/N2VC", "debug_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUrOLpYylC9lRlIknpGeda2mzY+mqTYxLDj9Q5t2jerT/aHARSr7DBbkLroqb8bZLsHw3QSHOy9AjF7Y8z5HpkFHGL0do1A/a3MkY+TIX3+FVP8FuvSIb7fNofC2odH5Pj/5kY2TSQhGcsAeYejoYn6qQ0xElNJtWaoqPKkAe825TJkANc31YvokxYCbY9oHfzUPEXtS2nADJrn5drEgc/R8cAwPRNPs2EU/XT2u1m+UP5T9nHbFV9rjv7RhrezB1ynQ5IGsPteOCDIsLswLKpuSQ0JBpuYb6wKjzBlYYyMe1lQF+m9ZWEnywGzCEQncsOxF+GzSbxrrtTLOFgDAbT mark.beierl@canonical.com", "log_level": "DEBUG", "vca_cacert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1iT21oMThUaFc3NDNlSGhIckZQL1JzcXd5U01BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TkRBNApNREl5TXpReldoY05Nekl3TkRBNE1ESXlPRFF6V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMWJMYUMwemMKQzdKOGRSdkg0UEtSZzU5MEI0S0EzRXpTNXUxbW81QldLblBoQ3RPb1hIVU03ZnhvU2RlV1dKb0FKb0hWOFlaUApiVzF0MnBzZEtGbWlZYWxhdGNkUSs5VGU5dWMxbnRNRDRlTVFTSjVKQ0MrSW83SDdCSjY0bkV4dms4RWNmT0F3CnNxL1lvMnZJaHcwVTNDZk5LaWNPNHE4MW1jamlkc001Nmp3eHA2R05SaVY5bEszV2hXd0JKWjZMdkovUDZySDAKNU8yV2crK0pNOFMzdGlFV1N3SzhZTmxiYTVKUExkdnNPVkVWWVVsK0NUc0RpRGhzZ2hFSHU2RHBzSzd5dGw2aApWa3NFRjI4Y1krRmhhVXpXejk2d0JqM1M0UUdKQXV5K1dBWStnRVZZcXIrQ0dySkVNeEJLK0VPWjJ2MjJ1YW9iClJyNmo5dkZRQ2I5YVQ5RTV1SDRZWGhIelJ2YUZLQVB4b2J5OFFpR0cwRXJkZTA1ajFYU0NaS0EyMXEyODcvR2wKT0NWWUxMMVNBb1VIbUMzUEZGU25ycDYzdUxLSWVJVTAyb1o0L3JQODlnbUM4VXBJNXgxTEdKQXJ1OEd1NHltRApBR2FxSjNWdjQ0MjIyWEhJaThXZGdwUFNLZWpQeUlReW9HMHBuTTZRUk5KaUdYdXg5S3dIblV5bEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2cKM3VmTzhhajJCc2V2R0lMaEUxQUZpaTR3VWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWs1eXFQeDFFWEF3MApIM2VoeVJienhCa1VKQkgwU2RDT3drelY4MVp3dmxBbktLS3hOTTFBd2VSQUxrUTNIRlMrOW11L2szK2ZZRG1uCkxMR3ZLMjM2THYzWWZpMkVHd2ZpbC9icnQ3bU5pQkRtTDdvd0Vta09oVzVKYlRiM2RRcmxtelJsVXhIU1R4d0MKUUM2NWdQTkJJcTNUZUZmU2t6ZlA1N0FIK0ZHemZYNTVBa0pnbEZpQXRRcGFoQXgxVlRaWitPK3RjbWZSSW5mUQpDSzArZE5qc3VUd2NHbzhvYUpOamJmWHNPYlA1eWFneWV5d2ZxQ3lvRExnT2gwdUlGUVBiUlBRM1g0OUw3bzhmCnNGRm9CcmVNbjFJWjJBUlplc0dWYXRKSFhRb01WRzcrK3F1L0g2dVNEMFZtK3piNTBJbGVhZHZiQVR2aUlUTlgKYWNtQkRvSmdOQ1JsTEhBR3hWN2pabFIrSFBjUitOTGordklJOUloeHVaY09STW5kTHpyT2hGSjVqM2FuOG5kbApOdW9sR2c3WW1hRmJWdFo3aUdEWnBISTdSQVFSZitjNVlKbFVIbUwrMnpNR2xiZHlKL3B5cTRjUEJsVDZlWUhtCmxSVEhseXVRaTd2ZndneXJwVU53ajMvbkNUekxjWDVqaHp3L1h2aDlGeGZpL1FTTmxKREIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK", "vca_cloud": "lxd-cloud", "vca_host": "10.0.2.68", "vca_k8s_cloud": "microk8s", "vca_port": 17070, "vca_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+tPyU/gOogK/jQFbDgHtlaYhba8Y1SshxC5vL908ST2I6ku4+1XfIgVi8gfCUDRG8kzHL9S0i8iCvPYqCIasSEVD7+LCjYn19JZXWhnkwmlmHoW3a7ljw++d4aNWGKNWxiQOKKtM26ZH5yu1kKHtmZ1bcgrKGkQdiYBhzsKZ/8lRoWakGwZdDTdny6ZxmcvJ52GLyDs/K4jK730ogRVcsj7h3hb7KXKedNkX89ciAaus8m3HA9nMWsf8C0GRXR9ymGDml9pUORO8/6uOsccn5VQWHl5sitSG4K2W/5jBBNNmRQ8obV2ey7N+3nhb9luzhgk2Slj0XTjhnKOLP01Jn juju-client-key", "vca_secret": "86bbee23c74c078a3a67a95349788748", "vca_user": "admin"}, "subordinate": false, 
"status": {"current": "active", "message": "", "since": "2022-04-25T15:19:30.580696141Z", "version": ""}, "workload-version": ""}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm", "exposed": false, "charm-url": "local:kubernetes/osm-lcm-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"database_commonkey": "osm", "debug_common_local_path": "/home/ubuntu/mark/git/osm/branches/master/common", "debug_lcm_local_path": "/home/ubuntu/mark/git/osm/branches/master/LCM", "debug_mode": true, "debug_n2vc_local_path": "/home/ubuntu/mark/git/osm/branches/master/N2VC", "debug_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUrOLpYylC9lRlIknpGeda2mzY+mqTYxLDj9Q5t2jerT/aHARSr7DBbkLroqb8bZLsHw3QSHOy9AjF7Y8z5HpkFHGL0do1A/a3MkY+TIX3+FVP8FuvSIb7fNofC2odH5Pj/5kY2TSQhGcsAeYejoYn6qQ0xElNJtWaoqPKkAe825TJkANc31YvokxYCbY9oHfzUPEXtS2nADJrn5drEgc/R8cAwPRNPs2EU/XT2u1m+UP5T9nHbFV9rjv7RhrezB1ynQ5IGsPteOCDIsLswLKpuSQ0JBpuYb6wKjzBlYYyMe1lQF+m9ZWEnywGzCEQncsOxF+GzSbxrrtTLOFgDAbT mark.beierl@canonical.com", "log_level": "DEBUG", "vca_cacert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1iT21oMThUaFc3NDNlSGhIckZQL1JzcXd5U01BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TkRBNApNREl5TXpReldoY05Nekl3TkRBNE1ESXlPRFF6V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMWJMYUMwemMKQzdKOGRSdkg0UEtSZzU5MEI0S0EzRXpTNXUxbW81QldLblBoQ3RPb1hIVU03ZnhvU2RlV1dKb0FKb0hWOFlaUApiVzF0MnBzZEtGbWlZYWxhdGNkUSs5VGU5dWMxbnRNRDRlTVFTSjVKQ0MrSW83SDdCSjY0bkV4dms4RWNmT0F3CnNxL1lvMnZJaHcwVTNDZk5LaWNPNHE4MW1jamlkc001Nmp3eHA2R05SaVY5bEszV2hXd0JKWjZMdkovUDZySDAKNU8yV2crK0pNOFMzdGlFV1N3SzhZTmxiYTVKUExkdnNPVkVWWVVsK0NUc0RpRGhzZ2hFSHU2RHBzSzd5dGw2aApWa3NFRjI4Y1krRmhhVXpXejk2d0JqM1M0UUdKQXV5K1dBWStnRVZZcXIrQ0dySkVNeEJLK0VPWjJ2MjJ1YW9iClJyNmo5dkZRQ2I5YVQ5RTV1SDRZWGhIelJ2YUZLQVB4b2J5OFFpR0cwRXJkZTA1ajFYU0NaS0EyMXEyODcvR2wKT0NWWUxMMVNBb1VIbUMzUEZGU25ycDYzdUxLSWVJVTAyb1o0L3JQODlnbUM4VXBJNXgxTEdKQXJ1OEd1NHltRApBR2FxSjNWdjQ0MjIyWEhJaThXZGdwUFNLZWpQeUlReW9HMHBuTTZRUk5KaUdYdXg5S3dIblV5bEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2cKM3VmTzhhajJCc2V2R0lMaEUxQUZpaTR3VWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWs1eXFQeDFFWEF3MApIM2VoeVJienhCa1VKQkgwU2RDT3drelY4MVp3dmxBbktLS3hOTTFBd2VSQUxrUTNIRlMrOW11L2szK2ZZRG1uCkxMR3ZLMjM2THYzWWZpMkVHd2ZpbC9icnQ3bU5pQkRtTDdvd0Vta09oVzVKYlRiM2RRcmxtelJsVXhIU1R4d0MKUUM2NWdQTkJJcTNUZUZmU2t6ZlA1N0FIK0ZHemZYNTVBa0pnbEZpQXRRcGFoQXgxVlRaWitPK3RjbWZSSW5mUQpDSzArZE5qc3VUd2NHbzhvYUpOamJmWHNPYlA1eWFneWV5d2ZxQ3lvRExnT2gwdUlGUVBiUlBRM1g0OUw3bzhmCnNGRm9CcmVNbjFJWjJBUlplc0dWYXRKSFhRb01WRzcrK3F1L0g2dVNEMFZtK3piNTBJbGVhZHZiQVR2aUlUTlgKYWNtQkRvSmdOQ1JsTEhBR3hWN2pabFIrSFBjUitOTGordklJOUloeHVaY09STW5kTHpyT2hGSjVqM2FuOG5kbApOdW9sR2c3WW1hRmJWdFo3aUdEWnBISTdSQVFSZitjNVlKbFVIbUwrMnpNR2xiZHlKL3B5cTRjUEJsVDZlWUhtCmxSVEhseXVRaTd2ZndneXJwVU53ajMvbkNUekxjWDVqaHp3L1h2aDlGeGZpL1FTTmxKREIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK", "vca_cloud": "lxd-cloud", "vca_host": "10.0.2.68", "vca_k8s_cloud": "microk8s", "vca_port": 17070, "vca_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+tPyU/gOogK/jQFbDgHtlaYhba8Y1SshxC5vL908ST2I6ku4+1XfIgVi8gfCUDRG8kzHL9S0i8iCvPYqCIasSEVD7+LCjYn19JZXWhnkwmlmHoW3a7ljw++d4aNWGKNWxiQOKKtM26ZH5yu1kKHtmZ1bcgrKGkQdiYBhzsKZ/8lRoWakGwZdDTdny6ZxmcvJ52GLyDs/K4jK730ogRVcsj7h3hb7KXKedNkX89ciAaus8m3HA9nMWsf8C0GRXR9ymGDml9pUORO8/6uOsccn5VQWHl5sitSG4K2W/5jBBNNmRQ8obV2ey7N+3nhb9luzhgk2Slj0XTjhnKOLP01Jn juju-client-key", "vca_secret": "86bbee23c74c078a3a67a95349788748", "vca_user": "admin"}, "subordinate": false, 
"status": {"current": "active", "message": "", "since": "2022-04-25T15:19:30.580696141Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:19:46.158217393Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-25T15:19:47.020240886Z", "version": ""}}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "ch:amd64/kubernetes/osm-lcm-1", "charm-version": "", "life": "dying", "profile": null, "config": {"database_commonkey": "osm", "debug_common_local_path": null, "debug_lcm_local_path": null, "debug_mode": false, "debug_n2vc_local_path": null, "debug_pubkey": null, "image_pull_policy": "always", "log_level": "INFO", "mongodb_uri": null, "security_context": false, "vca_apiproxy": null, "vca_cacert": null, "vca_cloud": null, "vca_helm_ca_certs": "", "vca_host": null, "vca_k8s_cloud": null, "vca_model_config_agent_metadata_url": null, "vca_model_config_agent_stream": null, "vca_model_config_apt_ftp_proxy": null, "vca_model_config_apt_http_proxy": null, "vca_model_config_apt_https_proxy": null, "vca_model_config_apt_mirror": null, "vca_model_config_apt_no_proxy": null, "vca_model_config_automatically_retry_hooks": null, "vca_model_config_backup_dir": null, "vca_model_config_cloudinit_userdata": null, "vca_model_config_container_image_metadata_url": null, "vca_model_config_container_image_stream": null, "vca_model_config_container_inherit_properties": null, "vca_model_config_container_networking_method": null, "vca_model_config_default_series": null, "vca_model_config_default_space": null, "vca_model_config_development": null, "vca_model_config_disable_network_management": null, "vca_model_config_egress_subnets": null, "vca_model_config_enable_os_refresh_update": null, "vca_model_config_enable_os_upgrade": null, "vca_model_config_fan_config": null, "vca_model_config_firewall_mode": null, "vca_model_config_ftp_proxy": null, "vca_model_config_http_proxy": null, "vca_model_config_https_proxy": null, "vca_model_config_ignore_machine_addresses": null, "vca_model_config_image_metadata_url": null, "vca_model_config_image_stream": null, "vca_model_config_juju_ftp_proxy": null, "vca_model_config_juju_http_proxy": null, "vca_model_config_juju_https_proxy": null, "vca_model_config_juju_no_proxy": null, "vca_model_config_logforward_enabled": null, "vca_model_config_logging_config": null, "vca_model_config_lxd_snap_channel": null, "vca_model_config_max_action_results_age": null, "vca_model_config_max_action_results_size": null, "vca_model_config_max_status_history_age": null, "vca_model_config_max_status_history_size": null, "vca_model_config_net_bond_reconfigure_delay": null, "vca_model_config_no_proxy": null, "vca_model_config_provisioner_harvest_mode": null, "vca_model_config_proxy_ssh": null, "vca_model_config_snap_http_proxy": null, "vca_model_config_snap_https_proxy": null, "vca_model_config_snap_store_assertions": null, "vca_model_config_snap_store_proxy": null, "vca_model_config_snap_store_proxy_url": null, "vca_model_config_ssl_hostname_verification": null, "vca_model_config_test_mode": null, "vca_model_config_transmit_vendor_metrics": null, "vca_model_config_update_status_hook_interval": null, "vca_port": null, "vca_pubkey": null, "vca_secret": null, "vca_stablerepourl": "https://charts.helm.sh/stable", "vca_user": null}}]
+["charm", "remove", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "ch:amd64/kubernetes/osm-lcm-1", "charm-version": "", "life": "dying", "profile": null, "config": {"database_commonkey": "osm", "debug_common_local_path": null, "debug_lcm_local_path": null, "debug_mode": false, "debug_n2vc_local_path": null, "debug_pubkey": null, "image_pull_policy": "always", "log_level": "INFO", "mongodb_uri": null, "security_context": false, "vca_apiproxy": null, "vca_cacert": null, "vca_cloud": null, "vca_helm_ca_certs": "", "vca_host": null, "vca_k8s_cloud": null, "vca_model_config_agent_metadata_url": null, "vca_model_config_agent_stream": null, "vca_model_config_apt_ftp_proxy": null, "vca_model_config_apt_http_proxy": null, "vca_model_config_apt_https_proxy": null, "vca_model_config_apt_mirror": null, "vca_model_config_apt_no_proxy": null, "vca_model_config_automatically_retry_hooks": null, "vca_model_config_backup_dir": null, "vca_model_config_cloudinit_userdata": null, "vca_model_config_container_image_metadata_url": null, "vca_model_config_container_image_stream": null, "vca_model_config_container_inherit_properties": null, "vca_model_config_container_networking_method": null, "vca_model_config_default_series": null, "vca_model_config_default_space": null, "vca_model_config_development": null, "vca_model_config_disable_network_management": null, "vca_model_config_egress_subnets": null, "vca_model_config_enable_os_refresh_update": null, "vca_model_config_enable_os_upgrade": null, "vca_model_config_fan_config": null, "vca_model_config_firewall_mode": null, "vca_model_config_ftp_proxy": null, "vca_model_config_http_proxy": null, "vca_model_config_https_proxy": null, "vca_model_config_ignore_machine_addresses": null, "vca_model_config_image_metadata_url": null, "vca_model_config_image_stream": null, "vca_model_config_juju_ftp_proxy": null, "vca_model_config_juju_http_proxy": null, "vca_model_config_juju_https_proxy": null, "vca_model_config_juju_no_proxy": null, "vca_model_config_logforward_enabled": null, "vca_model_config_logging_config": null, "vca_model_config_lxd_snap_channel": null, "vca_model_config_max_action_results_age": null, "vca_model_config_max_action_results_size": null, "vca_model_config_max_status_history_age": null, "vca_model_config_max_status_history_size": null, "vca_model_config_net_bond_reconfigure_delay": null, "vca_model_config_no_proxy": null, "vca_model_config_provisioner_harvest_mode": null, "vca_model_config_proxy_ssh": null, "vca_model_config_snap_http_proxy": null, "vca_model_config_snap_https_proxy": null, "vca_model_config_snap_store_assertions": null, "vca_model_config_snap_store_proxy": null, "vca_model_config_snap_store_proxy_url": null, "vca_model_config_ssl_hostname_verification": null, "vca_model_config_test_mode": null, "vca_model_config_transmit_vendor_metrics": null, "vca_model_config_update_status_hook_interval": null, "vca_port": null, "vca_pubkey": null, "vca_secret": null, "vca_stablerepourl": "https://charts.helm.sh/stable", "vca_user": null}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm", "exposed": false, "charm-url": "local:kubernetes/osm-lcm-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"database_commonkey": "osm", "debug_common_local_path": "/home/ubuntu/mark/git/osm/branches/master/common", "debug_lcm_local_path": "/home/ubuntu/mark/git/osm/branches/master/LCM", "debug_mode": true, "debug_n2vc_local_path": "/home/ubuntu/mark/git/osm/branches/master/N2VC", "debug_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUrOLpYylC9lRlIknpGeda2mzY+mqTYxLDj9Q5t2jerT/aHARSr7DBbkLroqb8bZLsHw3QSHOy9AjF7Y8z5HpkFHGL0do1A/a3MkY+TIX3+FVP8FuvSIb7fNofC2odH5Pj/5kY2TSQhGcsAeYejoYn6qQ0xElNJtWaoqPKkAe825TJkANc31YvokxYCbY9oHfzUPEXtS2nADJrn5drEgc/R8cAwPRNPs2EU/XT2u1m+UP5T9nHbFV9rjv7RhrezB1ynQ5IGsPteOCDIsLswLKpuSQ0JBpuYb6wKjzBlYYyMe1lQF+m9ZWEnywGzCEQncsOxF+GzSbxrrtTLOFgDAbT mark.beierl@canonical.com", "log_level": "DEBUG", "vca_cacert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1iT21oMThUaFc3NDNlSGhIckZQL1JzcXd5U01BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TkRBNApNREl5TXpReldoY05Nekl3TkRBNE1ESXlPRFF6V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMWJMYUMwemMKQzdKOGRSdkg0UEtSZzU5MEI0S0EzRXpTNXUxbW81QldLblBoQ3RPb1hIVU03ZnhvU2RlV1dKb0FKb0hWOFlaUApiVzF0MnBzZEtGbWlZYWxhdGNkUSs5VGU5dWMxbnRNRDRlTVFTSjVKQ0MrSW83SDdCSjY0bkV4dms4RWNmT0F3CnNxL1lvMnZJaHcwVTNDZk5LaWNPNHE4MW1jamlkc001Nmp3eHA2R05SaVY5bEszV2hXd0JKWjZMdkovUDZySDAKNU8yV2crK0pNOFMzdGlFV1N3SzhZTmxiYTVKUExkdnNPVkVWWVVsK0NUc0RpRGhzZ2hFSHU2RHBzSzd5dGw2aApWa3NFRjI4Y1krRmhhVXpXejk2d0JqM1M0UUdKQXV5K1dBWStnRVZZcXIrQ0dySkVNeEJLK0VPWjJ2MjJ1YW9iClJyNmo5dkZRQ2I5YVQ5RTV1SDRZWGhIelJ2YUZLQVB4b2J5OFFpR0cwRXJkZTA1ajFYU0NaS0EyMXEyODcvR2wKT0NWWUxMMVNBb1VIbUMzUEZGU25ycDYzdUxLSWVJVTAyb1o0L3JQODlnbUM4VXBJNXgxTEdKQXJ1OEd1NHltRApBR2FxSjNWdjQ0MjIyWEhJaThXZGdwUFNLZWpQeUlReW9HMHBuTTZRUk5KaUdYdXg5S3dIblV5bEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2cKM3VmTzhhajJCc2V2R0lMaEUxQUZpaTR3VWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWs1eXFQeDFFWEF3MApIM2VoeVJienhCa1VKQkgwU2RDT3drelY4MVp3dmxBbktLS3hOTTFBd2VSQUxrUTNIRlMrOW11L2szK2ZZRG1uCkxMR3ZLMjM2THYzWWZpMkVHd2ZpbC9icnQ3bU5pQkRtTDdvd0Vta09oVzVKYlRiM2RRcmxtelJsVXhIU1R4d0MKUUM2NWdQTkJJcTNUZUZmU2t6ZlA1N0FIK0ZHemZYNTVBa0pnbEZpQXRRcGFoQXgxVlRaWitPK3RjbWZSSW5mUQpDSzArZE5qc3VUd2NHbzhvYUpOamJmWHNPYlA1eWFneWV5d2ZxQ3lvRExnT2gwdUlGUVBiUlBRM1g0OUw3bzhmCnNGRm9CcmVNbjFJWjJBUlplc0dWYXRKSFhRb01WRzcrK3F1L0g2dVNEMFZtK3piNTBJbGVhZHZiQVR2aUlUTlgKYWNtQkRvSmdOQ1JsTEhBR3hWN2pabFIrSFBjUitOTGordklJOUloeHVaY09STW5kTHpyT2hGSjVqM2FuOG5kbApOdW9sR2c3WW1hRmJWdFo3aUdEWnBISTdSQVFSZitjNVlKbFVIbUwrMnpNR2xiZHlKL3B5cTRjUEJsVDZlWUhtCmxSVEhseXVRaTd2ZndneXJwVU53ajMvbkNUekxjWDVqaHp3L1h2aDlGeGZpL1FTTmxKREIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK", "vca_cloud": "lxd-cloud", "vca_host": "10.0.2.68", "vca_k8s_cloud": "microk8s", "vca_port": 17070, "vca_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+tPyU/gOogK/jQFbDgHtlaYhba8Y1SshxC5vL908ST2I6ku4+1XfIgVi8gfCUDRG8kzHL9S0i8iCvPYqCIasSEVD7+LCjYn19JZXWhnkwmlmHoW3a7ljw++d4aNWGKNWxiQOKKtM26ZH5yu1kKHtmZ1bcgrKGkQdiYBhzsKZ/8lRoWakGwZdDTdny6ZxmcvJ52GLyDs/K4jK730ogRVcsj7h3hb7KXKedNkX89ciAaus8m3HA9nMWsf8C0GRXR9ymGDml9pUORO8/6uOsccn5VQWHl5sitSG4K2W/5jBBNNmRQ8obV2ey7N+3nhb9luzhgk2Slj0XTjhnKOLP01Jn juju-client-key", "vca_secret": "86bbee23c74c078a3a67a95349788748", "vca_user": "admin"}, "subordinate": false, 
"status": {"current": "active", "message": "", "since": "2022-04-25T15:19:30.580696141Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:19:46.158217393Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T13:52:43.299439405Z", "version": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm", "exposed": false, "charm-url": "local:kubernetes/osm-lcm-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"database_commonkey": "osm", "debug_common_local_path": "/home/ubuntu/mark/git/osm/branches/master/common", "debug_lcm_local_path": "/home/ubuntu/mark/git/osm/branches/master/LCM", "debug_mode": true, "debug_n2vc_local_path": "/home/ubuntu/mark/git/osm/branches/master/N2VC", "debug_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUrOLpYylC9lRlIknpGeda2mzY+mqTYxLDj9Q5t2jerT/aHARSr7DBbkLroqb8bZLsHw3QSHOy9AjF7Y8z5HpkFHGL0do1A/a3MkY+TIX3+FVP8FuvSIb7fNofC2odH5Pj/5kY2TSQhGcsAeYejoYn6qQ0xElNJtWaoqPKkAe825TJkANc31YvokxYCbY9oHfzUPEXtS2nADJrn5drEgc/R8cAwPRNPs2EU/XT2u1m+UP5T9nHbFV9rjv7RhrezB1ynQ5IGsPteOCDIsLswLKpuSQ0JBpuYb6wKjzBlYYyMe1lQF+m9ZWEnywGzCEQncsOxF+GzSbxrrtTLOFgDAbT mark.beierl@canonical.com", "log_level": "DEBUG", "vca_cacert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1iT21oMThUaFc3NDNlSGhIckZQL1JzcXd5U01BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TkRBNApNREl5TXpReldoY05Nekl3TkRBNE1ESXlPRFF6V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMWJMYUMwemMKQzdKOGRSdkg0UEtSZzU5MEI0S0EzRXpTNXUxbW81QldLblBoQ3RPb1hIVU03ZnhvU2RlV1dKb0FKb0hWOFlaUApiVzF0MnBzZEtGbWlZYWxhdGNkUSs5VGU5dWMxbnRNRDRlTVFTSjVKQ0MrSW83SDdCSjY0bkV4dms4RWNmT0F3CnNxL1lvMnZJaHcwVTNDZk5LaWNPNHE4MW1jamlkc001Nmp3eHA2R05SaVY5bEszV2hXd0JKWjZMdkovUDZySDAKNU8yV2crK0pNOFMzdGlFV1N3SzhZTmxiYTVKUExkdnNPVkVWWVVsK0NUc0RpRGhzZ2hFSHU2RHBzSzd5dGw2aApWa3NFRjI4Y1krRmhhVXpXejk2d0JqM1M0UUdKQXV5K1dBWStnRVZZcXIrQ0dySkVNeEJLK0VPWjJ2MjJ1YW9iClJyNmo5dkZRQ2I5YVQ5RTV1SDRZWGhIelJ2YUZLQVB4b2J5OFFpR0cwRXJkZTA1ajFYU0NaS0EyMXEyODcvR2wKT0NWWUxMMVNBb1VIbUMzUEZGU25ycDYzdUxLSWVJVTAyb1o0L3JQODlnbUM4VXBJNXgxTEdKQXJ1OEd1NHltRApBR2FxSjNWdjQ0MjIyWEhJaThXZGdwUFNLZWpQeUlReW9HMHBuTTZRUk5KaUdYdXg5S3dIblV5bEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2cKM3VmTzhhajJCc2V2R0lMaEUxQUZpaTR3VWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWs1eXFQeDFFWEF3MApIM2VoeVJienhCa1VKQkgwU2RDT3drelY4MVp3dmxBbktLS3hOTTFBd2VSQUxrUTNIRlMrOW11L2szK2ZZRG1uCkxMR3ZLMjM2THYzWWZpMkVHd2ZpbC9icnQ3bU5pQkRtTDdvd0Vta09oVzVKYlRiM2RRcmxtelJsVXhIU1R4d0MKUUM2NWdQTkJJcTNUZUZmU2t6ZlA1N0FIK0ZHemZYNTVBa0pnbEZpQXRRcGFoQXgxVlRaWitPK3RjbWZSSW5mUQpDSzArZE5qc3VUd2NHbzhvYUpOamJmWHNPYlA1eWFneWV5d2ZxQ3lvRExnT2gwdUlGUVBiUlBRM1g0OUw3bzhmCnNGRm9CcmVNbjFJWjJBUlplc0dWYXRKSFhRb01WRzcrK3F1L0g2dVNEMFZtK3piNTBJbGVhZHZiQVR2aUlUTlgKYWNtQkRvSmdOQ1JsTEhBR3hWN2pabFIrSFBjUitOTGordklJOUloeHVaY09STW5kTHpyT2hGSjVqM2FuOG5kbApOdW9sR2c3WW1hRmJWdFo3aUdEWnBISTdSQVFSZitjNVlKbFVIbUwrMnpNR2xiZHlKL3B5cTRjUEJsVDZlWUhtCmxSVEhseXVRaTd2ZndneXJwVU53ajMvbkNUekxjWDVqaHp3L1h2aDlGeGZpL1FTTmxKREIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK", "vca_cloud": "lxd-cloud", "vca_host": "10.0.2.68", "vca_k8s_cloud": "microk8s", "vca_port": 17070, "vca_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+tPyU/gOogK/jQFbDgHtlaYhba8Y1SshxC5vL908ST2I6ku4+1XfIgVi8gfCUDRG8kzHL9S0i8iCvPYqCIasSEVD7+LCjYn19JZXWhnkwmlmHoW3a7ljw++d4aNWGKNWxiQOKKtM26ZH5yu1kKHtmZ1bcgrKGkQdiYBhzsKZ/8lRoWakGwZdDTdny6ZxmcvJ52GLyDs/K4jK730ogRVcsj7h3hb7KXKedNkX89ciAaus8m3HA9nMWsf8C0GRXR9ymGDml9pUORO8/6uOsccn5VQWHl5sitSG4K2W/5jBBNNmRQ8obV2ey7N+3nhb9luzhgk2Slj0XTjhnKOLP01Jn juju-client-key", "vca_secret": "86bbee23c74c078a3a67a95349788748", "vca_user": "admin"}, "subordinate": false, 
"status": {"current": "active", "message": "", "since": "2022-04-25T15:19:30.580696141Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:19:46.158217393Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T13:52:44.718162892Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "Assembling pod spec", "since": "2022-04-27T13:52:45.691682061Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T13:52:44.718162892Z", "version": ""}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm", "exposed": false, "charm-url": "local:kubernetes/osm-lcm-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"database_commonkey": "osm", "debug_common_local_path": "/home/ubuntu/mark/git/osm/branches/master/common", "debug_lcm_local_path": "/home/ubuntu/mark/git/osm/branches/master/LCM", "debug_mode": true, "debug_n2vc_local_path": "/home/ubuntu/mark/git/osm/branches/master/N2VC", "debug_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUrOLpYylC9lRlIknpGeda2mzY+mqTYxLDj9Q5t2jerT/aHARSr7DBbkLroqb8bZLsHw3QSHOy9AjF7Y8z5HpkFHGL0do1A/a3MkY+TIX3+FVP8FuvSIb7fNofC2odH5Pj/5kY2TSQhGcsAeYejoYn6qQ0xElNJtWaoqPKkAe825TJkANc31YvokxYCbY9oHfzUPEXtS2nADJrn5drEgc/R8cAwPRNPs2EU/XT2u1m+UP5T9nHbFV9rjv7RhrezB1ynQ5IGsPteOCDIsLswLKpuSQ0JBpuYb6wKjzBlYYyMe1lQF+m9ZWEnywGzCEQncsOxF+GzSbxrrtTLOFgDAbT mark.beierl@canonical.com", "log_level": "DEBUG", "vca_cacert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1iT21oMThUaFc3NDNlSGhIckZQL1JzcXd5U01BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TkRBNApNREl5TXpReldoY05Nekl3TkRBNE1ESXlPRFF6V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMWJMYUMwemMKQzdKOGRSdkg0UEtSZzU5MEI0S0EzRXpTNXUxbW81QldLblBoQ3RPb1hIVU03ZnhvU2RlV1dKb0FKb0hWOFlaUApiVzF0MnBzZEtGbWlZYWxhdGNkUSs5VGU5dWMxbnRNRDRlTVFTSjVKQ0MrSW83SDdCSjY0bkV4dms4RWNmT0F3CnNxL1lvMnZJaHcwVTNDZk5LaWNPNHE4MW1jamlkc001Nmp3eHA2R05SaVY5bEszV2hXd0JKWjZMdkovUDZySDAKNU8yV2crK0pNOFMzdGlFV1N3SzhZTmxiYTVKUExkdnNPVkVWWVVsK0NUc0RpRGhzZ2hFSHU2RHBzSzd5dGw2aApWa3NFRjI4Y1krRmhhVXpXejk2d0JqM1M0UUdKQXV5K1dBWStnRVZZcXIrQ0dySkVNeEJLK0VPWjJ2MjJ1YW9iClJyNmo5dkZRQ2I5YVQ5RTV1SDRZWGhIelJ2YUZLQVB4b2J5OFFpR0cwRXJkZTA1ajFYU0NaS0EyMXEyODcvR2wKT0NWWUxMMVNBb1VIbUMzUEZGU25ycDYzdUxLSWVJVTAyb1o0L3JQODlnbUM4VXBJNXgxTEdKQXJ1OEd1NHltRApBR2FxSjNWdjQ0MjIyWEhJaThXZGdwUFNLZWpQeUlReW9HMHBuTTZRUk5KaUdYdXg5S3dIblV5bEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2cKM3VmTzhhajJCc2V2R0lMaEUxQUZpaTR3VWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWs1eXFQeDFFWEF3MApIM2VoeVJienhCa1VKQkgwU2RDT3drelY4MVp3dmxBbktLS3hOTTFBd2VSQUxrUTNIRlMrOW11L2szK2ZZRG1uCkxMR3ZLMjM2THYzWWZpMkVHd2ZpbC9icnQ3bU5pQkRtTDdvd0Vta09oVzVKYlRiM2RRcmxtelJsVXhIU1R4d0MKUUM2NWdQTkJJcTNUZUZmU2t6ZlA1N0FIK0ZHemZYNTVBa0pnbEZpQXRRcGFoQXgxVlRaWitPK3RjbWZSSW5mUQpDSzArZE5qc3VUd2NHbzhvYUpOamJmWHNPYlA1eWFneWV5d2ZxQ3lvRExnT2gwdUlGUVBiUlBRM1g0OUw3bzhmCnNGRm9CcmVNbjFJWjJBUlplc0dWYXRKSFhRb01WRzcrK3F1L0g2dVNEMFZtK3piNTBJbGVhZHZiQVR2aUlUTlgKYWNtQkRvSmdOQ1JsTEhBR3hWN2pabFIrSFBjUitOTGordklJOUloeHVaY09STW5kTHpyT2hGSjVqM2FuOG5kbApOdW9sR2c3WW1hRmJWdFo3aUdEWnBISTdSQVFSZitjNVlKbFVIbUwrMnpNR2xiZHlKL3B5cTRjUEJsVDZlWUhtCmxSVEhseXVRaTd2ZndneXJwVU53ajMvbkNUekxjWDVqaHp3L1h2aDlGeGZpL1FTTmxKREIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK", "vca_cloud": "lxd-cloud", "vca_host": "10.0.2.68", "vca_k8s_cloud": "microk8s", "vca_port": 17070, "vca_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+tPyU/gOogK/jQFbDgHtlaYhba8Y1SshxC5vL908ST2I6ku4+1XfIgVi8gfCUDRG8kzHL9S0i8iCvPYqCIasSEVD7+LCjYn19JZXWhnkwmlmHoW3a7ljw++d4aNWGKNWxiQOKKtM26ZH5yu1kKHtmZ1bcgrKGkQdiYBhzsKZ/8lRoWakGwZdDTdny6ZxmcvJ52GLyDs/K4jK730ogRVcsj7h3hb7KXKedNkX89ciAaus8m3HA9nMWsf8C0GRXR9ymGDml9pUORO8/6uOsccn5VQWHl5sitSG4K2W/5jBBNNmRQ8obV2ey7N+3nhb9luzhgk2Slj0XTjhnKOLP01Jn juju-client-key", "vca_secret": "86bbee23c74c078a3a67a95349788748", "vca_user": "admin"}, "subordinate": false, 
"status": {"current": "waiting", "message": "", "since": "2022-04-27T13:52:46.113865949Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T13:52:44.718162892Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "installing agent", "since": "2022-04-27T13:52:46.170916945Z", "version": ""}, "agent-status": {"current": "allocating", "message": "", "since": "2022-04-27T13:52:46.170916945Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:46.185629877Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "agent initializing", "since": "2022-04-27T13:52:46.396291377Z", "version": ""}, "agent-status": {"current": "allocating", "message": "", "since": "2022-04-27T13:52:46.170916945Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "waiting", "message": "agent initializing", "since": "2022-04-27T13:52:46.396291377Z", "version": ""}, "agent-status": {"current": "allocating", "message": "", "since": "2022-04-27T13:52:46.170916945Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:47.626524855Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:47.626524855Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:49.020057468Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:49.020057468Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "allocating", "message": "", "since": "2022-04-27T13:52:46.170916945Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "executing", "message": "running install hook", "since": "2022-04-27T13:52:50.406261397Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-created hook", "since": "2022-04-27T13:52:52.218957218Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "ro/0", "application": "ro", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/osm-ro-1", "life": "alive", "public-address": "", "private-address": "10.152.183.73", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:03:19.691982951Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-joined hook for lcm/10", "since": "2022-04-27T13:52:52.325816598Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "executing", "message": "running database-relation-joined hook for lcm/10", "since": "2022-04-27T13:52:52.333131271Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T17:38:51.675080168Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-joined hook for lcm/10", "since": "2022-04-27T13:52:52.343941917Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T17:38:51.675080168Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-changed hook for lcm/10", "since": "2022-04-27T13:52:53.180263675Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-created hook", "since": "2022-04-27T13:52:53.81029874Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T17:38:51.675080168Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:53.921515789Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "executing", "message": "running database-relation-changed hook for lcm/10", "since": "2022-04-27T13:52:54.095455492Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "ro/0", "application": "ro", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/osm-ro-1", "life": "alive", "public-address": "", "private-address": "10.152.183.73", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:03:19.691982951Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-changed hook for lcm/10", "since": "2022-04-27T13:52:54.485374136Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-created hook", "since": "2022-04-27T13:52:55.252323315Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "ro/0", "application": "ro", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/osm-ro-1", "life": "alive", "public-address": "", "private-address": "10.152.183.73", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:03:19.691982951Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:55.946718559Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:52:56.207634629Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-settings-changed hook", "since": "2022-04-27T13:52:56.781189067Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "installing charm software", "since": "2022-04-27T13:52:50.379089676Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T13:53:00.13054224Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T13:53:00.13054224Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running start hook", "since": "2022-04-27T13:53:02.069519075Z", "version": ""}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:03.159295668Z", "version": ""}}]
+##########################################################################################################################################################################################################################################################
+# These next events are visible on stateless charm upgrade, but so far there is no method to link them to the overall upgrade change
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:03.159295668Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:03.161083444Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-joined hook for mongodb/0", "since": "2022-04-27T13:53:03.638418924Z", "version": ""}}]
+#["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm", "exposed": false, "charm-url": "local:kubernetes/osm-lcm-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "config": {"database_commonkey": "osm", "debug_common_local_path": "/home/ubuntu/mark/git/osm/branches/master/common", "debug_lcm_local_path": "/home/ubuntu/mark/git/osm/branches/master/LCM", "debug_mode": true, "debug_n2vc_local_path": "/home/ubuntu/mark/git/osm/branches/master/N2VC", "debug_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUrOLpYylC9lRlIknpGeda2mzY+mqTYxLDj9Q5t2jerT/aHARSr7DBbkLroqb8bZLsHw3QSHOy9AjF7Y8z5HpkFHGL0do1A/a3MkY+TIX3+FVP8FuvSIb7fNofC2odH5Pj/5kY2TSQhGcsAeYejoYn6qQ0xElNJtWaoqPKkAe825TJkANc31YvokxYCbY9oHfzUPEXtS2nADJrn5drEgc/R8cAwPRNPs2EU/XT2u1m+UP5T9nHbFV9rjv7RhrezB1ynQ5IGsPteOCDIsLswLKpuSQ0JBpuYb6wKjzBlYYyMe1lQF+m9ZWEnywGzCEQncsOxF+GzSbxrrtTLOFgDAbT mark.beierl@canonical.com", "log_level": "DEBUG", "vca_cacert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVFekNDQW51Z0F3SUJBZ0lWQU1iT21oMThUaFc3NDNlSGhIckZQL1JzcXd5U01BMEdDU3FHU0liM0RRRUIKQ3dVQU1DRXhEVEFMQmdOVkJBb1RCRXAxYW5VeEVEQU9CZ05WQkFNVEIycDFhblV0WTJFd0hoY05Nakl3TkRBNApNREl5TXpReldoY05Nekl3TkRBNE1ESXlPRFF6V2pBaE1RMHdDd1lEVlFRS0V3UktkV3AxTVJBd0RnWURWUVFECkV3ZHFkV3AxTFdOaE1JSUJvakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBWThBTUlJQmlnS0NBWUVBMWJMYUMwemMKQzdKOGRSdkg0UEtSZzU5MEI0S0EzRXpTNXUxbW81QldLblBoQ3RPb1hIVU03ZnhvU2RlV1dKb0FKb0hWOFlaUApiVzF0MnBzZEtGbWlZYWxhdGNkUSs5VGU5dWMxbnRNRDRlTVFTSjVKQ0MrSW83SDdCSjY0bkV4dms4RWNmT0F3CnNxL1lvMnZJaHcwVTNDZk5LaWNPNHE4MW1jamlkc001Nmp3eHA2R05SaVY5bEszV2hXd0JKWjZMdkovUDZySDAKNU8yV2crK0pNOFMzdGlFV1N3SzhZTmxiYTVKUExkdnNPVkVWWVVsK0NUc0RpRGhzZ2hFSHU2RHBzSzd5dGw2aApWa3NFRjI4Y1krRmhhVXpXejk2d0JqM1M0UUdKQXV5K1dBWStnRVZZcXIrQ0dySkVNeEJLK0VPWjJ2MjJ1YW9iClJyNmo5dkZRQ2I5YVQ5RTV1SDRZWGhIelJ2YUZLQVB4b2J5OFFpR0cwRXJkZTA1ajFYU0NaS0EyMXEyODcvR2wKT0NWWUxMMVNBb1VIbUMzUEZGU25ycDYzdUxLSWVJVTAyb1o0L3JQODlnbUM4VXBJNXgxTEdKQXJ1OEd1NHltRApBR2FxSjNWdjQ0MjIyWEhJaThXZGdwUFNLZWpQeUlReW9HMHBuTTZRUk5KaUdYdXg5S3dIblV5bEFnTUJBQUdqClFqQkFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2cKM3VmTzhhajJCc2V2R0lMaEUxQUZpaTR3VWpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVlFQWs1eXFQeDFFWEF3MApIM2VoeVJienhCa1VKQkgwU2RDT3drelY4MVp3dmxBbktLS3hOTTFBd2VSQUxrUTNIRlMrOW11L2szK2ZZRG1uCkxMR3ZLMjM2THYzWWZpMkVHd2ZpbC9icnQ3bU5pQkRtTDdvd0Vta09oVzVKYlRiM2RRcmxtelJsVXhIU1R4d0MKUUM2NWdQTkJJcTNUZUZmU2t6ZlA1N0FIK0ZHemZYNTVBa0pnbEZpQXRRcGFoQXgxVlRaWitPK3RjbWZSSW5mUQpDSzArZE5qc3VUd2NHbzhvYUpOamJmWHNPYlA1eWFneWV5d2ZxQ3lvRExnT2gwdUlGUVBiUlBRM1g0OUw3bzhmCnNGRm9CcmVNbjFJWjJBUlplc0dWYXRKSFhRb01WRzcrK3F1L0g2dVNEMFZtK3piNTBJbGVhZHZiQVR2aUlUTlgKYWNtQkRvSmdOQ1JsTEhBR3hWN2pabFIrSFBjUitOTGordklJOUloeHVaY09STW5kTHpyT2hGSjVqM2FuOG5kbApOdW9sR2c3WW1hRmJWdFo3aUdEWnBISTdSQVFSZitjNVlKbFVIbUwrMnpNR2xiZHlKL3B5cTRjUEJsVDZlWUhtCmxSVEhseXVRaTd2ZndneXJwVU53ajMvbkNUekxjWDVqaHp3L1h2aDlGeGZpL1FTTmxKREIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQoK", "vca_cloud": "lxd-cloud", "vca_host": "10.0.2.68", "vca_k8s_cloud": "microk8s", "vca_port": 17070, "vca_pubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+tPyU/gOogK/jQFbDgHtlaYhba8Y1SshxC5vL908ST2I6ku4+1XfIgVi8gfCUDRG8kzHL9S0i8iCvPYqCIasSEVD7+LCjYn19JZXWhnkwmlmHoW3a7ljw++d4aNWGKNWxiQOKKtM26ZH5yu1kKHtmZ1bcgrKGkQdiYBhzsKZ/8lRoWakGwZdDTdny6ZxmcvJ52GLyDs/K4jK730ogRVcsj7h3hb7KXKedNkX89ciAaus8m3HA9nMWsf8C0GRXR9ymGDml9pUORO8/6uOsccn5VQWHl5sitSG4K2W/5jBBNNmRQ8obV2ey7N+3nhb9luzhgk2Slj0XTjhnKOLP01Jn juju-client-key", "vca_secret": "86bbee23c74c078a3a67a95349788748", "vca_user": "admin"}, "subordinate": false, 
"status": {"current": "active", "message": "", "since": "2022-04-27T13:53:04.151820427Z", "version": ""}, "workload-version": ""}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:04.183165399Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:04.374726337Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T17:38:51.675080168Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-departed hook for lcm/9", "since": "2022-04-27T13:53:04.530097985Z", "version": "2.9.22"}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "executing", "message": "running database-relation-departed hook for lcm/9", "since": "2022-04-27T13:53:04.546541075Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "ro/0", "application": "ro", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/osm-ro-1", "life": "alive", "public-address": "", "private-address": "10.152.183.73", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:03:19.691982951Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-departed hook for lcm/9", "since": "2022-04-27T13:53:04.579582114Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-settings-changed hook", "since": "2022-04-27T13:53:05.20239186Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T17:38:51.675080168Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:05.375082613Z", "version": "2.9.22"}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "ro/0", "application": "ro", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/osm-ro-1", "life": "alive", "public-address": "", "private-address": "10.152.183.73", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-25T15:03:19.691982951Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:06.287930066Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "mongodb/0", "application": "mongodb", "series": "kubernetes", "charm-url": "ch:amd64/kubernetes/mongodb-k8s-1", "life": "alive", "public-address": "", "private-address": "10.152.183.147", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-24T08:22:00.904010692Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:06.339773748Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-changed hook for mongodb/0", "since": "2022-04-27T13:53:06.794110477Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-departed hook for ro/0", "since": "2022-04-27T13:53:08.598222736Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-changed hook", "since": "2022-04-27T13:53:09.895852655Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-departed hook for kafka/0", "since": "2022-04-27T13:53:11.774748891Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-changed hook", "since": "2022-04-27T13:53:13.053185245Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-departed hook for mongodb/0", "since": "2022-04-27T13:53:14.670972589Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-joined hook for kafka/0", "since": "2022-04-27T13:53:16.050710673Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:52:46.270309812Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-broken hook", "since": "2022-04-27T13:53:17.564633836Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "Assembling pod spec", "since": "2022-04-27T13:53:18.455720015Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-broken hook", "since": "2022-04-27T13:53:17.564633836Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "blocked", "message": "Need kafka, mongodb, ro relations", "since": "2022-04-27T13:53:18.840406999Z", "version": ""}, "agent-status": {"current": "executing", "message": "running mongodb-relation-broken hook", "since": "2022-04-27T13:53:17.564633836Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-joined hook for ro/0", "since": "2022-04-27T13:53:19.686052274Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "blocked", "message": "Need kafka, mongodb, ro relations", "since": "2022-04-27T13:53:18.840406999Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-broken hook", "since": "2022-04-27T13:53:21.149271009Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "Assembling pod spec", "since": "2022-04-27T13:53:22.073656705Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-broken hook", "since": "2022-04-27T13:53:21.149271009Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "blocked", "message": "Need kafka, mongodb, ro relations", "since": "2022-04-27T13:53:22.515373602Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-broken hook", "since": "2022-04-27T13:53:21.149271009Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-changed hook for ro/0", "since": "2022-04-27T13:53:23.355070294Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "blocked", "message": "Need kafka, mongodb, ro relations", "since": "2022-04-27T13:53:22.515373602Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-broken hook", "since": "2022-04-27T13:53:25.062699158Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "Assembling pod spec", "since": "2022-04-27T13:53:25.93483528Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-broken hook", "since": "2022-04-27T13:53:25.062699158Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "blocked", "message": "Need kafka, mongodb, ro relations", "since": "2022-04-27T13:53:26.411373222Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-broken hook", "since": "2022-04-27T13:53:25.062699158Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-relation-changed hook for kafka/0", "since": "2022-04-27T13:53:27.418670221Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:29.278763461Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "stopping charm software", "since": "2022-04-27T13:53:29.342891381Z", "version": ""}, "agent-status": {"current": "executing", "message": "running ro-relation-broken hook", "since": "2022-04-27T13:53:25.062699158Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "stopping charm software", "since": "2022-04-27T13:53:29.342891381Z", "version": ""}, "agent-status": {"current": "executing", "message": "running stop hook", "since": "2022-04-27T13:53:29.36656005Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T13:53:30.616545619Z", "version": ""}, "agent-status": {"current": "executing", "message": "running stop hook", "since": "2022-04-27T13:53:29.36656005Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "cleaning up prior to charm deletion", "since": "2022-04-27T13:53:31.150790695Z", "version": ""}, "agent-status": {"current": "executing", "message": "running stop hook", "since": "2022-04-27T13:53:29.36656005Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "cleaning up prior to charm deletion", "since": "2022-04-27T13:53:31.150790695Z", "version": ""}, "agent-status": {"current": "executing", "message": "running remove hook", "since": "2022-04-27T13:53:31.185567398Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dying", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "terminated", "message": "", "since": "2022-04-27T13:53:32.316608499Z", "version": ""}, "agent-status": {"current": "executing", "message": "running remove hook", "since": "2022-04-27T13:53:31.185567398Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dead", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "terminated", "message": "", "since": "2022-04-27T13:53:32.316608499Z", "version": ""}, "agent-status": {"current": "executing", "message": "running remove hook", "since": "2022-04-27T13:53:31.185567398Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:01.044522359Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-elected hook", "since": "2022-04-27T13:53:32.725754517Z", "version": ""}}]
+#["unit", "remove", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/9", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "dead", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "terminated", "message": "", "since": "2022-04-27T13:53:32.316608499Z", "version": ""}, "agent-status": {"current": "executing", "message": "running remove hook", "since": "2022-04-27T13:53:31.185567398Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "Assembling pod spec", "since": "2022-04-27T13:53:33.678220029Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-elected hook", "since": "2022-04-27T13:53:32.725754517Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:34.328293936Z", "version": ""}, "agent-status": {"current": "executing", "message": "running leader-elected hook", "since": "2022-04-27T13:53:32.725754517Z", "version": ""}}]
+#["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "lcm/10", "application": "lcm", "series": "kubernetes", "charm-url": "local:kubernetes/osm-lcm-0", "life": "alive", "public-address": "", "private-address": "10.152.183.135", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "ready", "since": "2022-04-27T13:53:34.328293936Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T13:53:34.770344608Z", "version": ""}}]
\ No newline at end of file
diff --git a/osm_lcm/n2vc/tests/unit/testdata/upgrade-sidecar.log b/osm_lcm/n2vc/tests/unit/testdata/upgrade-sidecar.log
new file mode 100644 (file)
index 0000000..c6608b8
--- /dev/null
@@ -0,0 +1,39 @@
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+#
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:focal/kafka-k8s-0", "charm-version": "", "life": "alive", "profile": null}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "local:focal/kafka-k8s-0", "charm-version": "", "life": "alive", "profile": null, "config": {"kafka-properties": "clientPort=2181\nbroker.id.generation.enable=true\nlisteners=PLAINTEXT://:9092\nadvertised.listeners=PLAINTEXT://:9092\nlog.dirs=/var/lib/kafka/data\nauto.create.topics.enable=true\nauto.leader.rebalance.enable=true\nbackground.threads=10\ncompression.type=producer\ndelete.topic.enable=false\nleader.imbalance.check.interval.seconds=300\nleader.imbalance.per.broker.percentage=10\nlog.flush.interval.messages=9223372036854775807\nlog.flush.offset.checkpoint.interval.ms=60000\nlog.flush.scheduler.interval.ms=9223372036854775807\nlog.retention.bytes=-1\nlog.retention.hours=168\nlog.roll.hours=168\nlog.roll.jitter.hours=0\nlog.segment.bytes=1073741824\nlog.segment.delete.delay.ms=60000\nmessage.max.bytes=1000012\nmin.insync.replicas=1\nnum.io.threads=8\nnum.network.threads=1\nnum.recovery.threads.per.data.dir=1\nnum.replica.fetchers=1\noffset.metadata.max.bytes=4096\noffsets.commit.required.acks=-1\noffsets.commit.timeout.ms=5000\noffsets.load.buffer.size=5242880\noffsets.retention.check.interval.ms=600000\noffsets.retention.minutes=1440\noffsets.topic.compression.codec=0\noffsets.topic.num.partitions=50\noffsets.topic.replication.factor=1\noffsets.topic.segment.bytes=104857600\nqueued.max.requests=500\nquota.consumer.default=9223372036854775807\nquota.producer.default=9223372036854775807\nreplica.fetch.min.bytes=1\nreplica.fetch.wait.max.ms=500\nreplica.high.watermark.checkpoint.interval.ms=5000\nreplica.lag.time.max.ms=10000\nreplica.socket.receive.buffer.bytes=65536\nreplica.socket.timeout.ms=30000\nrequest.timeout.ms=30000\nsocket.receive.buffer.bytes=102400\nsocket.request.max.bytes=104857600\nsocket.send.buffer.bytes=102400\nunclean.leader.election.enable=true\nzookeeper.session.timeout.ms=6000\nzookeeper.set.acl=false\nbroker.id.generation.enable=true\nconnections.max.idle.ms=600000\ncontrolled.shutdown.enable=true\ncontrolled.shutdown.max.retries=3\ncontrolled.shutdown.retry.backoff.ms=5000\ncontroller.socket.timeout.ms=30000\ndefault.replication.factor=1\nfetch.purgatory.purge.interval.requests=1000\ngroup.max.session.timeout.ms=300000\ngroup.min.session.timeout.ms=6000\nlog.cleaner.backoff.ms=15000\nlog.cleaner.dedupe.buffer.size=134217728\nlog.cleaner.delete.retention.ms=86400000\nlog.cleaner.enable=true\nlog.cleaner.io.buffer.load.factor=0.9\nlog.cleaner.io.buffer.size=524288\nlog.cleaner.io.max.bytes.per.second=1.7976931348623157E308\nlog.cleaner.min.cleanable.ratio=0.5\nlog.cleaner.min.compaction.lag.ms=0\nlog.cleaner.threads=1\nlog.cleanup.policy=delete\nlog.index.interval.bytes=4096\nlog.index.size.max.bytes=10485760\nlog.message.timestamp.difference.max.ms=9223372036854775807\nlog.message.timestamp.type=CreateTime\nlog.preallocate=false\nlog.retention.check.interval.ms=300000\nmax.connections.per.ip=2147483647\nnum.partitions=1\nproducer.purgatory.purge.interval.requests=1000\nreplica.fetch.backoff.ms=1000\nreplica.fetch.max.bytes=1048576\nreplica.fetch.response.max.bytes=10485760\nreserved.broker.max.id=1000\n", "metrics": true}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-24T18:39:35.346890724Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "stopping charm software", "since": "2022-04-27T18:32:59.063102743Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T17:36:41.956361285Z", "version": "2.9.22"}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-24T18:39:35.346890724Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "stopping charm software", "since": "2022-04-27T18:32:59.063102743Z", "version": ""}, "agent-status": {"current": "executing", "message": "running stop hook", "since": "2022-04-27T18:32:59.129679756Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T18:32:59.701881796Z", "version": ""}, "agent-status": {"current": "executing", "message": "running stop hook", "since": "2022-04-27T18:32:59.129679756Z", "version": "2.9.22"}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:00.285536226Z", "version": ""}, "workload-version": ""}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:01.685500631Z", "version": ""}, "workload-version": ""}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:03.885273135Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "ch:amd64/focal/kafka-k8s-5", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T18:32:59.701881796Z", "version": ""}, "agent-status": {"current": "allocating", "message": "Started container charm-init", "since": "2022-04-27T18:33:03.9134045Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T18:32:59.701881796Z", "version": ""}, "agent-status": {"current": "allocating", "message": "Started container charm-init", "since": "2022-04-27T18:33:03.9134045Z", "version": "2.9.22"}}]
+["charm", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "ch:amd64/focal/kafka-k8s-5", "charm-version": "", "life": "dying", "profile": null, "config": {"kafka-properties": "clientPort=2181\nbroker.id.generation.enable=true\nlisteners=PLAINTEXT://:9092\nadvertised.listeners=PLAINTEXT://:9092\nlog.dirs=/var/lib/kafka/data\nauto.create.topics.enable=true\nauto.leader.rebalance.enable=true\nbackground.threads=10\ncompression.type=producer\ndelete.topic.enable=false\nleader.imbalance.check.interval.seconds=300\nleader.imbalance.per.broker.percentage=10\nlog.flush.interval.messages=9223372036854775807\nlog.flush.offset.checkpoint.interval.ms=60000\nlog.flush.scheduler.interval.ms=9223372036854775807\nlog.retention.bytes=-1\nlog.retention.hours=168\nlog.roll.hours=168\nlog.roll.jitter.hours=0\nlog.segment.bytes=1073741824\nlog.segment.delete.delay.ms=60000\nmessage.max.bytes=1000012\nmin.insync.replicas=1\nnum.io.threads=8\nnum.network.threads=1\nnum.recovery.threads.per.data.dir=1\nnum.replica.fetchers=1\noffset.metadata.max.bytes=4096\noffsets.commit.required.acks=-1\noffsets.commit.timeout.ms=5000\noffsets.load.buffer.size=5242880\noffsets.retention.check.interval.ms=600000\noffsets.retention.minutes=1440\noffsets.topic.compression.codec=0\noffsets.topic.num.partitions=50\noffsets.topic.replication.factor=1\noffsets.topic.segment.bytes=104857600\nqueued.max.requests=500\nquota.consumer.default=9223372036854775807\nquota.producer.default=9223372036854775807\nreplica.fetch.min.bytes=1\nreplica.fetch.wait.max.ms=500\nreplica.high.watermark.checkpoint.interval.ms=5000\nreplica.lag.time.max.ms=10000\nreplica.socket.receive.buffer.bytes=65536\nreplica.socket.timeout.ms=30000\nrequest.timeout.ms=30000\nsocket.receive.buffer.bytes=102400\nsocket.request.max.bytes=104857600\nsocket.send.buffer.bytes=102400\nunclean.leader.election.enable=true\nzookeeper.session.timeout.ms=6000\nzookeeper.set.acl=false\nbroker.id.generation.enable=true\nconnections.max.idle.ms=600000\ncontrolled.shutdown.enable=true\ncontrolled.shutdown.max.retries=3\ncontrolled.shutdown.retry.backoff.ms=5000\ncontroller.socket.timeout.ms=30000\ndefault.replication.factor=1\nfetch.purgatory.purge.interval.requests=1000\ngroup.max.session.timeout.ms=300000\ngroup.min.session.timeout.ms=6000\nlog.cleaner.backoff.ms=15000\nlog.cleaner.dedupe.buffer.size=134217728\nlog.cleaner.delete.retention.ms=86400000\nlog.cleaner.enable=true\nlog.cleaner.io.buffer.load.factor=0.9\nlog.cleaner.io.buffer.size=524288\nlog.cleaner.io.max.bytes.per.second=1.7976931348623157E308\nlog.cleaner.min.cleanable.ratio=0.5\nlog.cleaner.min.compaction.lag.ms=0\nlog.cleaner.threads=1\nlog.cleanup.policy=delete\nlog.index.interval.bytes=4096\nlog.index.size.max.bytes=10485760\nlog.message.timestamp.difference.max.ms=9223372036854775807\nlog.message.timestamp.type=CreateTime\nlog.preallocate=false\nlog.retention.check.interval.ms=300000\nmax.connections.per.ip=2147483647\nnum.partitions=1\nproducer.purgatory.purge.interval.requests=1000\nreplica.fetch.backoff.ms=1000\nreplica.fetch.max.bytes=1048576\nreplica.fetch.response.max.bytes=10485760\nreserved.broker.max.id=1000\n", "metrics": true}}]
+["charm", "remove", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "charm-url": "ch:amd64/focal/kafka-k8s-5", "charm-version": "", "life": "dying", "profile": null, "config": {"kafka-properties": "clientPort=2181\nbroker.id.generation.enable=true\nlisteners=PLAINTEXT://:9092\nadvertised.listeners=PLAINTEXT://:9092\nlog.dirs=/var/lib/kafka/data\nauto.create.topics.enable=true\nauto.leader.rebalance.enable=true\nbackground.threads=10\ncompression.type=producer\ndelete.topic.enable=false\nleader.imbalance.check.interval.seconds=300\nleader.imbalance.per.broker.percentage=10\nlog.flush.interval.messages=9223372036854775807\nlog.flush.offset.checkpoint.interval.ms=60000\nlog.flush.scheduler.interval.ms=9223372036854775807\nlog.retention.bytes=-1\nlog.retention.hours=168\nlog.roll.hours=168\nlog.roll.jitter.hours=0\nlog.segment.bytes=1073741824\nlog.segment.delete.delay.ms=60000\nmessage.max.bytes=1000012\nmin.insync.replicas=1\nnum.io.threads=8\nnum.network.threads=1\nnum.recovery.threads.per.data.dir=1\nnum.replica.fetchers=1\noffset.metadata.max.bytes=4096\noffsets.commit.required.acks=-1\noffsets.commit.timeout.ms=5000\noffsets.load.buffer.size=5242880\noffsets.retention.check.interval.ms=600000\noffsets.retention.minutes=1440\noffsets.topic.compression.codec=0\noffsets.topic.num.partitions=50\noffsets.topic.replication.factor=1\noffsets.topic.segment.bytes=104857600\nqueued.max.requests=500\nquota.consumer.default=9223372036854775807\nquota.producer.default=9223372036854775807\nreplica.fetch.min.bytes=1\nreplica.fetch.wait.max.ms=500\nreplica.high.watermark.checkpoint.interval.ms=5000\nreplica.lag.time.max.ms=10000\nreplica.socket.receive.buffer.bytes=65536\nreplica.socket.timeout.ms=30000\nrequest.timeout.ms=30000\nsocket.receive.buffer.bytes=102400\nsocket.request.max.bytes=104857600\nsocket.send.buffer.bytes=102400\nunclean.leader.election.enable=true\nzookeeper.session.timeout.ms=6000\nzookeeper.set.acl=false\nbroker.id.generation.enable=true\nconnections.max.idle.ms=600000\ncontrolled.shutdown.enable=true\ncontrolled.shutdown.max.retries=3\ncontrolled.shutdown.retry.backoff.ms=5000\ncontroller.socket.timeout.ms=30000\ndefault.replication.factor=1\nfetch.purgatory.purge.interval.requests=1000\ngroup.max.session.timeout.ms=300000\ngroup.min.session.timeout.ms=6000\nlog.cleaner.backoff.ms=15000\nlog.cleaner.dedupe.buffer.size=134217728\nlog.cleaner.delete.retention.ms=86400000\nlog.cleaner.enable=true\nlog.cleaner.io.buffer.load.factor=0.9\nlog.cleaner.io.buffer.size=524288\nlog.cleaner.io.max.bytes.per.second=1.7976931348623157E308\nlog.cleaner.min.cleanable.ratio=0.5\nlog.cleaner.min.compaction.lag.ms=0\nlog.cleaner.threads=1\nlog.cleanup.policy=delete\nlog.index.interval.bytes=4096\nlog.index.size.max.bytes=10485760\nlog.message.timestamp.difference.max.ms=9223372036854775807\nlog.message.timestamp.type=CreateTime\nlog.preallocate=false\nlog.retention.check.interval.ms=300000\nmax.connections.per.ip=2147483647\nnum.partitions=1\nproducer.purgatory.purge.interval.requests=1000\nreplica.fetch.backoff.ms=1000\nreplica.fetch.max.bytes=1048576\nreplica.fetch.response.max.bytes=10485760\nreserved.broker.max.id=1000\n", "metrics": true}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:05.885991239Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T18:32:59.701881796Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:33:05.939780569Z", "version": "2.9.22"}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:07.685099274Z", "version": ""}, "workload-version": ""}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:09.485853048Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T18:32:59.701881796Z", "version": ""}, "agent-status": {"current": "executing", "message": "running upgrade-charm hook", "since": "2022-04-27T18:33:11.686940017Z", "version": "2.9.22"}}]
+["application", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka", "exposed": false, "charm-url": "local:focal/kafka-k8s-0", "owner-tag": "", "life": "alive", "min-units": 0, "constraints": {}, "subordinate": false, "status": {"current": "active", "message": "", "since": "2022-04-27T18:33:09.485853048Z", "version": ""}, "workload-version": ""}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "maintenance", "message": "", "since": "2022-04-27T18:32:59.701881796Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:33:13.166304447Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:33:14.910656736Z", "version": ""}, "agent-status": {"current": "executing", "message": "running config-changed hook", "since": "2022-04-27T18:33:13.166304447Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:33:14.910656736Z", "version": ""}, "agent-status": {"current": "executing", "message": "running start hook", "since": "2022-04-27T18:33:15.313510973Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:33:14.910656736Z", "version": ""}, "agent-status": {"current": "executing", "message": "running kafka-pebble-ready hook", "since": "2022-04-27T18:33:16.205042856Z", "version": "2.9.22"}}]
+["unit", "change", {"model-uuid": "835bd5cf-237b-44dd-8fac-1f5db45e5a06", "name": "kafka/0", "application": "kafka", "series": "focal", "charm-url": "local:focal/kafka-k8s-0", "life": "alive", "public-address": "", "private-address": "10.152.183.188", "machine-id": "", "ports": null, "port-ranges": null, "principal": "", "subordinate": false, "workload-status": {"current": "active", "message": "", "since": "2022-04-27T18:33:14.910656736Z", "version": ""}, "agent-status": {"current": "idle", "message": "", "since": "2022-04-27T18:33:17.168708577Z", "version": "2.9.22"}}]
\ No newline at end of file
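The upgrade logs above are newline-delimited JSON triples of [entity, change-type, data], mirroring the deltas a juju AllWatcher emits; "#"-prefixed lines carry the license header or events kept only for reference. As a hedged sketch of how such a log can be replayed with nothing but the standard library (replay_deltas is illustrative for this note, not a helper shipped in this commit):

import json
from pathlib import Path


def replay_deltas(log_path: Path):
    """Yield (entity, change_type, data) triples from a watcher testdata log."""
    for raw in log_path.read_text().splitlines():
        line = raw.strip()
        # Skip blank lines and '#' lines (license text, annotated events).
        if not line or line.startswith("#"):
            continue
        entity, change_type, data = json.loads(line)
        yield entity, change_type, data


if __name__ == "__main__":
    log = Path("osm_lcm/n2vc/tests/unit/testdata/upgrade-sidecar.log")
    for entity, change, data in replay_deltas(log):
        if entity == "unit":
            print(data["name"], "->", data["agent-status"]["current"])

Filtering on the entity field reproduces the unit, application, and charm interleaving that tests such as test_juju_watcher.py can drive through the watcher.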
diff --git a/osm_lcm/n2vc/tests/unit/utils.py b/osm_lcm/n2vc/tests/unit/utils.py
new file mode 100644 (file)
index 0000000..4ca76f8
--- /dev/null
@@ -0,0 +1,565 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+import asyncio
+
+from osm_lcm.n2vc.utils import Dict, N2VCDeploymentStatus
+from osm_lcm.n2vc.n2vc_conn import N2VCConnector
+from unittest.mock import MagicMock
+
+
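+# Self-contained kubeconfig fixture: one microk8s-style cluster plus an admin
+# token user, so kubectl-oriented tests never need to reach a real cluster.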
+kubeconfig = """apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1\
+        JSURBVENDQWVtZ0F3SUJBZ0lKQUxjMk9xVUpwcnVCTUEwR0NTcUdTSWIzRFFFQk\
+        N3VUFNQmN4RlRBVEJnTlYKQkFNTURERXdMakUxTWk0eE9ETXVNVEFlRncweU1EQ\
+        TVNVEV4TkRJeU16VmFGdzB6TURBNU1Ea3hOREl5TXpWYQpNQmN4RlRBVEJnTlZC\
+        QU1NRERFd0xqRTFNaTR4T0RNdU1UQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQUR\
+        nZ0VQCkFEQ0NBUW9DZ2dFQkFNV0tyQkdxWlJRT0VONDExR2RESmY2ckZWRDcvMU\
+        xHNlZMWjNhd1BRdHBhRTRxdVdyNisKWjExTWwra2kwVEU1cGZFV3dKenVUZXlCU\
+        XVkUEpnYm1QTjF1VWROdGRiNlpocHEzeC9oT0hCMVJLNC9iSlNFUgpiZ0dITmN6\
+        MzR6SHRaZ1dwb2NPTXpPOW9oRUdhMTZUaDhmQWVxYU1CQTJRaklmeUFlaVp3VHJ\
+        nZ3BrY2dBMUlOCjBvQkdqSURnSGVoSU5tbGZOOURkQ3hNN1FNTmtSbzRXdE13bF\
+        JSRWZ4QnFiVkNpZGFjbVhhb1VPUjJPeFVmQWEKN1orSUU1TmN5ZFQ1TGovazdwd\
+        XZCVkdIa0JQWnE0TmlBa3R4aXd5NVB5R29GTk9mT0NrV2I2VnBzVzNhTlNJeAo4\
+        aXBITkc3enV3elc1TGQ5TkhQYWpRckZwdFZBSHpJNWNhRUNBd0VBQWFOUU1FNHd\
+        IUVlEVlIwT0JCWUVGQ1dVCkFaTXNaeE13L1k1OGlXMGZJWVAzcDdTYk1COEdBMV\
+        VkSXdRWU1CYUFGQ1dVQVpNc1p4TXcvWTU4aVcwZklZUDMKcDdTYk1Bd0dBMVVkR\
+        XdRRk1BTUJBZjh3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUJaMlYxMWowRzhh\
+        Z1Z6Twp2YWtKTGt4UGZ0UE1NMFFOaVRzZmV6RzlicnBkdEVLSjFyalFCblNXYTN\
+        WbThWRGZTYkhLQUNXaGh0OEhzcXhtCmNzdVQyOWUyaGZBNHVIOUxMdy9MVG5EdE\
+        tJSjZ6aWFzaTM5RGh3UGwwaExuamJRMjk4VVo5TGovVlpnZGlqemIKWnVPdHlpT\
+        nVOS0E2Nmd0dGxXcWZRQ2hkbnJ5MlZUbjBjblR5dU9UalByYWdOdXJMdlVwL3Nl\
+        eURhZmsxNXJ4egozcmlYZldiQnRhUUk1dnM0ekFKU2xneUg2RnpiZStoTUhlUzF\
+        mM2ppb3dJV0lRR2NNbHpGT1RpMm1xWFRybEJYCnh1WmpLZlpOcndjQVNGbk9qYV\
+        BWeFQ1ODJ4WWhtTm8wR3J2MlZEck51bDlSYkgvK3lNS2J5NEhkOFRvVThMU2kKY\
+        3Uxajh3cz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+    server: https://192.168.0.22:16443
+  name: microk8s-cluster
+contexts:
+- context:
+    cluster: microk8s-cluster
+    user: admin
+  name: microk8s
+current-context: microk8s
+kind: Config
+preferences: {}
+users:
+- name: admin
+  user:
+    token: clhkRExRem5Xd1dCdnFEVXdvRGtDRGE5b1F3WnNrZk5qeHFCOU10bHBZRT0K
+"""
+
+
+async def AsyncMockFunc():
+    await asyncio.sleep(1)
+
+
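+# MagicMock subclass whose calls are awaitable, so instances can stand in for
+# coroutine functions in these tests.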
+class AsyncMock(MagicMock):
+    async def __call__(self, *args, **kwargs):
+        return super().__call__(*args, **kwargs)
+
+
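+# Fake N2VC connector that records the last status written to the database so
+# tests can assert on the exact payload.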
+class FakeN2VC(MagicMock):
+    last_written_values = None
+
+    async def write_app_status_to_db(
+        self,
+        db_dict: dict,
+        status: N2VCDeploymentStatus,
+        detailed_status: str,
+        vca_status: str,
+        entity_type: str,
+        vca_id: str = None,
+    ):
+        """
+        Write application status to database
+
+        :param: db_dict: DB dictionary
+        :param: status: Status of the application
+        :param: detailed_status: Detailed status
+        :param: vca_status: VCA status
+        :param: entity_type: Entity type ("application", "machine", or "action")
+        :param: vca_id: Id of the VCA. If None, the default VCA will be used.
+        """
+        self.last_written_values = Dict(
+            {
+                "n2vc_status": status,
+                "message": detailed_status,
+                "vca_status": vca_status,
+                "entity": entity_type,
+            }
+        )
+
+    osm_status = N2VCConnector.osm_status
+
+
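+# Machine fixtures: FakeManualMachine differs only by the "manual:" prefix in
+# its instance-id, which is how Juju marks manually provisioned machines.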
+class FakeMachine(MagicMock):
+    entity_id = "2"
+    dns_name = "FAKE ENDPOINT"
+    model_name = "FAKE MODEL"
+    entity_type = "machine"
+    safe_data = {"instance-id": "myid"}
+
+    async def destroy(self, force):
+        pass
+
+
+class FakeManualMachine(MagicMock):
+    entity_id = "2"
+    dns_name = "FAKE ENDPOINT"
+    model_name = "FAKE MODEL"
+    entity_type = "machine"
+    safe_data = {"instance-id": "manual:myid"}
+    series = "FAKE SERIES"
+
+    async def destroy(self, force):
+        pass
+
+
+class FakeWatcher(AsyncMock):
+    delta_to_return = None
+
+    async def Next(self):
+        return Dict({"deltas": self.delta_to_return})
+
+
+class FakeConnection(MagicMock):
+    endpoint = None
+    is_open = False
+
+
+class FakeAction(MagicMock):
+    entity_id = "id"
+    status = "ready"
+
+
+class FakeModel:
+    def __init__(self, applications: dict = None):
+        # Avoid a shared mutable default; each instance gets its own dict.
+        self._applications = applications if applications is not None else {}
+
+    @property
+    def applications(self):
+        return self._applications
+
+
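+# Unit fixture that always claims leadership and returns a FakeAction for any
+# action it is asked to run.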
+class FakeUnit(MagicMock):
+    async def is_leader_from_status(self):
+        return True
+
+    async def run_action(self, action_name, **kwargs):
+        return FakeAction()
+
+    @property
+    def machine_id(self):
+        return "existing_machine_id"
+
+    name = "existing_unit"
+
+
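+# Application fixture with two units; the async mutators are deliberate no-ops
+# so callers can await them without side effects.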
+class FakeApplication(AsyncMock):
+    async def set_config(self, config):
+        pass
+
+    async def add_unit(self, to):
+        pass
+
+    async def destroy_unit(self, unit_name):
+        pass
+
+    async def get_actions(self):
+        return ["existing_action"]
+
+    async def get_config(self):
+        return ["app_config"]
+
+    async def scale(self, scale):
+        pass
+
+    units = [FakeUnit(), FakeUnit()]
+
+
+class FakeFile:
+    def __init__(self, content: str = ""):
+        self.content = content
+
+    def read(self, size: int = -1):
+        return self.content
+
+
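+# Context manager that yields a FakeFile, mimicking file handles such as those
+# returned by open() without touching the filesystem.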
+class FakeFileWrapper:
+    def __init__(self, content: str = ""):
+        self.file = FakeFile(content=content)
+
+    def __enter__(self):
+        return self.file
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        pass
+
+
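+# Canned AllWatcher deltas covering the machine, unit, application, and action
+# transitions used by the Deltas cases below.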
+FAKE_DELTA_MACHINE_PENDING = Dict(
+    {
+        "deltas": ["machine", "change", {}],
+        "entity": "machine",
+        "type": "change",
+        "data": {
+            "id": "2",
+            "instance-id": "juju-1b5808-2",
+            "agent-status": {"current": "pending", "message": "", "version": ""},
+            "instance-status": {"current": "running", "message": "Running"},
+        },
+    }
+)
+FAKE_DELTA_MACHINE_STARTED = Dict(
+    {
+        "deltas": ["machine", "change", {}],
+        "entity": "machine",
+        "type": "change",
+        "data": {
+            "id": "2",
+            "instance-id": "juju-1b5808-2",
+            "agent-status": {"current": "started", "message": "", "version": ""},
+            "instance-status": {"current": "running", "message": "Running"},
+        },
+    }
+)
+
+FAKE_DELTA_UNIT_PENDING = Dict(
+    {
+        "deltas": ["unit", "change", {}],
+        "entity": "unit",
+        "type": "change",
+        "data": {
+            "name": "git/0",
+            "application": "git",
+            "machine-id": "6",
+            "workload-status": {"current": "waiting", "message": ""},
+            "agent-status": {"current": "idle", "message": ""},
+        },
+    }
+)
+
+FAKE_DELTA_UNIT_STARTED = Dict(
+    {
+        "deltas": ["unit", "change", {}],
+        "entity": "unit",
+        "type": "change",
+        "data": {
+            "name": "git/0",
+            "application": "git",
+            "machine-id": "6",
+            "workload-status": {"current": "active", "message": ""},
+            "agent-status": {"current": "idle", "message": ""},
+        },
+    }
+)
+
+FAKE_DELTA_APPLICATION_MAINTENANCE = Dict(
+    {
+        "deltas": ["application", "change", {}],
+        "entity": "application",
+        "type": "change",
+        "data": {
+            "name": "git",
+            "status": {
+                "current": "maintenance",
+                "message": "installing charm software",
+            },
+        },
+    }
+)
+
+FAKE_DELTA_APPLICATION_ACTIVE = Dict(
+    {
+        "deltas": ["application", "change", {}],
+        "entity": "application",
+        "type": "change",
+        "data": {"name": "git", "status": {"current": "active", "message": "Ready!"}},
+    }
+)
+
+FAKE_DELTA_ACTION_COMPLETED = Dict(
+    {
+        "deltas": ["action", "change", {}],
+        "entity": "action",
+        "type": "change",
+        "data": {
+            "model-uuid": "af19cdd4-374a-4d9f-86b1-bfed7b1b5808",
+            "id": "1",
+            "receiver": "git/0",
+            "name": "add-repo",
+            "status": "completed",
+            "message": "",
+        },
+    }
+)
+
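+# Each case below pairs a canned delta with the entity filter applied to it,
+# the status the watcher should derive, and the database write (if any) that
+# FakeN2VC.write_app_status_to_db() is expected to capture.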
+Deltas = [
+    Dict(
+        {
+            "entity": Dict({"id": "2", "type": "machine"}),
+            "filter": Dict({"entity_id": "2", "entity_type": "machine"}),
+            "delta": FAKE_DELTA_MACHINE_PENDING,
+            "entity_status": Dict(
+                {"status": "pending", "message": "Running", "vca_status": "running"}
+            ),
+            "db": Dict(
+                {
+                    "written": True,
+                    "data": Dict(
+                        {
+                            "message": "Running",
+                            "entity": "machine",
+                            "vca_status": "running",
+                            "n2vc_status": N2VCDeploymentStatus.PENDING,
+                        }
+                    ),
+                }
+            ),
+        }
+    ),
+    Dict(
+        {
+            "entity": Dict({"id": "2", "type": "machine"}),
+            "filter": Dict({"entity_id": "1", "entity_type": "machine"}),
+            "delta": FAKE_DELTA_MACHINE_PENDING,
+            "entity_status": Dict(
+                {"status": "pending", "message": "Running", "vca_status": "running"}
+            ),
+            "db": Dict({"written": False, "data": None}),
+        }
+    ),
+    Dict(
+        {
+            "entity": Dict({"id": "2", "type": "machine"}),
+            "filter": Dict({"entity_id": "2", "entity_type": "machine"}),
+            "delta": FAKE_DELTA_MACHINE_STARTED,
+            "entity_status": Dict(
+                {"status": "started", "message": "Running", "vca_status": "running"}
+            ),
+            "db": Dict(
+                {
+                    "written": True,
+                    "data": Dict(
+                        {
+                            "message": "Running",
+                            "entity": "machine",
+                            "vca_status": "running",
+                            "n2vc_status": N2VCDeploymentStatus.COMPLETED,
+                        }
+                    ),
+                }
+            ),
+        }
+    ),
+    Dict(
+        {
+            "entity": Dict({"id": "2", "type": "machine"}),
+            "filter": Dict({"entity_id": "1", "entity_type": "machine"}),
+            "delta": FAKE_DELTA_MACHINE_STARTED,
+            "entity_status": Dict(
+                {"status": "started", "message": "Running", "vca_status": "running"}
+            ),
+            "db": Dict({"written": False, "data": None}),
+        }
+    ),
+    Dict(
+        {
+            "entity": Dict({"id": "git/0", "type": "unit"}),
+            "filter": Dict({"entity_id": "git", "entity_type": "application"}),
+            "delta": FAKE_DELTA_UNIT_PENDING,
+            "entity_status": Dict(
+                {"status": "waiting", "message": "", "vca_status": "waiting"}
+            ),
+            "db": Dict(
+                {
+                    "written": True,
+                    "data": Dict(
+                        {
+                            "message": "",
+                            "entity": "unit",
+                            "vca_status": "waiting",
+                            "n2vc_status": N2VCDeploymentStatus.RUNNING,
+                        }
+                    ),
+                }
+            ),
+        }
+    ),
+    Dict(
+        {
+            "entity": Dict({"id": "git/0", "type": "unit"}),
+            "filter": Dict({"entity_id": "2", "entity_type": "machine"}),
+            "delta": FAKE_DELTA_UNIT_PENDING,
+            "entity_status": Dict(
+                {"status": "waiting", "message": "", "vca_status": "waiting"}
+            ),
+            "db": Dict({"written": False, "data": None}),
+        }
+    ),
+    Dict(
+        {
+            "entity": Dict({"id": "git/0", "type": "unit"}),
+            "filter": Dict({"entity_id": "git", "entity_type": "application"}),
+            "delta": FAKE_DELTA_UNIT_STARTED,
+            "entity_status": Dict(
+                {"status": "active", "message": "", "vca_status": "active"}
+            ),
+            "db": Dict(
+                {
+                    "written": True,
+                    "data": Dict(
+                        {
+                            "message": "",
+                            "entity": "unit",
+                            "vca_status": "active",
+                            "n2vc_status": N2VCDeploymentStatus.COMPLETED,
+                        }
+                    ),
+                }
+            ),
+        }
+    ),
+    Dict(
+        {
+            "entity": Dict({"id": "git/0", "type": "unit"}),
+            "filter": Dict({"entity_id": "1", "entity_type": "action"}),
+            "delta": FAKE_DELTA_UNIT_STARTED,
+            "entity_status": Dict(
+                {"status": "active", "message": "", "vca_status": "active"}
+            ),
+            "db": Dict({"written": False, "data": None}),
+        }
+    ),
+    Dict(
+        {
+            "entity": Dict({"id": "git", "type": "application"}),
+            "filter": Dict({"entity_id": "git", "entity_type": "application"}),
+            "delta": FAKE_DELTA_APPLICATION_MAINTENANCE,
+            "entity_status": Dict(
+                {
+                    "status": "maintenance",
+                    "message": "installing charm software",
+                    "vca_status": "maintenance",
+                }
+            ),
+            "db": Dict(
+                {
+                    "written": True,
+                    "data": Dict(
+                        {
+                            "message": "installing charm software",
+                            "entity": "application",
+                            "vca_status": "maintenance",
+                            "n2vc_status": N2VCDeploymentStatus.RUNNING,
+                        }
+                    ),
+                }
+            ),
+        }
+    ),
+    Dict(
+        {
+            "entity": Dict({"id": "git", "type": "application"}),
+            "filter": Dict({"entity_id": "2", "entity_type": "machine"}),
+            "delta": FAKE_DELTA_APPLICATION_MAINTENANCE,
+            "entity_status": Dict(
+                {
+                    "status": "maintenance",
+                    "message": "installing charm software",
+                    "vca_status": "maintenance",
+                }
+            ),
+            "db": Dict({"written": False, "data": None}),
+        }
+    ),
+    Dict(
+        {
+            "entity": Dict({"id": "git", "type": "application"}),
+            "filter": Dict({"entity_id": "git", "entity_type": "application"}),
+            "delta": FAKE_DELTA_APPLICATION_ACTIVE,
+            "entity_status": Dict(
+                {"status": "active", "message": "Ready!", "vca_status": "active"}
+            ),
+            "db": Dict(
+                {
+                    "written": True,
+                    "data": Dict(
+                        {
+                            "message": "Ready!",
+                            "entity": "application",
+                            "vca_status": "active",
+                            "n2vc_status": N2VCDeploymentStatus.COMPLETED,
+                        }
+                    ),
+                }
+            ),
+        }
+    ),
+    Dict(
+        {
+            "entity": Dict({"id": "git", "type": "application"}),
+            "filter": Dict({"entity_id": "1", "entity_type": "action"}),
+            "delta": FAKE_DELTA_APPLICATION_ACTIVE,
+            "entity_status": Dict(
+                {"status": "active", "message": "Ready!", "vca_status": "active"}
+            ),
+            "db": Dict({"written": False, "data": None}),
+        }
+    ),
+    Dict(
+        {
+            "entity": Dict({"id": "1", "type": "action"}),
+            "filter": Dict({"entity_id": "1", "entity_type": "action"}),
+            "delta": FAKE_DELTA_ACTION_COMPLETED,
+            "entity_status": Dict(
+                {
+                    "status": "completed",
+                    "message": "completed",
+                    "vca_status": "completed",
+                }
+            ),
+            "db": Dict(
+                {
+                    "written": True,
+                    "data": Dict(
+                        {
+                            "message": "completed",
+                            "entity": "action",
+                            "vca_status": "completed",
+                            "n2vc_status": N2VCDeploymentStatus.COMPLETED,
+                        }
+                    ),
+                }
+            ),
+        }
+    ),
+    Dict(
+        {
+            "entity": Dict({"id": "git", "type": "action"}),
+            "filter": Dict({"entity_id": "1", "entity_type": "machine"}),
+            "delta": FAKE_DELTA_ACTION_COMPLETED,
+            "entity_status": Dict(
+                {
+                    "status": "completed",
+                    "message": "completed",
+                    "vca_status": "completed",
+                }
+            ),
+            "db": Dict({"written": False, "data": None}),
+        }
+    ),
+]
diff --git a/osm_lcm/n2vc/utils.py b/osm_lcm/n2vc/utils.py
new file mode 100644 (file)
index 0000000..c1680d0
--- /dev/null
@@ -0,0 +1,179 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+import base64
+import re
+import binascii
+import yaml
+import string
+import secrets
+from enum import Enum
+from juju.machine import Machine
+from juju.application import Application
+from juju.action import Action
+from juju.unit import Unit
+from osm_lcm.n2vc.exceptions import N2VCInvalidCertificate
+from typing import Tuple
+
+
+def base64_to_cacert(b64string):
+    """Convert a base64-encoded string containing the VCA CACERT.
+
+    The decoded text may contain escaped newline sequences ("\\n");
+    these are replaced with real newlines so the result is a usable
+    PEM certificate.
+    """
+    try:
+        cacert = base64.b64decode(b64string).decode("utf-8")
+
+        cacert = re.sub(
+            r"\\n",
+            r"\n",
+            cacert,
+        )
+    except binascii.Error as e:
+        raise N2VCInvalidCertificate(message="Invalid CA Certificate: {}".format(e))
+
+    return cacert
+
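+# Illustrative usage (not part of the original module): a certificate
+# exported with escaped newlines is restored to a proper PEM string.
+# >>> base64_to_cacert(base64.b64encode(b"line1\\nline2").decode())
+# 'line1\nline2'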
+
+class N2VCDeploymentStatus(Enum):
+    PENDING = "pending"
+    RUNNING = "running"
+    COMPLETED = "completed"
+    FAILED = "failed"
+    UNKNOWN = "unknown"
+
+
+class Dict(dict):
+    """
+    Dict subclass that allows accessing keys as attributes
+    """
+
+    def __getattribute__(self, name):
+        if name in self:
+            return self[name]
+        return super().__getattribute__(name)
+
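+# Illustrative usage (not part of the original module): keys double as
+# attributes, which keeps test fixtures compact.
+# >>> d = Dict({"entity": "machine"})
+# >>> d.entity
+# 'machine'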
+
+class EntityType(Enum):
+    MACHINE = Machine
+    APPLICATION = Application
+    ACTION = Action
+    UNIT = Unit
+
+    @classmethod
+    def has_value(cls, value):
+        return value in cls._value2member_map_  # pylint: disable=E1101
+
+    @classmethod
+    def get_entity(cls, value):
+        return cls._value2member_map_.get(value)  # pylint: disable=E1101
+
+    @classmethod
+    def get_entity_from_delta(cls, delta_entity: str):
+        """
+        Get the EntityType member that corresponds to a delta entity name
+
+        :param: delta_entity: Possible values are "machine", "application", "unit", "action"
+        """
+        for v in cls._value2member_map_:  # pylint: disable=E1101
+            if v.__name__.lower() == delta_entity:
+                return cls.get_entity(v)
+
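+# Illustrative usage (not part of the original module): mapping a watcher
+# delta's entity name back to its juju entity type.
+# >>> EntityType.get_entity_from_delta("machine") is EntityType.MACHINE
+# True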
+
+JujuStatusToOSM = {
+    "machine": {
+        "pending": N2VCDeploymentStatus.PENDING,
+        "started": N2VCDeploymentStatus.COMPLETED,
+    },
+    "application": {
+        "waiting": N2VCDeploymentStatus.RUNNING,
+        "maintenance": N2VCDeploymentStatus.RUNNING,
+        "blocked": N2VCDeploymentStatus.RUNNING,
+        "error": N2VCDeploymentStatus.FAILED,
+        "active": N2VCDeploymentStatus.COMPLETED,
+    },
+    "action": {
+        "pending": N2VCDeploymentStatus.PENDING,
+        "running": N2VCDeploymentStatus.RUNNING,
+        "completed": N2VCDeploymentStatus.COMPLETED,
+    },
+    "unit": {
+        "waiting": N2VCDeploymentStatus.RUNNING,
+        "maintenance": N2VCDeploymentStatus.RUNNING,
+        "blocked": N2VCDeploymentStatus.RUNNING,
+        "error": N2VCDeploymentStatus.FAILED,
+        "active": N2VCDeploymentStatus.COMPLETED,
+    },
+}
+
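+# Illustrative lookup (not part of the original module): translating a juju
+# status into the corresponding OSM deployment status.
+# >>> JujuStatusToOSM["unit"]["active"] is N2VCDeploymentStatus.COMPLETED
+# True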
+
+def obj_to_yaml(obj: object) -> str:
+    """
+    Convert an object to YAML text, stripping "!!python/object" tags
+
+    :param: obj: Object to serialize
+    :return: yaml data
+    """
+    # dump to yaml
+    dump_text = yaml.dump(obj, default_flow_style=False, indent=2)
+    # split lines
+    lines = dump_text.splitlines()
+    # remove !!python/object tags
+    yaml_text = ""
+    for line in lines:
+        index = line.find("!!python/object")
+        if index >= 0:
+            line = line[:index]
+        yaml_text += line + "\n"
+    return yaml_text
+
+
+def obj_to_dict(obj: object) -> dict:
+    """
+    Convert an object to a dictionary by round-tripping through YAML
+
+    :param: obj: Object to convert
+    :return: dict data
+    """
+    # convert obj to yaml
+    yaml_text = obj_to_yaml(obj)
+    # parse to dict
+    return yaml.load(yaml_text, Loader=yaml.SafeLoader)
+
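+# Illustrative round trip (not part of the original module): plain data
+# survives the YAML round trip unchanged.
+# >>> obj_to_dict({"name": "git", "units": 1})
+# {'name': 'git', 'units': 1}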
+
+def get_ee_id_components(ee_id: str) -> Tuple[str, str, str]:
+    """
+    Get model, application and machine components from an execution environment id
+    :param ee_id:
+    :return: model_name, application_name, machine_id
+    """
+    parts = ee_id.split(".")
+    if len(parts) != 3:
+        raise Exception(
+            "Invalid ee_id '{}': expected '<model>.<application>.<machine>'".format(
+                ee_id
+            )
+        )
+    model_name = parts[0]
+    application_name = parts[1]
+    machine_id = parts[2]
+    return model_name, application_name, machine_id
+
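+# Illustrative usage (not part of the original module): an execution
+# environment id packs model, application and machine, dot-separated.
+# >>> get_ee_id_components("mymodel.myapp.4")
+# ('mymodel', 'myapp', '4')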
+
+def generate_random_alfanum_string(size: int) -> str:
+    """
+    Generate a random alphanumeric string of the given size
+
+    :param size: Length of the string to generate
+    :return: randomly generated string
+    """
+
+    return "".join(
+        secrets.choice(string.ascii_letters + string.digits) for _ in range(size)
+    )
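+
+# Illustrative check (not part of the original module): the result has the
+# requested length and only contains ASCII letters and digits.
+# >>> s = generate_random_alfanum_string(12)
+# >>> len(s) == 12 and s.isalnum()
+# True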
diff --git a/osm_lcm/n2vc/vca/__init__.py b/osm_lcm/n2vc/vca/__init__.py
new file mode 100644 (file)
index 0000000..aa5cee8
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
diff --git a/osm_lcm/n2vc/vca/cloud.py b/osm_lcm/n2vc/vca/cloud.py
new file mode 100644 (file)
index 0000000..970fd93
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+
+class Cloud:
+    def __init__(self, name: str, credential_name: str):
+        """
+        Constructor
+
+        :param: name: Name of the Cloud
+        :param: credential_name: Credential name for the Cloud
+        """
+        self.name = name
+        self.credential_name = credential_name
diff --git a/osm_lcm/n2vc/vca/connection.py b/osm_lcm/n2vc/vca/connection.py
new file mode 100644 (file)
index 0000000..dd2da28
--- /dev/null
@@ -0,0 +1,113 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+import typing
+
+from osm_lcm.n2vc.config import EnvironConfig, ModelConfig
+from osm_lcm.n2vc.store import Store
+from osm_lcm.n2vc.vca.cloud import Cloud
+from osm_lcm.n2vc.vca.connection_data import ConnectionData
+
+
+class Connection:
+    def __init__(self, store: Store, vca_id: str = None):
+        """
+        Constructor
+
+        :param: store: Store object. Used to communicate with the DB
+        :param: vca_id: Id of the VCA. If none is specified, the default VCA will be used.
+        """
+        self._data = None
+        self.default = vca_id is None
+        self._vca_id = vca_id
+        self._store = store
+
+    async def load(self):
+        """Load VCA connection data"""
+        await self._load_vca_connection_data()
+
+    @property
+    def is_default(self):
+        return self._vca_id is None
+
+    @property
+    def data(self) -> ConnectionData:
+        return self._data
+
+    async def _load_vca_connection_data(self) -> None:
+        """
+        Load VCA connection data
+
+        If self._vca_id is None, it will get the VCA data from the Environment variables,
+        and the default VCA will be used. If it is not None, then it means that it will
+        load the credentials from the database (A non-default VCA will be used).
+        """
+        if self._vca_id:
+            self._data = await self._store.get_vca_connection_data(self._vca_id)
+        else:
+            envs = EnvironConfig()
+            # Get endpoints from the DB and ENV. Check if update in the database is needed or not.
+            db_endpoints = await self._store.get_vca_endpoints()
+            env_endpoints = (
+                envs["endpoints"].split(",")
+                if "endpoints" in envs
+                else ["{}:{}".format(envs["host"], envs.get("port", 17070))]
+            )
+
+            db_update_needed = not all(e in db_endpoints for e in env_endpoints)
+
+            endpoints = env_endpoints if db_update_needed else db_endpoints
+            config = {
+                "endpoints": endpoints,
+                "user": envs["user"],
+                "secret": envs["secret"],
+                "cacert": envs["cacert"],
+                "pubkey": envs.get("pubkey"),
+                "lxd-cloud": envs.get("cloud"),
+                "lxd-credentials": envs.get("credentials", envs.get("cloud")),
+                "k8s-cloud": envs.get("k8s_cloud"),
+                "k8s-credentials": envs.get("k8s_credentials", envs.get("k8s_cloud")),
+                "model-config": ModelConfig(envs),
+                "api-proxy": envs.get("api_proxy", None),
+            }
+            self._data = ConnectionData(**config)
+            if db_update_needed:
+                await self.update_endpoints(endpoints)
+
+    @property
+    def endpoints(self):
+        return self._data.endpoints
+
+    async def update_endpoints(self, endpoints: typing.List[str]):
+        await self._store.update_vca_endpoints(endpoints, self._vca_id)
+        self._data.endpoints = endpoints
+
+    @property
+    def lxd_cloud(self) -> Cloud:
+        return Cloud(self.data.lxd_cloud, self.data.lxd_credentials)
+
+    @property
+    def k8s_cloud(self) -> Cloud:
+        return Cloud(self.data.k8s_cloud, self.data.k8s_credentials)
+
+
+async def get_connection(store: Store, vca_id: str = None) -> Connection:
+    """
+    Get Connection
+
+    Method to get a Connection object with the VCA information loaded
+    """
+    connection = Connection(store, vca_id=vca_id)
+    await connection.load()
+    return connection
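+
+# Illustrative usage (not part of the original module; assumes an already
+# initialized Store implementation, e.g. one from osm_lcm.n2vc.store):
+#
+#     connection = await get_connection(store)          # default VCA from env
+#     connection = await get_connection(store, vca_id)  # VCA registered in DB
+#     endpoints = connection.endpoints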
diff --git a/osm_lcm/n2vc/vca/connection_data.py b/osm_lcm/n2vc/vca/connection_data.py
new file mode 100644 (file)
index 0000000..1a1b576
--- /dev/null
@@ -0,0 +1,51 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+from osm_lcm.n2vc.utils import base64_to_cacert
+
+
+class ConnectionData:
+    def __init__(self, **kwargs):
+        """
+        Constructor
+
+        :param: kwargs:
+            endpoints (list): Endpoints of all the Juju controller units
+            user (str): Username for authenticating to the controller
+            secret (str): Secret for authenticating to the controller
+            cacert (str): Base64 encoded CA certificate for authenticating to the controller
+            (optional) pubkey (str): Public key to insert into the charm.
+                            This enables `juju ssh` to the unit, though it is
+                            rarely needed in practice.
+                            TODO: Test it.
+            (optional) lxd-cloud (str): Name of the cloud to use for lxd proxy charms
+            (optional) lxd-credentials (str): Name of the lxd-cloud credentials
+            (optional) k8s-cloud (str): Name of the cloud to use for k8s proxy charms
+            (optional) k8s-credentials (str): Name of the k8s-cloud credentials
+            (optional) model-config (n2vc.config.ModelConfig): Config to apply in all Juju models
+            (deprecated, optional) api-proxy (str): Proxy IP to reach the controller.
+                                                    Used in case native charms cannot reach the controller.
+        """
+        self.endpoints = kwargs["endpoints"]
+        self.user = kwargs["user"]
+        self.secret = kwargs["secret"]
+        self.cacert = base64_to_cacert(kwargs["cacert"])
+        self.pubkey = kwargs.get("pubkey", "")
+        self.lxd_cloud = kwargs.get("lxd-cloud", None)
+        self.lxd_credentials = kwargs.get("lxd-credentials", None)
+        self.k8s_cloud = kwargs.get("k8s-cloud", None)
+        self.k8s_credentials = kwargs.get("k8s-credentials", None)
+        self.model_config = kwargs.get("model-config", {})
+        self.model_config.update({"authorized-keys": self.pubkey})
+        self.api_proxy = kwargs.get("api-proxy", None)
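+
+# Illustrative construction (not part of the original module; values are
+# placeholders). "cacert" must be the base64-encoded PEM, since the
+# constructor decodes it:
+#
+#     data = ConnectionData(
+#         endpoints=["10.0.0.1:17070"],
+#         user="admin",
+#         secret="s3cr3t",
+#         cacert=base64_encoded_pem,
+#     )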
index cb983da..9427fa0 100644 (file)
@@ -98,9 +98,9 @@ from osm_lcm.data_utils.vnfr import (
 )
 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
 from osm_lcm.data_utils.database.vim_account import VimAccountDB
-from n2vc.definitions import RelationEndpoint
-from n2vc.k8s_helm3_conn import K8sHelm3Connector
-from n2vc.k8s_juju_conn import K8sJujuConnector
+from osm_lcm.n2vc.definitions import RelationEndpoint
+from osm_lcm.n2vc.k8s_helm3_conn import K8sHelm3Connector
+from osm_lcm.n2vc.k8s_juju_conn import K8sJujuConnector
 
 from osm_common.dbbase import DbException
 from osm_common.fsbase import FsException
@@ -113,8 +113,8 @@ from osm_lcm.data_utils.wim import (
     select_feasible_wim_account,
 )
 
-from n2vc.n2vc_juju_conn import N2VCJujuConnector
-from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
+from osm_lcm.n2vc.n2vc_juju_conn import N2VCJujuConnector
+from osm_lcm.n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
 
 from osm_lcm.lcm_helm_conn import LCMHelmConn
 from osm_lcm.osm_config import OsmConfigBuilder
index 5b90cdb..49821cc 100644 (file)
@@ -17,7 +17,7 @@
 import logging
 from osm_lcm.lcm_utils import LcmBase
 
-from n2vc import kubectl
+from osm_lcm.n2vc import kubectl
 
 
 class OduWorkflow(LcmBase):
index 3415fa1..4cd22e4 100644 (file)
@@ -21,7 +21,7 @@ import asyncio
 from copy import deepcopy
 import yaml
 import copy
-from n2vc.exceptions import N2VCException
+from osm_lcm.n2vc.exceptions import N2VCException
 from os import getenv
 from osm_lcm import ns
 from osm_common.msgkafka import MsgKafka
index a464993..dcb62b6 100644 (file)
@@ -22,10 +22,10 @@ import logging
 import logging.handlers
 from osm_lcm import ROclient
 from osm_lcm.lcm_utils import LcmException, LcmBase, deep_get
-from n2vc.k8s_helm3_conn import K8sHelm3Connector
-from n2vc.k8s_juju_conn import K8sJujuConnector
-from n2vc.n2vc_juju_conn import N2VCJujuConnector
-from n2vc.exceptions import K8sException, N2VCException
+from osm_lcm.n2vc.k8s_helm3_conn import K8sHelm3Connector
+from osm_lcm.n2vc.k8s_juju_conn import K8sJujuConnector
+from osm_lcm.n2vc.n2vc_juju_conn import N2VCJujuConnector
+from osm_lcm.n2vc.exceptions import K8sException, N2VCException
 from osm_common.dbbase import DbException
 from copy import deepcopy
 from time import time
index ec714a2..c113bba 100644 (file)
@@ -15,6 +15,3 @@
 
 git+https://osm.etsi.org/gerrit/osm/common.git@master#egg=osm-common
 -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-
-git+https://osm.etsi.org/gerrit/osm/N2VC.git@master#egg=n2vc
--r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
index 72b57cb..1f65f85 100644 (file)
@@ -18,200 +18,33 @@ aiokafka==0.11.0
     # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
 async-timeout==4.0.3
     # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
     #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
     #   aiokafka
-    #   retrying-async
-bcrypt==4.2.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   paramiko
-cachetools==5.4.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   google-auth
-certifi==2024.7.4
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   kubernetes
-    #   requests
-cffi==1.17.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   cryptography
-    #   pynacl
-charset-normalizer==3.3.2
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   requests
-cryptography==43.0.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   paramiko
 dataclasses==0.6
     # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
 dnspython==2.6.1
     # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
     #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
     #   pymongo
-google-auth==2.17.3
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   kubernetes
-idna==3.7
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   requests
-juju==2.9.44.0
-    # via -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-jujubundlelib==0.5.7
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   theblues
-kubernetes==30.1.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   juju
-macaroonbakery==1.3.4
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   juju
-    #   theblues
 motor==3.5.1
     # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
     #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-mypy-extensions==1.0.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   typing-inspect
-n2vc @ git+https://osm.etsi.org/gerrit/osm/N2VC.git@master
-    # via -r requirements-dev.in
-oauthlib==3.2.2
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   kubernetes
-    #   requests-oauthlib
 osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git@master
     # via -r requirements-dev.in
 packaging==24.1
     # via
     #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
     #   aiokafka
-paramiko==2.12.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   juju
-protobuf==3.20.3
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   macaroonbakery
-pyasn1==0.6.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   juju
-    #   pyasn1-modules
-    #   rsa
-pyasn1-modules==0.4.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   google-auth
-pycparser==2.22
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   cffi
 pycryptodome==3.20.0
     # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-pymacaroons==0.13.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   macaroonbakery
 pymongo==4.8.0
     # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
     #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
     #   motor
-pynacl==1.5.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   macaroonbakery
-    #   paramiko
-    #   pymacaroons
-pyrfc3339==1.1
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   juju
-    #   macaroonbakery
-python-dateutil==2.9.0.post0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   kubernetes
-pytz==2024.1
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   pyrfc3339
 pyyaml==6.0.2
     # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
     #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-    #   juju
-    #   jujubundlelib
-    #   kubernetes
-requests==2.32.3
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   kubernetes
-    #   macaroonbakery
-    #   requests-oauthlib
-    #   theblues
-requests-oauthlib==2.0.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   kubernetes
-retrying-async==2.0.0
-    # via -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-rsa==4.9
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   google-auth
-six==1.16.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   google-auth
-    #   kubernetes
-    #   macaroonbakery
-    #   paramiko
-    #   pymacaroons
-    #   python-dateutil
-theblues==0.5.2
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   juju
-toposort==1.10
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   juju
 typing-extensions==4.12.2
     # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
     #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
     #   aiokafka
-    #   typing-inspect
-typing-inspect==0.9.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   juju
-urllib3==2.2.2
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   kubernetes
-    #   requests
-websocket-client==1.8.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   kubernetes
-websockets==12.0
-    # via
-    #   -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master
-    #   juju
index 15fb5ee..fb67c21 100644 (file)
@@ -14,6 +14,8 @@
 # limitations under the License.
 
 asynctest
+charset-normalizer
 coverage
 mock
-nose2
\ No newline at end of file
+nose2
+requests-mock
\ No newline at end of file
index 18a172f..a480ef7 100644 (file)
 #######################################################################################
 asynctest==0.13.0
     # via -r requirements-test.in
+certifi==2024.7.4
+    # via requests
+charset-normalizer==3.3.2
+    # via
+    #   -r requirements-test.in
+    #   requests
 coverage==7.6.1
     # via -r requirements-test.in
+idna==3.7
+    # via requests
 mock==5.1.0
     # via -r requirements-test.in
 nose2==0.15.1
     # via -r requirements-test.in
+requests==2.32.3
+    # via requests-mock
+requests-mock==1.12.1
+    # via -r requirements-test.in
+urllib3==2.2.2
+    # via requests
index 7e96421..ea400d7 100644 (file)
 
 aiohttp
 async-timeout
+charset-normalizer
 checksumdir
 config-man
+google-auth<2.18.0
 grpcio-tools
 grpclib
 jinja2
+juju==2.9.44.0
+kubernetes
+motor
 protobuf==3.20.3
+pyasn1
 pyrage
 pyyaml>6
 pydantic
index 725a2f7..deab8b9 100644 (file)
@@ -31,14 +31,34 @@ attrs==24.2.0
     # via
     #   aiohttp
     #   glom
+bcrypt==4.2.0
+    # via paramiko
 boltons==24.0.0
     # via
     #   face
     #   glom
+cachetools==5.4.0
+    # via google-auth
+certifi==2024.7.4
+    # via
+    #   kubernetes
+    #   requests
+cffi==1.17.0
+    # via
+    #   cryptography
+    #   pynacl
+charset-normalizer==3.3.2
+    # via
+    #   -r requirements.in
+    #   requests
 checksumdir==1.2.0
     # via -r requirements.in
 config-man==0.0.4
     # via -r requirements.in
+cryptography==43.0.0
+    # via paramiko
+dnspython==2.6.1
+    # via pymongo
 face==20.1.1
     # via glom
 fire==0.6.0
@@ -49,6 +69,10 @@ frozenlist==1.4.1
     #   aiosignal
 glom==23.5.0
     # via config-man
+google-auth==2.17.3
+    # via
+    #   -r requirements.in
+    #   kubernetes
 grpcio==1.65.4
     # via grpcio-tools
 grpcio-tools==1.48.2
@@ -62,40 +86,126 @@ hpack==4.0.0
 hyperframe==6.0.1
     # via h2
 idna==3.7
+    # via requests
     # via yarl
 jinja2==3.1.4
     # via -r requirements.in
+juju==2.9.44.0
+    # via -r requirements.in
+jujubundlelib==0.5.7
+    # via theblues
+kubernetes==30.1.0
+    # via
+    #   -r requirements.in
+    #   juju
+macaroonbakery==1.3.4
+    # via
+    #   juju
+    #   theblues
 markupsafe==2.1.5
     # via jinja2
+motor==3.5.1
+    # via -r requirements.in
 multidict==6.0.5
     # via
     #   aiohttp
     #   grpclib
     #   yarl
+mypy-extensions==1.0.0
+    # via typing-inspect
+oauthlib==3.2.2
+    # via
+    #   kubernetes
+    #   requests-oauthlib
+paramiko==2.12.0
+    # via juju
 protobuf==3.20.3
     # via
     #   -r requirements.in
     #   grpcio-tools
+    #   macaroonbakery
+pyasn1==0.6.0
+    # via
+    #   -r requirements.in
+    #   juju
+    #   pyasn1-modules
+    #   rsa
+pyasn1-modules==0.4.0
+    # via google-auth
+pycparser==2.22
+    # via cffi
 pydantic==2.8.2
     # via -r requirements.in
 pydantic-core==2.20.1
     # via pydantic
+pymacaroons==0.13.0
+    # via macaroonbakery
+pymongo==4.8.0
+    # via motor
+pynacl==1.5.0
+    # via
+    #   macaroonbakery
+    #   paramiko
+    #   pymacaroons
 pyrage==1.1.2
     # via -r requirements.in
+pyrfc3339==1.1
+    # via
+    #   juju
+    #   macaroonbakery
+python-dateutil==2.9.0.post0
+    # via kubernetes
+pytz==2024.1
+    # via pyrfc3339
 pyyaml==6.0.2
     # via -r requirements.in
+    #   juju
+    #   jujubundlelib
+    #   kubernetes
 randomname==0.2.1
     # via -r requirements.in
+requests==2.32.3
+    # via
+    #   kubernetes
+    #   macaroonbakery
+    #   requests-oauthlib
+    #   theblues
+requests-oauthlib==2.0.0
+    # via kubernetes
 retrying-async==2.0.0
     # via -r requirements.in
+rsa==4.9
+    # via google-auth
 six==1.16.0
-    # via fire
+    # via
+    #   fire
+    #   google-auth
+    #   kubernetes
+    #   macaroonbakery
+    #   paramiko
+    #   pymacaroons
+    #   python-dateutil
 termcolor==2.4.0
     # via fire
+theblues==0.5.2
+    # via juju
+toposort==1.10
+    # via juju
 typing-extensions==4.12.2
     # via
     #   pydantic
     #   pydantic-core
+    #   typing-inspect
+typing-inspect==0.9.0
+    # via juju
+urllib3==2.2.2
+    # via
+    #   kubernetes
+    #   requests
+websocket-client==1.8.0
+    # via kubernetes
+websockets==12.0
+    # via juju
 yarl==1.9.4
     # via aiohttp