Fixes for scaling proxy and native charm
diff --git a/n2vc/k8s_juju_conn.py b/n2vc/k8s_juju_conn.py
index 895e82b..e3d8a67 100644
--- a/n2vc/k8s_juju_conn.py
+++ b/n2vc/k8s_juju_conn.py
 #     limitations under the License.
 
 import asyncio
-import concurrent
-from .exceptions import NotImplemented
-
-import io
-import juju
-# from juju.bundle import BundleHandler
-from juju.controller import Controller
-from juju.model import Model
-from juju.errors import JujuAPIError, JujuError
+import os
+import uuid
+import yaml
+import tempfile
+import binascii
+import base64
 
+from n2vc.exceptions import K8sException, N2VCBadArgumentsException
 from n2vc.k8s_conn import K8sConnector
+from n2vc.kubectl import Kubectl, CORE_CLIENT, RBAC_CLIENT
+from .exceptions import MethodNotImplemented
+from n2vc.utils import base64_to_cacert
+from n2vc.libjuju import Libjuju
+
+from kubernetes.client.models import (
+    V1ClusterRole,
+    V1ObjectMeta,
+    V1PolicyRule,
+    V1ServiceAccount,
+    V1ClusterRoleBinding,
+    V1RoleRef,
+    V1Subject,
+)
+
+from typing import Dict
+
+SERVICE_ACCOUNT_TOKEN_KEY = "token"
+SERVICE_ACCOUNT_ROOT_CA_KEY = "ca.crt"
+RBAC_LABEL_KEY_NAME = "rbac-id"
+
+ADMIN_NAMESPACE = "kube-system"
+RBAC_STACK_PREFIX = "juju-credential"
 
-import os
+# from juju.bundle import BundleHandler
 # import re
 # import ssl
 # from .vnf import N2VC
 
-import uuid
-import yaml
 
+def generate_rbac_id():
+    return binascii.hexlify(os.urandom(4)).decode()
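
For reference, a quick sketch of what this helper returns: os.urandom(4)
yields four random bytes, so the id is an eight-character hex string (the
value shown is illustrative; the real one is random).

    >>> generate_rbac_id()
    '9b1d7a2f'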
 
-class K8sJujuConnector(K8sConnector):
 
+class K8sJujuConnector(K8sConnector):
     def __init__(
-            self,
-            fs: object,
-            db: object,
-            kubectl_command: str = '/usr/bin/kubectl',
-            juju_command: str = '/usr/bin/juju',
-            log: object = None,
-            on_update_db=None,
+        self,
+        fs: object,
+        db: object,
+        kubectl_command: str = "/usr/bin/kubectl",
+        juju_command: str = "/usr/bin/juju",
+        log: object = None,
+        loop: object = None,
+        on_update_db=None,
+        vca_config: dict = None,
     ):
         """
-
+        :param fs: file system for kubernetes and helm configuration
+        :param db: Database object
         :param kubectl_command: path to kubectl executable
         :param juju_command: path to juju executable
-        :param fs: file system for kubernetes and helm configuration
         :param log: logger
+        :param loop: Asyncio loop
         """
 
         # parent class
@@ -62,53 +86,87 @@ class K8sJujuConnector(K8sConnector):
         )
 
         self.fs = fs
-        self.log.debug('Initializing K8S Juju connector')
-
-        self.authenticated = False
-        self.models = {}
-
-        self.juju_command = juju_command
-        self.juju_secret = ""
-
-        self.log.debug('K8S Juju connector initialized')
+        self.loop = loop or asyncio.get_event_loop()
+        self.log.debug("Initializing K8S Juju connector")
+
+        required_vca_config = [
+            "host",
+            "user",
+            "secret",
+            "ca_cert",
+        ]
+        if not vca_config or not all(k in vca_config for k in required_vca_config):
+            raise N2VCBadArgumentsException(
+                message="Missing arguments in vca_config: {}".format(vca_config),
+                bad_args=required_vca_config,
+            )
+        port = vca_config["port"] if "port" in vca_config else 17070
+        url = "{}:{}".format(vca_config["host"], port)
+        enable_os_upgrade = vca_config.get("enable_os_upgrade", True)
+        apt_mirror = vca_config.get("apt_mirror", None)
+        username = vca_config["user"]
+        secret = vca_config["secret"]
+        ca_cert = base64_to_cacert(vca_config["ca_cert"])
+
+        self.libjuju = Libjuju(
+            endpoint=url,
+            api_proxy=None,  # Not needed for k8s charms
+            enable_os_upgrade=enable_os_upgrade,
+            apt_mirror=apt_mirror,
+            username=username,
+            password=secret,
+            cacert=ca_cert,
+            loop=self.loop,
+            log=self.log,
+            db=self.db,
+        )
+        self.log.debug("K8S Juju connector initialized")
+        # TODO: Remove these commented lines:
+        # self.authenticated = False
+        # self.models = {}
+        # self.juju_secret = ""
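
For illustration, a vca_config satisfying the required keys could look like
the sketch below; every value is a placeholder, and "port" is optional with
a default of 17070.

    # Hypothetical values for a test controller.
    vca_config = {
        "host": "10.0.2.15",
        "user": "admin",
        "secret": "secret",
        "ca_cert": "<base64-encoded controller CA certificate>",
        "port": 17070,
    }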
 
     """Initialization"""
+
     async def init_env(
         self,
         k8s_creds: str,
-        namespace: str = 'kube-system',
+        namespace: str = "kube-system",
         reuse_cluster_uuid: str = None,
     ) -> (str, bool):
         """
         It prepares a given K8s cluster environment to run Juju bundles.
 
-        :param k8s_creds: credentials to access a given K8s cluster, i.e. a valid '.kube/config'
-        :param namespace: optional namespace to be used for juju. By default, 'kube-system' will be used
+        :param k8s_creds: credentials to access a given K8s cluster, i.e. a valid
+            '.kube/config'
+        :param namespace: optional namespace to be used for juju. By default,
+            'kube-system' will be used
         :param reuse_cluster_uuid: existing cluster uuid for reuse
-        :return: uuid of the K8s cluster and True if connector has installed some software in the cluster
-        (on error, an exception will be raised)
+        :return: uuid of the K8s cluster and True if connector has installed some
+            software in the cluster
+            (on error, an exception will be raised)
         """
 
-        """Bootstrapping
+        """Bootstrapping
 
-        Bootstrapping cannot be done, by design, through the API. We need to
-        use the CLI tools.
-        """
+        Bootstrapping cannot be done, by design, through the API. We need to
+        use the CLI tools.
+        """
 
-        """
-        WIP: Workflow
+        """
+        WIP: Workflow
 
-        1. Has the environment already been bootstrapped?
-        - Check the database to see if we have a record for this env
+        1. Has the environment already been bootstrapped?
+        - Check the database to see if we have a record for this env
 
-        2. If this is a new env, create it
-        - Add the k8s cloud to Juju
-        - Bootstrap
-        - Record it in the database
+        2. If this is a new env, create it
+        - Add the k8s cloud to Juju
+        - Bootstrap
+        - Record it in the database
 
-        3. Connect to the Juju controller for this cloud
+        3. Connect to the Juju controller for this cloud
 
-        """
+        """
         # cluster_uuid = reuse_cluster_uuid
         # if not cluster_uuid:
         #     cluster_uuid = str(uuid4())
@@ -127,135 +185,190 @@ class K8sJujuConnector(K8sConnector):
         # reuse_cluster_uuid, e.g. to try to fix it.       #
         ###################################################
 
-        if not reuse_cluster_uuid:
-            # This is a new cluster, so bootstrap it
-
-            cluster_uuid = str(uuid.uuid4())
-
-            # Is a local k8s cluster?
-            localk8s = self.is_local_k8s(k8s_creds)
-
-            # If the k8s is external, the juju controller needs a loadbalancer
-            loadbalancer = False if localk8s else True
-
-            # Name the new k8s cloud
-            k8s_cloud = "k8s-{}".format(cluster_uuid)
-
-            self.log.debug("Adding k8s cloud {}".format(k8s_cloud))
-            await self.add_k8s(k8s_cloud, k8s_creds)
-
-            # Bootstrap Juju controller
-            self.log.debug("Bootstrapping...")
-            await self.bootstrap(k8s_cloud, cluster_uuid, loadbalancer)
-            self.log.debug("Bootstrap done.")
+        # Reuse the cluster uuid if given; otherwise generate a new one
+
+        cluster_uuid = reuse_cluster_uuid or str(uuid.uuid4())
+
+        # Is a local k8s cluster?
+        # localk8s = self.is_local_k8s(k8s_creds)
+
+        # If the k8s is external, the juju controller needs a loadbalancer
+        # loadbalancer = False if localk8s else True
+
+        # Name the new k8s cloud
+        # k8s_cloud = "k8s-{}".format(cluster_uuid)
+
+        # self.log.debug("Adding k8s cloud {}".format(k8s_cloud))
+        # await self.add_k8s(k8s_cloud, k8s_creds)
+
+        # Bootstrap Juju controller
+        # self.log.debug("Bootstrapping...")
+        # await self.bootstrap(k8s_cloud, cluster_uuid, loadbalancer)
+        # self.log.debug("Bootstrap done.")
+
+        # Get the controller information
+
+        # Parse ~/.local/share/juju/controllers.yaml
+        # controllers.testing.api-endpoints|ca-cert|uuid
+        # self.log.debug("Getting controller endpoints")
+        # with open(os.path.expanduser("~/.local/share/juju/controllers.yaml")) as f:
+        #     controllers = yaml.load(f, Loader=yaml.Loader)
+        #     controller = controllers["controllers"][cluster_uuid]
+        #     endpoints = controller["api-endpoints"]
+        #     juju_endpoint = endpoints[0]
+        #     juju_ca_cert = controller["ca-cert"]
+
+        # Parse ~/.local/share/juju/accounts
+        # controllers.testing.user|password
+        # self.log.debug("Getting accounts")
+        # with open(os.path.expanduser("~/.local/share/juju/accounts.yaml")) as f:
+        #     controllers = yaml.load(f, Loader=yaml.Loader)
+        #     controller = controllers["controllers"][cluster_uuid]
+
+        #     juju_user = controller["user"]
+        #     juju_secret = controller["password"]
+
+        # config = {
+        #     "endpoint": juju_endpoint,
+        #     "username": juju_user,
+        #     "secret": juju_secret,
+        #     "cacert": juju_ca_cert,
+        #     "loadbalancer": loadbalancer,
+        # }
+
+        # Store the cluster configuration so it
+        # can be used for subsequent calls
+        kubecfg = tempfile.NamedTemporaryFile()
+        with open(kubecfg.name, "w") as kubecfg_file:
+            kubecfg_file.write(k8s_creds)
+        kubectl = Kubectl(config_file=kubecfg.name)
+
+        # CREATING RESOURCES IN K8S
+        rbac_id = generate_rbac_id()
+        metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id)
+        labels = {RBAC_STACK_PREFIX: rbac_id}
+
+        # Create cleanup dictionary to clean up created resources
+        # if it fails in the middle of the process
+        cleanup_data = []
+        try:
+            self._create_cluster_role(
+                kubectl,
+                name=metadata_name,
+                labels=labels,
+            )
+            cleanup_data.append(
+                {
+                    "delete": self._delete_cluster_role,
+                    "args": (kubectl, metadata_name),
+                }
+            )
 
-            # Get the controller information
+            self._create_service_account(
+                kubectl,
+                name=metadata_name,
+                labels=labels,
+            )
+            cleanup_data.append(
+                {
+                    "delete": self._delete_service_account,
+                    "args": (kubectl, metadata_name),
+                }
+            )
 
-            # Parse ~/.local/share/juju/controllers.yaml
-            # controllers.testing.api-endpoints|ca-cert|uuid
-            self.log.debug("Getting controller endpoints")
-            with open(os.path.expanduser(
-                "~/.local/share/juju/controllers.yaml"
-            )) as f:
-                controllers = yaml.load(f, Loader=yaml.Loader)
-                controller = controllers['controllers'][cluster_uuid]
-                endpoints = controller['api-endpoints']
-                self.juju_endpoint = endpoints[0]
-                self.juju_ca_cert = controller['ca-cert']
+            self._create_cluster_role_binding(
+                kubectl,
+                name=metadata_name,
+                labels=labels,
+            )
+            cleanup_data.append(
+                {
+                    "delete": self._delete_service_account,
+                    "args": (kubectl, metadata_name),
+                }
+            )
+            token, client_cert_data = await self._get_secret_data(
+                kubectl,
+                metadata_name,
+            )
 
-            # Parse ~/.local/share/juju/accounts
-            # controllers.testing.user|password
-            self.log.debug("Getting accounts")
-            with open(os.path.expanduser(
-                "~/.local/share/juju/accounts.yaml"
-            )) as f:
-                controllers = yaml.load(f, Loader=yaml.Loader)
-                controller = controllers['controllers'][cluster_uuid]
+            default_storage_class = kubectl.get_default_storage_class()
+            await self.libjuju.add_k8s(
+                name=cluster_uuid,
+                rbac_id=rbac_id,
+                token=token,
+                client_cert_data=client_cert_data,
+                configuration=kubectl.configuration,
+                storage_class=default_storage_class,
+                credential_name=self._get_credential_name(cluster_uuid),
+            )
+            # self.log.debug("Setting config")
+            # await self.set_config(cluster_uuid, config)
 
-                self.juju_user = controller['user']
-                self.juju_secret = controller['password']
+            # Test connection
+            # controller = await self.get_controller(cluster_uuid)
+            # await controller.disconnect()
 
+            # TODO: Remove these commented lines
             # raise Exception("EOL")
-
-            self.juju_public_key = None
-
-            config = {
-                'endpoint': self.juju_endpoint,
-                'username': self.juju_user,
-                'secret': self.juju_secret,
-                'cacert': self.juju_ca_cert,
-                'namespace': namespace,
-                'loadbalancer': loadbalancer,
-            }
-
-            # Store the cluster configuration so it
-            # can be used for subsequent calls
-            self.log.debug("Setting config")
-            await self.set_config(cluster_uuid, config)
-
-        else:
-            # This is an existing cluster, so get its config
-            cluster_uuid = reuse_cluster_uuid
-
-            config = self.get_config(cluster_uuid)
-
-            self.juju_endpoint = config['endpoint']
-            self.juju_user = config['username']
-            self.juju_secret = config['secret']
-            self.juju_ca_cert = config['cacert']
-            self.juju_public_key = None
-
-        # Login to the k8s cluster
-        if not self.authenticated:
-            await self.login(cluster_uuid)
-
-        # We're creating a new cluster
-        #print("Getting model {}".format(self.get_namespace(cluster_uuid), cluster_uuid=cluster_uuid))
-        #model = await self.get_model(
-        #    self.get_namespace(cluster_uuid),
-        #    cluster_uuid=cluster_uuid
-        #)
-
-        ## Disconnect from the model
-        #if model and model.is_connected():
-        #    await model.disconnect()
-
-        return cluster_uuid, True
+            # self.juju_public_key = None
+            # Login to the k8s cluster
+            # if not self.authenticated:
+            #     await self.login(cluster_uuid)
+
+            # We're creating a new cluster
+            # print("Getting model {}".format(self.get_namespace(cluster_uuid),
+            #    cluster_uuid=cluster_uuid))
+            # model = await self.get_model(
+            #    self.get_namespace(cluster_uuid),
+            #    cluster_uuid=cluster_uuid
+            # )
+
+            # Disconnect from the model
+            # if model and model.is_connected():
+            #    await model.disconnect()
+
+            return cluster_uuid, True
+        except Exception as e:
+            self.log.error("Error initializing k8scluster: {}".format(e))
+            if len(cleanup_data) > 0:
+                self.log.debug("Cleaning up created resources in k8s cluster...")
+                for item in cleanup_data:
+                    delete_function = item["delete"]
+                    delete_args = item["args"]
+                    delete_function(*delete_args)
+                self.log.debug("Cleanup finished")
+            raise e
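
A minimal usage sketch for the flow above, assuming connector is an
initialized K8sJujuConnector and kubeconfig_str holds a valid kubeconfig; on
failure, the juju-credential-<rbac_id> resources created in the cluster are
rolled back by the cleanup loop.

    async def register_cluster(connector, kubeconfig_str):
        # Returns the cluster uuid; True means resources were installed.
        cluster_uuid, created = await connector.init_env(k8s_creds=kubeconfig_str)
        return cluster_uuid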
 
     """Repo Management"""
+
     async def repo_add(
         self,
         name: str,
         url: str,
-        type: str = "charm",
+        _type: str = "charm",
     ):
-        raise NotImplemented()
+        raise MethodNotImplemented()
 
     async def repo_list(self):
-        raise NotImplemented()
+        raise MethodNotImplemented()
 
     async def repo_remove(
         self,
         name: str,
     ):
-        raise NotImplemented()
+        raise MethodNotImplemented()
 
-    async def synchronize_repos(
-        self,
-        cluster_uuid: str,
-        name: str
-    ):
+    async def synchronize_repos(self, cluster_uuid: str, name: str):
         """
         Returns None as currently add_repo is not implemented
         """
         return None
 
     """Reset"""
+
     async def reset(
-            self,
-            cluster_uuid: str,
-            force: bool = False,
-            uninstall_sw: bool = False
+        self, cluster_uuid: str, force: bool = False, uninstall_sw: bool = False
     ) -> bool:
         """Reset a cluster
 
@@ -266,35 +379,76 @@ class K8sJujuConnector(K8sConnector):
         """
 
         try:
-            if not self.authenticated:
-                await self.login(cluster_uuid)
-
-            if self.controller.is_connected():
-                # Destroy the model
-                namespace = self.get_namespace(cluster_uuid)
-                if await self.has_model(namespace):
-                    self.log.debug("[reset] Destroying model")
-                    await self.controller.destroy_model(
-                        namespace,
-                        destroy_storage=True
-                    )
-
-                # Disconnect from the controller
-                self.log.debug("[reset] Disconnecting controller")
-                await self.logout()
-
-                # Destroy the controller (via CLI)
-                self.log.debug("[reset] Destroying controller")
-                await self.destroy_controller(cluster_uuid)
-
-                self.log.debug("[reset] Removing k8s cloud")
-                k8s_cloud = "k8s-{}".format(cluster_uuid)
-                await self.remove_cloud(k8s_cloud)
-
-        except Exception as ex:
-            self.log.debug("Caught exception during reset: {}".format(ex))
+            # Remove k8scluster from database
+            # self.log.debug("[reset] Removing k8scluster from juju database")
+            # juju_db = self.db.get_one("admin", {"_id": "juju"})
+
+            # for k in juju_db["k8sclusters"]:
+            #     if k["_id"] == cluster_uuid:
+            #         juju_db["k8sclusters"].remove(k)
+            #         self.db.set_one(
+            #             table="admin",
+            #             q_filter={"_id": "juju"},
+            #             update_dict={"k8sclusters": juju_db["k8sclusters"]},
+            #         )
+            #         break
+
+            # Destroy the controller (via CLI)
+            # self.log.debug("[reset] Destroying controller")
+            # await self.destroy_controller(cluster_uuid)
+            self.log.debug("[reset] Removing k8s cloud")
+            # k8s_cloud = "k8s-{}".format(cluster_uuid)
+            # await self.remove_cloud(k8s_cloud)
+
+            cloud_creds = await self.libjuju.get_cloud_credentials(
+                cluster_uuid,
+                self._get_credential_name(cluster_uuid),
+            )
+
+            await self.libjuju.remove_cloud(cluster_uuid)
+
+            kubecfg = self.get_credentials(cluster_uuid=cluster_uuid)
+
+            kubecfg_file = tempfile.NamedTemporaryFile()
+            with open(kubecfg_file.name, "w") as f:
+                f.write(kubecfg)
+            kubectl = Kubectl(config_file=kubecfg_file.name)
+
+            delete_functions = [
+                self._delete_cluster_role_binding,
+                self._delete_service_account,
+                self._delete_cluster_role,
+            ]
 
+            credential_attrs = cloud_creds[0].result["attrs"]
+            if RBAC_LABEL_KEY_NAME in credential_attrs:
+                rbac_id = credential_attrs[RBAC_LABEL_KEY_NAME]
+                metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id)
+                delete_args = (kubectl, metadata_name)
+                for delete_func in delete_functions:
+                    try:
+                        delete_func(*delete_args)
+                    except Exception as e:
+                        self.log.warning("Cannot remove resource in K8s {}".format(e))
+
+        except Exception as e:
+            self.log.debug("Caught exception during reset: {}".format(e))
+            raise e
         return True
+        # TODO: Remove these commented lines
+        #     if not self.authenticated:
+        #         await self.login(cluster_uuid)
+
+        #     if self.controller.is_connected():
+        #         # Destroy the model
+        #         namespace = self.get_namespace(cluster_uuid)
+        #         if await self.has_model(namespace):
+        #             self.log.debug("[reset] Destroying model")
+        #             await self.controller.destroy_model(namespace, destroy_storage=True)
+
+        #         # Disconnect from the controller
+        #         self.log.debug("[reset] Disconnecting controller")
+        #         await self.logout()
 
     """Deployment"""
 
@@ -302,116 +456,121 @@ class K8sJujuConnector(K8sConnector):
         self,
         cluster_uuid: str,
         kdu_model: str,
+        kdu_instance: str,
         atomic: bool = True,
-        timeout: float = 300,
+        timeout: float = 1800,
         params: dict = None,
         db_dict: dict = None,
-        kdu_name: str = None
+        kdu_name: str = None,
+        namespace: str = None,
     ) -> bool:
         """Install a bundle
 
         :param cluster_uuid str: The UUID of the cluster to install to
         :param kdu_model str: The name or path of a bundle to install
+        :param kdu_instance: Kdu instance name
         :param atomic bool: If set, waits until the model is active and resets
                             the cluster on failure.
         :param timeout int: The time, in seconds, to wait for the install
                             to finish
         :param params dict: Key-value pairs of instantiation parameters
         :param kdu_name: Name of the KDU instance to be installed
+        :param namespace: K8s namespace to use for the KDU instance
 
         :return: True if successful, otherwise raises an exception
         """
+        bundle = kdu_model
 
-        if not self.authenticated:
-            self.log.debug("[install] Logging in to the controller")
-            await self.login(cluster_uuid)
+        # controller = await self.get_controller(cluster_uuid)
 
         ##
         # Get or create the model, based on the NS
         # uuid.
-        if kdu_name:
-            kdu_instance = "{}-{}".format(kdu_name, db_dict["filter"]["_id"])
+
+        if not db_dict:
+            raise K8sException("db_dict must be set")
+        if not bundle:
+            raise K8sException("bundle must be set")
+
+        if bundle.startswith("cs:"):
+            pass
+        elif bundle.startswith("http"):
+            # Download the file
+            pass
         else:
-            kdu_instance = db_dict["filter"]["_id"]
+            new_workdir = os.path.dirname(kdu_model)
+            os.chdir(new_workdir)
+            bundle = "local:{}".format(kdu_model)
 
         self.log.debug("Checking for model named {}".format(kdu_instance))
 
         # Create the new model
         self.log.debug("Adding model: {}".format(kdu_instance))
-        model = await self.add_model(kdu_instance, cluster_uuid=cluster_uuid)
+        await self.libjuju.add_model(
+            model_name=kdu_instance,
+            cloud_name=cluster_uuid,
+            credential_name=self._get_credential_name(cluster_uuid),
+        )
 
-        if model:
-            # TODO: Instantiation parameters
+        # if model:
+        # TODO: Instantiation parameters
 
-            """
-            "Juju bundle that models the KDU, in any of the following ways:
-                - <juju-repo>/<juju-bundle>
-                - <juju-bundle folder under k8s_models folder in the package>
-                - <juju-bundle tgz file (w/ or w/o extension) under k8s_models folder in the package>
-                - <URL_where_to_fetch_juju_bundle>
-            """
+        """
+        "Juju bundle that models the KDU, in any of the following ways:
+            - <juju-repo>/<juju-bundle>
+            - <juju-bundle folder under k8s_models folder in the package>
+            - <juju-bundle tgz file (w/ or w/o extension) under k8s_models folder
+                in the package>
+            - <URL_where_to_fetch_juju_bundle>
+        """
+        try:
+            previous_workdir = os.getcwd()
+        except FileNotFoundError:
+            previous_workdir = "/app/storage"
 
-            bundle = kdu_model
-            if kdu_model.startswith("cs:"):
-                bundle = kdu_model
-            elif kdu_model.startswith("http"):
-                # Download the file
-                pass
-            else:
-                # Local file
-
-                # if kdu_model.endswith(".tar.gz") or kdu_model.endswith(".tgz")
-                # Uncompress temporarily
-                # bundle = <uncompressed file>
-                pass
-
-            if not bundle:
-                # Raise named exception that the bundle could not be found
-                raise Exception()
-
-            self.log.debug("[install] deploying {}".format(bundle))
-            await model.deploy(bundle)
-
-            # Get the application
-            if atomic:
-                # applications = model.applications
-                self.log.debug("[install] Applications: {}".format(model.applications))
-                for name in model.applications:
-                    self.log.debug("[install] Waiting for {} to settle".format(name))
-                    application = model.applications[name]
-                    try:
-                        # It's not enough to wait for all units to be active;
-                        # the application status needs to be active as well.
-                        self.log.debug("Waiting for all units to be active...")
-                        await model.block_until(
-                            lambda: all(
-                                unit.agent_status == 'idle'
-                                and application.status in ['active', 'unknown']
-                                and unit.workload_status in [
-                                    'active', 'unknown'
-                                ] for unit in application.units
-                            ),
-                            timeout=timeout
-                        )
-                        self.log.debug("All units active.")
-
-                    except concurrent.futures._base.TimeoutError:
-                        self.log.debug("[install] Timeout exceeded; resetting cluster")
-                        await self.reset(cluster_uuid)
-                        return False
-
-            # Wait for the application to be active
-            if model.is_connected():
-                self.log.debug("[install] Disconnecting model")
-                await model.disconnect()
-
-            return kdu_instance
-        raise Exception("Unable to install")
-
-    async def instances_list(
-            self,
-            cluster_uuid: str
-    ) -> list:
+        self.log.debug("[install] deploying {}".format(bundle))
+        await self.libjuju.deploy(
+            bundle, model_name=kdu_instance, wait=atomic, timeout=timeout
+        )
+
+        # Get the application
+        # if atomic:
+        #     # applications = model.applications
+        #     self.log.debug("[install] Applications: {}".format(model.applications))
+        #     for name in model.applications:
+        #         self.log.debug("[install] Waiting for {} to settle".format(name))
+        #         application = model.applications[name]
+        #         try:
+        #             # It's not enough to wait for all units to be active;
+        #             # the application status needs to be active as well.
+        #             self.log.debug("Waiting for all units to be active...")
+        #             await model.block_until(
+        #                 lambda: all(
+        #                     unit.agent_status == "idle"
+        #                     and application.status in ["active", "unknown"]
+        #                     and unit.workload_status in ["active", "unknown"]
+        #                     for unit in application.units
+        #                 ),
+        #                 timeout=timeout,
+        #             )
+        #             self.log.debug("All units active.")
+
+        #         # TODO use asyncio.TimeoutError
+        #         except concurrent.futures._base.TimeoutError:
+        #             os.chdir(previous_workdir)
+        #             self.log.debug("[install] Timeout exceeded; resetting cluster")
+        #             await self.reset(cluster_uuid)
+        #             return False
+
+        # Wait for the application to be active
+        # if model.is_connected():
+        #     self.log.debug("[install] Disconnecting model")
+        #     await model.disconnect()
+        # await controller.disconnect()
+        os.chdir(previous_workdir)
+        return True
+
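
A hedged usage sketch: the bundle reference may take any of the forms listed
in the docstring above, and db_dict must carry the NS id (all names below
are hypothetical).

    async def deploy_kdu(connector, cluster_uuid, kdu_instance, ns_id):
        return await connector.install(
            cluster_uuid=cluster_uuid,
            kdu_model="cs:~charmed-osm/mariadb-k8s-bundle",
            kdu_instance=kdu_instance,
            atomic=True,  # wait until the model settles
            timeout=1800,
            db_dict={"filter": {"_id": ns_id}},
        )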
+    async def instances_list(self, cluster_uuid: str) -> list:
         """
         returns a list of deployed releases in a cluster
 
@@ -451,51 +610,63 @@ class K8sJujuConnector(K8sConnector):
         storage would require a redeployment of the service, at least in this
         initial release.
         """
-        namespace = self.get_namespace(cluster_uuid)
-        model = await self.get_model(namespace, cluster_uuid=cluster_uuid)
-
-        with open(kdu_model, 'r') as f:
-            bundle = yaml.safe_load(f)
-
-            """
-            {
-                'description': 'Test bundle',
-                'bundle': 'kubernetes',
-                'applications': {
-                    'mariadb-k8s': {
-                        'charm': 'cs:~charmed-osm/mariadb-k8s-20',
-                        'scale': 1,
-                        'options': {
-                            'password': 'manopw',
-                            'root_password': 'osm4u',
-                            'user': 'mano'
-                        },
-                        'series': 'kubernetes'
-                    }
-                }
-            }
-            """
-            # TODO: This should be returned in an agreed-upon format
-            for name in bundle['applications']:
-                self.log.debug(model.applications)
-                application = model.applications[name]
-                self.log.debug(application)
-
-                path = bundle['applications'][name]['charm']
-
-                try:
-                    await application.upgrade_charm(switch=path)
-                except juju.errors.JujuError as ex:
-                    if 'already running charm' in str(ex):
-                        # We're already running this version
-                        pass
-
-        await model.disconnect()
-
-        return True
-        raise NotImplemented()
+        raise MethodNotImplemented()
+        # TODO: Remove these commented lines
+
+        # model = await self.get_model(namespace, cluster_uuid=cluster_uuid)
+
+        # model = None
+        # namespace = self.get_namespace(cluster_uuid)
+        # controller = await self.get_controller(cluster_uuid)
+
+        # try:
+        #     if namespace not in await controller.list_models():
+        #         raise N2VCNotFound(message="Model {} does not exist".format(namespace))
+
+        #     model = await controller.get_model(namespace)
+        #     with open(kdu_model, "r") as f:
+        #         bundle = yaml.safe_load(f)
+
+        #         """
+        #         {
+        #             'description': 'Test bundle',
+        #             'bundle': 'kubernetes',
+        #             'applications': {
+        #                 'mariadb-k8s': {
+        #                     'charm': 'cs:~charmed-osm/mariadb-k8s-20',
+        #                     'scale': 1,
+        #                     'options': {
+        #                         'password': 'manopw',
+        #                         'root_password': 'osm4u',
+        #                         'user': 'mano'
+        #                     },
+        #                     'series': 'kubernetes'
+        #                 }
+        #             }
+        #         }
+        #         """
+        #         # TODO: This should be returned in an agreed-upon format
+        #         for name in bundle["applications"]:
+        #             self.log.debug(model.applications)
+        #             application = model.applications[name]
+        #             self.log.debug(application)
+
+        #             path = bundle["applications"][name]["charm"]
+
+        #             try:
+        #                 await application.upgrade_charm(switch=path)
+        #             except juju.errors.JujuError as ex:
+        #                 if "already running charm" in str(ex):
+        #                     # We're already running this version
+        #                     pass
+        # finally:
+        #     if model:
+        #         await model.disconnect()
+        #     await controller.disconnect()
+        # return True
 
     """Rollback"""
+
     async def rollback(
         self,
         cluster_uuid: str,
@@ -512,14 +683,11 @@ class K8sJujuConnector(K8sConnector):
         :return: If successful, returns the revision of active KDU instance,
                  or raises an exception
         """
-        raise NotImplemented()
+        raise MethodNotImplemented()
 
     """Deletion"""
-    async def uninstall(
-        self,
-        cluster_uuid: str,
-        kdu_instance: str
-    ) -> bool:
+
+    async def uninstall(self, cluster_uuid: str, kdu_instance: str) -> bool:
         """Uninstall a KDU instance
 
         :param cluster_uuid str: The UUID of the cluster
@@ -527,20 +695,110 @@ class K8sJujuConnector(K8sConnector):
 
         :return: Returns True if successful, or raises an exception
         """
-        if not self.authenticated:
-            self.log.debug("[uninstall] Connecting to controller")
-            await self.login(cluster_uuid)
+
+        # controller = await self.get_controller(cluster_uuid)
 
         self.log.debug("[uninstall] Destroying model")
 
-        await self.controller.destroy_models(kdu_instance)
+        await self.libjuju.destroy_model(kdu_instance, total_timeout=3600)
 
-        self.log.debug("[uninstall] Model destroyed and disconnecting")
-        await self.logout()
+        self.log.debug("[uninstall] Model destroyed and disconnecting")
+        # await controller.disconnect()
 
         return True
+        # TODO: Remove these commented lines
+        # if not self.authenticated:
+        #     self.log.debug("[uninstall] Connecting to controller")
+        #     await self.login(cluster_uuid)
+
+    async def exec_primitive(
+        self,
+        cluster_uuid: str = None,
+        kdu_instance: str = None,
+        primitive_name: str = None,
+        timeout: float = 300,
+        params: dict = None,
+        db_dict: dict = None,
+    ) -> str:
+        """Exec primitive (Juju action)
+
+        :param cluster_uuid str: The UUID of the cluster
+        :param kdu_instance str: The unique name of the KDU instance
+        :param primitive_name: Name of action that will be executed
+        :param timeout: Timeout for action execution
+        :param params: Dictionary of all the parameters needed for the action
+        :param db_dict: Dictionary for any additional data
+
+        :return: Returns the output of the action
+        """
+
+        # controller = await self.get_controller(cluster_uuid)
+
+        if not params or "application-name" not in params:
+            raise K8sException(
+                "Missing application-name argument, \
+                                argument needed for K8s actions"
+            )
+        try:
+            self.log.debug(
+                "[exec_primitive] Getting model "
+                "kdu_instance: {}".format(kdu_instance)
+            )
+            application_name = params["application-name"]
+            actions = await self.libjuju.get_actions(application_name, kdu_instance)
+            if primitive_name not in actions:
+                raise K8sException("Primitive {} not found".format(primitive_name))
+            output, status = await self.libjuju.execute_action(
+                application_name, kdu_instance, primitive_name, **params
+            )
+            # model = await self.get_model(kdu_instance, controller=controller)
+
+            # application_name = params["application-name"]
+            # application = model.applications[application_name]
+
+            # actions = await application.get_actions()
+            # if primitive_name not in actions:
+            #     raise K8sException("Primitive {} not found".format(primitive_name))
+
+            # unit = None
+            # for u in application.units:
+            #     if await u.is_leader_from_status():
+            #         unit = u
+            #         break
+
+            # if unit is None:
+            #     raise K8sException("No leader unit found to execute action")
+
+            # self.log.debug("[exec_primitive] Running action: {}".format(primitive_name))
+            # action = await unit.run_action(primitive_name, **params)
+
+            # output = await model.get_action_output(action_uuid=action.entity_id)
+            # status = await model.get_action_status(uuid_or_prefix=action.entity_id)
+
+            # status = (
+            #     status[action.entity_id] if action.entity_id in status else "failed"
+            # )
+
+            if status != "completed":
+                raise K8sException(
+                    "status is not completed: {} output: {}".format(status, output)
+                )
+
+            return output
+
+        except Exception as e:
+            error_msg = "Error executing primitive {}: {}".format(primitive_name, e)
+            self.log.error(error_msg)
+            raise K8sException(message=error_msg)
+        # finally:
+        #     await controller.disconnect()
+        # TODO: Remove these commented lines:
+        # if not self.authenticated:
+        #     self.log.debug("[exec_primitive] Connecting to controller")
+        #     await self.login(cluster_uuid)
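
A usage sketch: params must include "application-name" to select the target
application inside the model (application and action names below are
hypothetical).

    async def run_backup(connector, cluster_uuid, kdu_instance):
        return await connector.exec_primitive(
            cluster_uuid=cluster_uuid,
            kdu_instance=kdu_instance,
            primitive_name="backup",
            params={"application-name": "mariadb-k8s"},
        )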
 
     """Introspection"""
+
     async def inspect_kdu(
         self,
         kdu_model: str,
@@ -557,8 +815,11 @@ class K8sJujuConnector(K8sConnector):
         """
 
         kdu = {}
-        with open(kdu_model, 'r') as f:
-            bundle = yaml.safe_load(f)
+        if not os.path.exists(kdu_model):
+            raise K8sException("file {} not found".format(kdu_model))
+
+        with open(kdu_model, "r") as f:
+            bundle = yaml.safe_load(f.read())
 
             """
             {
@@ -579,7 +840,7 @@ class K8sJujuConnector(K8sConnector):
             }
             """
             # TODO: This should be returned in an agreed-upon format
-            kdu = bundle['applications']
+            kdu = bundle["applications"]
 
         return kdu
 
@@ -597,11 +858,11 @@ class K8sJujuConnector(K8sConnector):
         """
         readme = None
 
-        files = ['README', 'README.txt', 'README.md']
+        files = ["README", "README.txt", "README.md"]
         path = os.path.dirname(kdu_model)
         for file in os.listdir(path):
             if file in files:
-                with open(file, 'r') as f:
+                with open(file, "r") as f:
                     readme = f.read()
                     break
 
@@ -623,226 +884,298 @@ class K8sJujuConnector(K8sConnector):
                  and deployment_time.
         """
         status = {}
+        # controller = await self.get_controller(cluster_uuid)
+        # model = await self.get_model(kdu_instance, controller=controller)
 
-        model = await self.get_model(self.get_namespace(cluster_uuid), cluster_uuid=cluster_uuid)
-
-        # model = await self.get_model_by_uuid(cluster_uuid)
-        if model:
-            model_status = await model.get_status()
-            status = model_status.applications
-
-            for name in model_status.applications:
-                application = model_status.applications[name]
-                status[name] = {
-                    'status': application['status']['status']
-                }
+        # model_status = await model.get_status()
+        # status = model_status.applications
+        model_status = await self.libjuju.get_model_status(kdu_instance)
+        for name in model_status.applications:
+            application = model_status.applications[name]
+            status[name] = {"status": application["status"]["status"]}
 
-            if model.is_connected():
-                await model.disconnect()
+        # await model.disconnect()
+        # await controller.disconnect()
 
         return status
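
The returned dict maps each application in the model to its Juju application
status; a sketch of the shape, reusing the mariadb-k8s example from the
inspect_kdu docstring above:

    # Illustrative return value of status_kdu().
    {"mariadb-k8s": {"status": "active"}}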
 
-    # Private methods
-    async def add_k8s(
-        self,
-        cloud_name: str,
-        credentials: str,
-    ) -> bool:
-        """Add a k8s cloud to Juju
+    async def get_services(
+        self, cluster_uuid: str, kdu_instance: str, namespace: str
+    ) -> list:
+        """Return a list of services of a kdu_instance"""
 
-        Adds a Kubernetes cloud to Juju, so it can be bootstrapped with a
-        Juju Controller.
+        credentials = self.get_credentials(cluster_uuid=cluster_uuid)
 
-        :param cloud_name str: The name of the cloud to add.
-        :param credentials dict: A dictionary representing the output of
-            `kubectl config view --raw`.
+        # config_path = "/tmp/{}".format(cluster_uuid)
+        # config_file = "{}/config".format(config_path)
 
-        :returns: True if successful, otherwise raises an exception.
-        """
+        # if not os.path.exists(config_path):
+        #     os.makedirs(config_path)
+        # with open(config_file, "w") as f:
+        #     f.write(credentials)
 
-        cmd = [self.juju_command, "add-k8s", "--local", cloud_name]
-        self.log.debug(cmd)
+        kubecfg = tempfile.NamedTemporaryFile()
+        with open(kubecfg.name, "w") as kubecfg_file:
+            kubecfg_file.write(credentials)
+        kubectl = Kubectl(config_file=kubecfg.name)
 
-        process = await asyncio.create_subprocess_exec(
-            *cmd,
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
-            stdin=asyncio.subprocess.PIPE,
+        return kubectl.get_services(
+            field_selector="metadata.namespace={}".format(kdu_instance)
         )
 
-        # Feed the process the credentials
-        process.stdin.write(credentials.encode("utf-8"))
-        await process.stdin.drain()
-        process.stdin.close()
-
-        stdout, stderr = await process.communicate()
-
-        return_code = process.returncode
+    async def get_service(
+        self, cluster_uuid: str, service_name: str, namespace: str
+    ) -> object:
+        """Return data for a specific service inside a namespace"""
 
-        self.log.debug("add-k8s return code: {}".format(return_code))
+        credentials = self.get_credentials(cluster_uuid=cluster_uuid)
 
-        if return_code > 0:
-            raise Exception(stderr)
+        # config_path = "/tmp/{}".format(cluster_uuid)
+        # config_file = "{}/config".format(config_path)
 
-        return True
+        # if not os.path.exists(config_path):
+        #     os.makedirs(config_path)
+        # with open(config_file, "w") as f:
+        #     f.write(credentials)
 
-    async def add_model(
-        self,
-        model_name: str,
-        cluster_uuid: str,
-    ) -> juju.model.Model:
-        """Adds a model to the controller
+        kubecfg = tempfile.NamedTemporaryFile()
+        with open(kubecfg.name, "w") as kubecfg_file:
+            kubecfg_file.write(credentials)
+        kubectl = Kubectl(config_file=kubecfg.name)
 
-        Adds a new model to the Juju controller
-
-        :param model_name str: The name of the model to add.
-        :returns: The juju.model.Model object of the new model upon success or
-                  raises an exception.
-        """
-        if not self.authenticated:
-            await self.login(cluster_uuid)
-
-        self.log.debug("Adding model '{}' to cluster_uuid '{}'".format(model_name, cluster_uuid))
-        try:
-            model = await self.controller.add_model(
-                model_name,
-                config={'authorized-keys': self.juju_public_key}
+        return kubectl.get_services(
+            field_selector="metadata.name={},metadata.namespace={}".format(
+                service_name, namespace
             )
-        except Exception as ex:
-            self.log.debug(ex)
-            self.log.debug("Caught exception: {}".format(ex))
-            pass
-
-        return model
+        )[0]
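
Kubernetes field selectors are comma-separated key=value pairs, so the
selector built here has the form "metadata.name=<svc>,metadata.namespace=<ns>".
A usage sketch (the service name is hypothetical; the Juju model name doubles
as the namespace):

    async def find_service(connector, cluster_uuid, kdu_instance):
        return await connector.get_service(
            cluster_uuid=cluster_uuid,
            service_name="mariadb-k8s",
            namespace=kdu_instance,
        )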
 
-    async def bootstrap(
-        self,
-        cloud_name: str,
-        cluster_uuid: str,
-        loadbalancer: bool
-    ) -> bool:
-        """Bootstrap a Kubernetes controller
-
-        Bootstrap a Juju controller inside the Kubernetes cluster
-
-        :param cloud_name str: The name of the cloud.
-        :param cluster_uuid str: The UUID of the cluster to bootstrap.
-        :param loadbalancer bool: If the controller should use loadbalancer or not.
-        :returns: True upon success or raises an exception.
+    # Private methods
+    # async def add_k8s(self, cloud_name: str, credentials: str,) -> bool:
+    #     """Add a k8s cloud to Juju
+
+    #     Adds a Kubernetes cloud to Juju, so it can be bootstrapped with a
+    #     Juju Controller.
+
+    #     :param cloud_name str: The name of the cloud to add.
+    #     :param credentials dict: A dictionary representing the output of
+    #         `kubectl config view --raw`.
+
+    #     :returns: True if successful, otherwise raises an exception.
+    #     """
+
+    #     cmd = [self.juju_command, "add-k8s", "--local", cloud_name]
+    #     self.log.debug(cmd)
+
+    #     process = await asyncio.create_subprocess_exec(
+    #         *cmd,
+    #         stdout=asyncio.subprocess.PIPE,
+    #         stderr=asyncio.subprocess.PIPE,
+    #         stdin=asyncio.subprocess.PIPE,
+    #     )
+
+    #     # Feed the process the credentials
+    #     process.stdin.write(credentials.encode("utf-8"))
+    #     await process.stdin.drain()
+    #     process.stdin.close()
+
+    #     _stdout, stderr = await process.communicate()
+
+    #     return_code = process.returncode
+
+    #     self.log.debug("add-k8s return code: {}".format(return_code))
+
+    #     if return_code > 0:
+    #         raise Exception(stderr)
+
+    #     return True
+
+    # async def add_model(
+    #     self, model_name: str, cluster_uuid: str, controller: Controller
+    # ) -> Model:
+    #     """Adds a model to the controller
+
+    #     Adds a new model to the Juju controller
+
+    #     :param model_name str: The name of the model to add.
+    #     :param cluster_uuid str: ID of the cluster.
+    #     :param controller: Controller object in which the model will be added
+    #     :returns: The juju.model.Model object of the new model upon success or
+    #               raises an exception.
+    #     """
+
+    #     self.log.debug(
+    #         "Adding model '{}' to cluster_uuid '{}'".format(model_name, cluster_uuid)
+    #     )
+    #     model = None
+    #     try:
+    #         if self.juju_public_key is not None:
+    #             model = await controller.add_model(
+    #                 model_name, config={"authorized-keys": self.juju_public_key}
+    #             )
+    #         else:
+    #             model = await controller.add_model(model_name)
+    #     except Exception as ex:
+    #         self.log.debug(ex)
+    #         self.log.debug("Caught exception: {}".format(ex))
+    #         pass
+
+    #     return model
+
+    # async def bootstrap(
+    #     self, cloud_name: str, cluster_uuid: str, loadbalancer: bool
+    # ) -> bool:
+    #     """Bootstrap a Kubernetes controller
+
+    #     Bootstrap a Juju controller inside the Kubernetes cluster
+
+    #     :param cloud_name str: The name of the cloud.
+    #     :param cluster_uuid str: The UUID of the cluster to bootstrap.
+    #     :param loadbalancer bool: If the controller should use loadbalancer or not.
+    #     :returns: True upon success or raises an exception.
+    #     """
+
+    #     if not loadbalancer:
+    #         cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid]
+    #     else:
+    #         """
+    #         For public clusters, specify that the controller service is using a
+    #         LoadBalancer.
+    #         """
+    #         cmd = [
+    #             self.juju_command,
+    #             "bootstrap",
+    #             cloud_name,
+    #             cluster_uuid,
+    #             "--config",
+    #             "controller-service-type=loadbalancer",
+    #         ]
+
+    #     self.log.debug(
+    #         "Bootstrapping controller {} in cloud {}".format(cluster_uuid, cloud_name)
+    #     )
+
+    #     process = await asyncio.create_subprocess_exec(
+    #         *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
+    #     )
+
+    #     _stdout, stderr = await process.communicate()
+
+    #     return_code = process.returncode
+
+    #     if return_code > 0:
+    #         #
+    #         if b"already exists" not in stderr:
+    #             raise Exception(stderr)
+
+    #     return True
+
+    # async def destroy_controller(self, cluster_uuid: str) -> bool:
+    #     """Destroy a Kubernetes controller
+
+    #     Destroy an existing Kubernetes controller.
+
+    #     :param cluster_uuid str: The UUID of the cluster to bootstrap.
+    #     :returns: True upon success or raises an exception.
+    #     """
+    #     cmd = [
+    #         self.juju_command,
+    #         "destroy-controller",
+    #         "--destroy-all-models",
+    #         "--destroy-storage",
+    #         "-y",
+    #         cluster_uuid,
+    #     ]
+
+    #     process = await asyncio.create_subprocess_exec(
+    #         *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
+    #     )
+
+    #     _stdout, stderr = await process.communicate()
+
+    #     return_code = process.returncode
+
+    #     if return_code > 0:
+    #         #
+    #         if "already exists" not in stderr:
+    #             raise Exception(stderr)
+
+    def get_credentials(self, cluster_uuid: str) -> str:
         """
-
-        if not loadbalancer:
-            cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid]
-        else:
-            """
-            For public clusters, specify that the controller service is using a LoadBalancer.
-            """
-            cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid, "--config", "controller-service-type=loadbalancer"]
-
-        self.log.debug("Bootstrapping controller {} in cloud {}".format(
-            cluster_uuid, cloud_name
-        ))
-
-        process = await asyncio.create_subprocess_exec(
-            *cmd,
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
-        )
-
-        stdout, stderr = await process.communicate()
-
-        return_code = process.returncode
-
-        if return_code > 0:
-            #
-            if b'already exists' not in stderr:
-                raise Exception(stderr)
-
-        return True
-
-    async def destroy_controller(
-        self,
-        cluster_uuid: str
-    ) -> bool:
-        """Destroy a Kubernetes controller
-
-        Destroy an existing Kubernetes controller.
-
-        :param cluster_uuid str: The UUID of the cluster to bootstrap.
-        :returns: True upon success or raises an exception.
+        Get Cluster Kubeconfig
         """
-        cmd = [
-            self.juju_command,
-            "destroy-controller",
-            "--destroy-all-models",
-            "--destroy-storage",
-            "-y",
-            cluster_uuid
-        ]
-
-        process = await asyncio.create_subprocess_exec(
-            *cmd,
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
+        k8scluster = self.db.get_one(
+            "k8sclusters", q_filter={"_id": cluster_uuid}, fail_on_empty=False
         )
 
-        stdout, stderr = await process.communicate()
-
-        return_code = process.returncode
-
-        if return_code > 0:
-            #
-            if 'already exists' not in stderr:
-                raise Exception(stderr)
-
-    def get_config(
-        self,
-        cluster_uuid: str,
-    ) -> dict:
-        """Get the cluster configuration
+        self.db.encrypt_decrypt_fields(
+            k8scluster.get("credentials"),
+            "decrypt",
+            ["password", "secret"],
+            schema_version=k8scluster["schema_version"],
+            salt=k8scluster["_id"],
+        )
 
-        Gets the configuration of the cluster
+        return yaml.safe_dump(k8scluster.get("credentials"))
 
-        :param cluster_uuid str: The UUID of the cluster.
-        :return: A dict upon success, or raises an exception.
+    def _get_credential_name(self, cluster_uuid: str) -> str:
         """
-        cluster_config = "{}/{}.yaml".format(self.fs.path, cluster_uuid)
-        if os.path.exists(cluster_config):
-            with open(cluster_config, 'r') as f:
-                config = yaml.safe_load(f.read())
-                return config
-        else:
-            raise Exception(
-                "Unable to locate configuration for cluster {}".format(
-                    cluster_uuid
-                )
-            )
+        Get credential name for a k8s cloud
 
-    async def get_model(
-        self,
-        model_name: str,
-        cluster_uuid: str,
-    ) -> juju.model.Model:
-        """Get a model from the Juju Controller.
+        We cannot use the cluster_uuid as the credential name directly,
+        because a credential name cannot start with a number; it must start
+        with a letter. Therefore, the k8s cloud credential name will be
+        "cred-" followed by the cluster uuid.
 
-        Note: Model objects returned must call disconnected() before it goes
-        out of scope.
+        :param: cluster_uuid:   Cluster UUID of the kubernetes cloud (=cloud_name)
 
-        :param model_name str: The name of the model to get
-        :return The juju.model.Model object if found, or None.
+        :return:                Name to use for the credential name.
         """
-        if not self.authenticated:
-            await self.login(cluster_uuid)
-
-        model = None
-        models = await self.controller.list_models()
-        self.log.debug(models)
-        if model_name in models:
-            self.log.debug("Found model: {}".format(model_name))
-            model = await self.controller.get_model(
-                model_name
-            )
-        return model
+        return "cred-{}".format(cluster_uuid)
+
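
For example, with a hypothetical cluster uuid:

    >>> connector._get_credential_name("405404a6-c7a6-43e9-9ab8-183f5e3a9734")
    'cred-405404a6-c7a6-43e9-9ab8-183f5e3a9734'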
+    # def get_config(self, cluster_uuid: str,) -> dict:
+    #     """Get the cluster configuration
+
+    #     Gets the configuration of the cluster
+
+    #     :param cluster_uuid str: The UUID of the cluster.
+    #     :return: A dict upon success, or raises an exception.
+    #     """
+
+    #     juju_db = self.db.get_one("admin", {"_id": "juju"})
+    #     config = None
+    #     for k in juju_db["k8sclusters"]:
+    #         if k["_id"] == cluster_uuid:
+    #             config = k["config"]
+    #             self.db.encrypt_decrypt_fields(
+    #                 config,
+    #                 "decrypt",
+    #                 ["secret", "cacert"],
+    #                 schema_version="1.1",
+    #                 salt=k["_id"],
+    #             )
+    #             break
+    #     if not config:
+    #         raise Exception(
+    #             "Unable to locate configuration for cluster {}".format(cluster_uuid)
+    #         )
+    #     return config
+
+    # async def get_model(self, model_name: str, controller: Controller) -> Model:
+    #     """Get a model from the Juju Controller.
+
+    #     Note: disconnect() must be called on returned Model objects
+    #     before they go out of scope.
+
+    #     :param model_name str: The name of the model to get
+    #     :param controller Controller: Controller object
+    #     :return The juju.model.Model object if found, or None.
+    #     """
+
+    #     models = await controller.list_models()
+    #     if model_name not in models:
+    #         raise N2VCNotFound("Model {} not found".format(model_name))
+    #     self.log.debug("Found model: {}".format(model_name))
+    #     return await controller.get_model(model_name)
 
     def get_namespace(
         self,
@@ -854,183 +1187,299 @@ class K8sJujuConnector(K8sConnector):
         :param cluster_uuid str: The UUID of the cluster
         :returns: The namespace UUID, or raises an exception
         """
-        config = self.get_config(cluster_uuid)
+        # get_config is commented out in this change, so this method is
+        # stubbed until it is reimplemented on top of the DB credentials
+        # config = self.get_config(cluster_uuid)
 
         # Make sure the name is in the config
-        if 'namespace' not in config:
-            raise Exception("Namespace not found.")
+        # if "namespace" not in config:
+        #     raise Exception("Namespace not found.")
 
         # TODO: We want to make sure this is unique to the cluster, in case
         # the cluster is being reused.
         # Consider pre/appending the cluster id to the namespace string
-        return config['namespace']
+        pass
+
+    # TODO: Remove these lines of code
+    # async def has_model(self, model_name: str) -> bool:
+    #     """Check if a model exists in the controller
+
+    #     Checks to see if a model exists in the connected Juju controller.
+
+    #     :param model_name str: The name of the model
+    #     :return: A boolean indicating if the model exists
+    #     """
+    #     models = await self.controller.list_models()
+
+    #     if model_name in models:
+    #         return True
+    #     return False
+
+    # def is_local_k8s(self, credentials: str,) -> bool:
+    #     """Check if a cluster is local
+
+    #     Checks if a cluster is running in the local host
+
+    #     :param credentials dict: A dictionary containing the k8s credentials
+    #     :returns: A boolean if the cluster is running locally
+    #     """
 
-    async def has_model(
-        self,
-        model_name: str
-    ) -> bool:
-        """Check if a model exists in the controller
+    #     creds = yaml.safe_load(credentials)
 
-        Checks to see if a model exists in the connected Juju controller.
+    #     if creds and os.getenv("OSMLCM_VCA_APIPROXY"):
+    #         for cluster in creds["clusters"]:
+    #             if "server" in cluster["cluster"]:
+    #                 if os.getenv("OSMLCM_VCA_APIPROXY") in cluster["cluster"]["server"]:
+    #                     return True
 
-        :param model_name str: The name of the model
-        :return: A boolean indicating if the model exists
-        """
-        models = await self.controller.list_models()
-
-        if model_name in models:
-            return True
-        return False
+    #     return False
 
-    def is_local_k8s(
-        self,
-        credentials: str,
-    ) -> bool:
-        """Check if a cluster is local
+    # async def get_controller(self, cluster_uuid):
+    #     """Login to the Juju controller."""
 
-        Checks if a cluster is running in the local host
+    #     config = self.get_config(cluster_uuid)
 
-        :param credentials dict: A dictionary containing the k8s credentials
-        :returns: A boolean if the cluster is running locally
-        """
-        creds = yaml.safe_load(credentials)
-        if os.getenv("OSMLCM_VCA_APIPROXY"):
-            host_ip = os.getenv("OSMLCM_VCA_APIPROXY")
+    #     juju_endpoint = config["endpoint"]
+    #     juju_user = config["username"]
+    #     juju_secret = config["secret"]
+    #     juju_ca_cert = config["cacert"]
 
-        if creds and host_ip:
-            for cluster in creds['clusters']:
-                if 'server' in cluster['cluster']:
-                    if host_ip in cluster['cluster']['server']:
-                        return True
+    #     controller = Controller()
 
-        return False
+    #     if juju_secret:
+    #         self.log.debug(
+    #             "Connecting to controller... ws://{} as {}".format(
+    #                 juju_endpoint, juju_user,
+    #             )
+    #         )
+    #         try:
+    #             await controller.connect(
+    #                 endpoint=juju_endpoint,
+    #                 username=juju_user,
+    #                 password=juju_secret,
+    #                 cacert=juju_ca_cert,
+    #             )
+    #             self.log.debug("JujuApi: Logged into controller")
+    #             return controller
+    #         except Exception as ex:
+    #             self.log.debug(ex)
+    #             self.log.debug("Caught exception: {}".format(ex))
+    #     else:
+    #         self.log.fatal("VCA credentials not configured.")
 
-    async def login(self, cluster_uuid):
-        """Login to the Juju controller."""
+    # TODO: Remove these commented lines
+    #         self.authenticated = False
+    # if self.authenticated:
+    #         return
 
-        if self.authenticated:
-            return
+    #     self.connecting = True
+    #     juju_public_key = None
+    #     self.authenticated = True
+    #     Test: Make sure we have the credentials loaded
+    # async def logout(self):
+    #     """Logout of the Juju controller."""
+    #     self.log.debug("[logout]")
+    #     if not self.authenticated:
+    #         return False
 
-        self.connecting = True
+    #     for model in self.models:
+    #         self.log.debug("Logging out of model {}".format(model))
+    #         await self.models[model].disconnect()
 
-        # Test: Make sure we have the credentials loaded
-        config = self.get_config(cluster_uuid)
+    #     if self.controller:
+    #         self.log.debug("Disconnecting controller {}".format(self.controller))
+    #         await self.controller.disconnect()
+    #         self.controller = None
 
-        self.juju_endpoint = config['endpoint']
-        self.juju_user = config['username']
-        self.juju_secret = config['secret']
-        self.juju_ca_cert = config['cacert']
-        self.juju_public_key = None
+    #     self.authenticated = False
 
-        self.controller = Controller()
+    # async def remove_cloud(self, cloud_name: str,) -> bool:
+    #     """Remove a k8s cloud from Juju
 
-        if self.juju_secret:
-            self.log.debug(
-                "Connecting to controller... ws://{} as {}/{}".format(
-                    self.juju_endpoint,
-                    self.juju_user,
-                    self.juju_secret,
-                )
-            )
-            try:
-                await self.controller.connect(
-                    endpoint=self.juju_endpoint,
-                    username=self.juju_user,
-                    password=self.juju_secret,
-                    cacert=self.juju_ca_cert,
-                )
-                self.authenticated = True
-                self.log.debug("JujuApi: Logged into controller")
-            except Exception as ex:
-                self.log.debug(ex)
-                self.log.debug("Caught exception: {}".format(ex))
-                pass
-        else:
-            self.log.fatal("VCA credentials not configured.")
-            self.authenticated = False
+    #     Removes a Kubernetes cloud from Juju.
 
-    async def logout(self):
-        """Logout of the Juju controller."""
-        self.log.debug("[logout]")
-        if not self.authenticated:
-            return False
+    #     :param cloud_name str: The name of the cloud to add.
 
-        for model in self.models:
-            self.log.debug("Logging out of model {}".format(model))
-            await self.models[model].disconnect()
+    #     :returns: True if successful, otherwise raises an exception.
+    #     """
 
-        if self.controller:
-            self.log.debug("Disconnecting controller {}".format(
-                self.controller
-            ))
-            await self.controller.disconnect()
-            self.controller = None
+    #     # Remove the bootstrapped controller
+    #     cmd = [self.juju_command, "remove-k8s", "--client", cloud_name]
+    #     process = await asyncio.create_subprocess_exec(
+    #         *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
+    #     )
+
+    #     _stdout, stderr = await process.communicate()
 
-        self.authenticated = False
+    #     return_code = process.returncode
+
+    #     if return_code > 0:
+    #         raise Exception(stderr)
+
+    #     # Remove the cloud from the local config
+    #     cmd = [self.juju_command, "remove-cloud", "--client", cloud_name]
+    #     process = await asyncio.create_subprocess_exec(
+    #         *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
+    #     )
+
+    #     _stdout, stderr = await process.communicate()
+
+    #     return_code = process.returncode
+
+    #     if return_code > 0:
+    #         raise Exception(stderr)
+
+    #     return True
+
+    # async def set_config(self, cluster_uuid: str, config: dict,) -> bool:
+    #     """Save the cluster configuration
+
+    #     Saves the cluster information to the Mongo database
+
+    #     :param cluster_uuid str: The UUID of the cluster
+    #     :param config dict: A dictionary containing the cluster configuration
+    #     """
+
+    #     juju_db = self.db.get_one("admin", {"_id": "juju"})
+
+    #     k8sclusters = juju_db["k8sclusters"] if "k8sclusters" in juju_db else []
+    #     self.db.encrypt_decrypt_fields(
+    #         config,
+    #         "encrypt",
+    #         ["secret", "cacert"],
+    #         schema_version="1.1",
+    #         salt=cluster_uuid,
+    #     )
+    #     k8sclusters.append({"_id": cluster_uuid, "config": config})
+    #     self.db.set_one(
+    #         table="admin",
+    #         q_filter={"_id": "juju"},
+    #         update_dict={"k8sclusters": k8sclusters},
+    #     )
+
+    # Private methods to create/delete the Kubernetes resources needed
+    # to register the K8s cloud in Juju
 
-    async def remove_cloud(
+    def _create_cluster_role(
         self,
-        cloud_name: str,
-    ) -> bool:
-        """Remove a k8s cloud from Juju
-
-        Removes a Kubernetes cloud from Juju.
-
-        :param cloud_name str: The name of the cloud to add.
+        kubectl: Kubectl,
+        name: str,
+        labels: Dict[str, str],
+    ):
+        cluster_roles = kubectl.clients[RBAC_CLIENT].list_cluster_role(
+            field_selector="metadata.name={}".format(name)
+        )
 
-        :returns: True if successful, otherwise raises an exception.
-        """
+        if len(cluster_roles.items) > 0:
+            raise Exception(
+                "Cluster role with metadata.name={} already exists".format(name)
+            )
 
-        # Remove the bootstrapped controller
-        cmd = [self.juju_command, "remove-k8s", "--client", cloud_name]
-        process = await asyncio.create_subprocess_exec(
-            *cmd,
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
+        metadata = V1ObjectMeta(name=name, labels=labels, namespace=ADMIN_NAMESPACE)
+        # Cluster role
+        cluster_role = V1ClusterRole(
+            metadata=metadata,
+            rules=[
+                V1PolicyRule(api_groups=["*"], resources=["*"], verbs=["*"]),
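+                # non_resource_ur_ls is the Python client's generated
+                # name for the manifest field nonResourceURLs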
+                V1PolicyRule(non_resource_ur_ls=["*"], verbs=["*"]),
+            ],
         )
 
-        stdout, stderr = await process.communicate()
+        kubectl.clients[RBAC_CLIENT].create_cluster_role(cluster_role)
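+        # For reference, a sketch of the equivalent manifest (the name
+        # and rbac-id label below are illustrative):
+        #
+        #   apiVersion: rbac.authorization.k8s.io/v1
+        #   kind: ClusterRole
+        #   metadata:
+        #     name: juju-credential-0a1b2c
+        #     labels:
+        #       rbac-id: 0a1b2c
+        #   rules:
+        #     - apiGroups: ["*"]
+        #       resources: ["*"]
+        #       verbs: ["*"]
+        #     - nonResourceURLs: ["*"]
+        #       verbs: ["*"]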
 
-        return_code = process.returncode
+    def _delete_cluster_role(self, kubectl: Kubectl, name: str):
+        kubectl.clients[RBAC_CLIENT].delete_cluster_role(name)
 
-        if return_code > 0:
-            raise Exception(stderr)
-
-        # Remove the cloud from the local config
-        cmd = [self.juju_command, "remove-cloud", "--client", cloud_name]
-        process = await asyncio.create_subprocess_exec(
-            *cmd,
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
+    def _create_service_account(
+        self,
+        kubectl: Kubectl,
+        name: str,
+        labels: Dict[str, str],
+    ):
+        service_accounts = kubectl.clients[CORE_CLIENT].list_namespaced_service_account(
+            ADMIN_NAMESPACE, field_selector="metadata.name={}".format(name)
         )
+        if len(service_accounts.items) > 0:
+            raise Exception(
+                "Service account with metadata.name={} already exists".format(name)
+            )
 
-        stdout, stderr = await process.communicate()
-
-        return_code = process.returncode
+        metadata = V1ObjectMeta(name=name, labels=labels, namespace=ADMIN_NAMESPACE)
+        service_account = V1ServiceAccount(metadata=metadata)
 
-        if return_code > 0:
-            raise Exception(stderr)
+        kubectl.clients[CORE_CLIENT].create_namespaced_service_account(
+            ADMIN_NAMESPACE, service_account
+        )
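+        # Roughly equivalent to
+        #   kubectl -n kube-system create serviceaccount <name>
+        # with the rbac-id label added so the stack can be found and
+        # cleaned up later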
 
-        return True
+    def _delete_service_account(self, kubectl: Kubectl, name: str):
+        kubectl.clients[CORE_CLIENT].delete_namespaced_service_account(
+            name, ADMIN_NAMESPACE
+        )
 
-    async def set_config(
+    def _create_cluster_role_binding(
         self,
-        cluster_uuid: str,
-        config: dict,
-    ) -> bool:
-        """Save the cluster configuration
+        kubectl: Kubectl,
+        name: str,
+        labels: Dict[str, str],
+    ):
+        role_bindings = kubectl.clients[RBAC_CLIENT].list_cluster_role_binding(
+            field_selector="metadata.name={}".format(name)
+        )
+        if len(role_bindings.items) > 0:
+            raise Exception("Generated rbac id already exists")
+
+        role_binding = V1ClusterRoleBinding(
+            metadata=V1ObjectMeta(name=name, labels=labels),
+            role_ref=V1RoleRef(
+                kind="ClusterRole", name=name, api_group="rbac.authorization.k8s.io"
+            ),
+            subjects=[
+                V1Subject(kind="ServiceAccount", name=name, namespace=ADMIN_NAMESPACE)
+            ],
+        )
+        kubectl.clients[RBAC_CLIENT].create_cluster_role_binding(role_binding)
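+        # The binding ties the ClusterRole and the ServiceAccount of the
+        # same name together, so the token read by _get_secret_data
+        # carries the role's cluster-wide permissions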
 
-        Saves the cluster information to the file store
+    def _delete_cluster_role_binding(self, kubectl: Kubectl, name: str):
+        kubectl.clients[RBAC_CLIENT].delete_cluster_role_binding(name)
 
-        :param cluster_uuid str: The UUID of the cluster
-        :param config dict: A dictionary containing the cluster configuration
-        :returns: Boolean upon success or raises an exception.
-        """
+    async def _get_secret_data(self, kubectl: Kubectl, name: str) -> (str, str):
+        v1_core = kubectl.clients[CORE_CLIENT]
+
+        # The token controller populates the service account's secret
+        # asynchronously, so poll for it a bounded number of times
+        retries_limit = 10
+        secret_name = None
+        while True:
+            retries_limit -= 1
+            service_accounts = v1_core.list_namespaced_service_account(
+                ADMIN_NAMESPACE, field_selector="metadata.name={}".format(name)
+            )
+            if len(service_accounts.items) == 0:
+                raise Exception(
+                    "Service account not found with metadata.name={}".format(name)
+                )
+            service_account = service_accounts.items[0]
+            if service_account.secrets and len(service_account.secrets) > 0:
+                secret_name = service_account.secrets[0].name
+            if secret_name is not None or not retries_limit:
+                break
+            # Give the token controller a moment before the next poll
+            # instead of burning through all retries at once
+            await asyncio.sleep(1)
+        if not secret_name:
+            raise Exception(
+                "Failed getting the secret from service account {}".format(name)
+            )
+        secret = v1_core.list_namespaced_secret(
+            ADMIN_NAMESPACE,
+            field_selector="metadata.name={}".format(secret_name),
+        ).items[0]
 
-        cluster_config = "{}/{}.yaml".format(self.fs.path, cluster_uuid)
-        if not os.path.exists(cluster_config):
-            self.log.debug("Writing config to {}".format(cluster_config))
-            with open(cluster_config, 'w') as f:
-                f.write(yaml.dump(config, Dumper=yaml.Dumper))
+        token = secret.data[SERVICE_ACCOUNT_TOKEN_KEY]
+        # "ca.crt" holds the cluster's root CA certificate, not a
+        # client certificate
+        root_ca_data = secret.data[SERVICE_ACCOUNT_ROOT_CA_KEY]
 
-        return True
+        return (
+            base64.b64decode(token).decode("utf-8"),
+            base64.b64decode(root_ca_data).decode("utf-8"),
+        )
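+
+    # A minimal sketch of consuming the returned pair (the "oauth2"
+    # auth-type and "Token" attribute are assumptions about the Juju
+    # credential built elsewhere in this class):
+    #
+    #   token, root_ca = await self._get_secret_data(kubectl, name)
+    #   attrs = {"Token": token}
+    #   # root_ca is passed as the cloud's CA certificate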
+
+    @staticmethod
+    def generate_kdu_instance_name(**kwargs):
+        db_dict = kwargs.get("db_dict")
+        kdu_name = kwargs.get("kdu_name", None)
+        if kdu_name:
+            kdu_instance = "{}-{}".format(kdu_name, db_dict["filter"]["_id"])
+        else:
+            kdu_instance = db_dict["filter"]["_id"]
+        return kdu_instance
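+
+    # Example (made-up db _id):
+    #   generate_kdu_instance_name(
+    #       db_dict={"filter": {"_id": "0a1b2c"}}, kdu_name="squid"
+    #   )  # -> "squid-0a1b2c"; without kdu_name it is just "0a1b2c"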