Add ModelConfig
[osm/N2VC.git] / n2vc / k8s_juju_conn.py
index 70045b0..0e9d547 100644 (file)
 #     limitations under the License.
 
 import asyncio
-import concurrent
 import os
 import uuid
 import yaml
+import tempfile
+import binascii
+import base64
 
-from juju.controller import Controller
-from juju.model import Model
-from n2vc.exceptions import K8sException
+from n2vc.config import ModelConfig
+from n2vc.exceptions import K8sException, N2VCBadArgumentsException
 from n2vc.k8s_conn import K8sConnector
-from n2vc.kubectl import Kubectl
-from .exceptions import MethodNotImplemented, N2VCNotFound
+from n2vc.kubectl import Kubectl, CORE_CLIENT, RBAC_CLIENT
+from .exceptions import MethodNotImplemented
+from n2vc.utils import base64_to_cacert
+from n2vc.libjuju import Libjuju
+
+from kubernetes.client.models import (
+    V1ClusterRole,
+    V1ObjectMeta,
+    V1PolicyRule,
+    V1ServiceAccount,
+    V1ClusterRoleBinding,
+    V1RoleRef,
+    V1Subject,
+)
+
+from typing import Dict
+
+SERVICE_ACCOUNT_TOKEN_KEY = "token"
+SERVICE_ACCOUNT_ROOT_CA_KEY = "ca.crt"
+RBAC_LABEL_KEY_NAME = "rbac-id"
+
+ADMIN_NAMESPACE = "kube-system"
+RBAC_STACK_PREFIX = "juju-credential"
+
+
+def generate_rbac_id():
+    return binascii.hexlify(os.urandom(4)).decode()
 
 
-# from juju.bundle import BundleHandler
-# import re
-# import ssl
-# from .vnf import N2VC
 class K8sJujuConnector(K8sConnector):
     def __init__(
         self,
@@ -43,24 +65,55 @@ class K8sJujuConnector(K8sConnector):
         vca_config: dict = None,
     ):
         """
-
+        :param fs: file system for kubernetes and helm configuration
+        :param db: Database object
         :param kubectl_command: path to kubectl executable
         :param helm_command: path to helm executable
-        :param fs: file system for kubernetes and helm configuration
         :param log: logger
+        :param: loop: Asyncio loop
         """
 
         # parent class
         K8sConnector.__init__(
-            self, db, log=log, on_update_db=on_update_db,
+            self,
+            db,
+            log=log,
+            on_update_db=on_update_db,
         )
 
         self.fs = fs
+        self.loop = loop or asyncio.get_event_loop()
         self.log.debug("Initializing K8S Juju connector")
 
-        self.juju_command = juju_command
-        self.juju_public_key = None
-
+        required_vca_config = [
+            "host",
+            "user",
+            "secret",
+            "ca_cert",
+        ]
+        if not vca_config or not all(k in vca_config for k in required_vca_config):
+            raise N2VCBadArgumentsException(
+                message="Missing arguments in vca_config: {}".format(vca_config),
+                bad_args=required_vca_config,
+            )
+        port = vca_config["port"] if "port" in vca_config else 17070
+        url = "{}:{}".format(vca_config["host"], port)
+        model_config = ModelConfig(vca_config)
+        username = vca_config["user"]
+        secret = vca_config["secret"]
+        ca_cert = base64_to_cacert(vca_config["ca_cert"])
+
+        self.libjuju = Libjuju(
+            endpoint=url,
+            api_proxy=None,  # Not needed for k8s charms
+            model_config=model_config,
+            username=username,
+            password=secret,
+            cacert=ca_cert,
+            loop=self.loop,
+            log=self.log,
+            db=self.db,
+        )
         self.log.debug("K8S Juju connector initialized")
         # TODO: Remove these commented lines:
         # self.authenticated = False
@@ -88,129 +141,91 @@ class K8sJujuConnector(K8sConnector):
             (on error, an exception will be raised)
         """
 
-        """Bootstrapping
-
-        Bootstrapping cannot be done, by design, through the API. We need to
-        use the CLI tools.
-        """
-
-        """
-        WIP: Workflow
-
-        1. Has the environment already been bootstrapped?
-        - Check the database to see if we have a record for this env
-
-        2. If this is a new env, create it
-        - Add the k8s cloud to Juju
-        - Bootstrap
-        - Record it in the database
-
-        3. Connect to the Juju controller for this cloud
-
-        """
-        # cluster_uuid = reuse_cluster_uuid
-        # if not cluster_uuid:
-        #     cluster_uuid = str(uuid4())
-
-        ##################################################
-        # TODO: Pull info from db based on the namespace #
-        ##################################################
-
-        ###################################################
-        # TODO: Make it idempotent, calling add-k8s and   #
-        # bootstrap whenever reuse_cluster_uuid is passed #
-        # as parameter                                    #
-        # `init_env` is called to initialize the K8s      #
-        # cluster for juju. If this initialization fails, #
-        # it can be called again by LCM with the param    #
-        # reuse_cluster_uuid, e.g. to try to fix it.       #
-        ###################################################
-
-        # This is a new cluster, so bootstrap it
-
         cluster_uuid = reuse_cluster_uuid or str(uuid.uuid4())
 
-        # Is a local k8s cluster?
-        localk8s = self.is_local_k8s(k8s_creds)
-
-        # If the k8s is external, the juju controller needs a loadbalancer
-        loadbalancer = False if localk8s else True
-
-        # Name the new k8s cloud
-        k8s_cloud = "k8s-{}".format(cluster_uuid)
-
-        self.log.debug("Adding k8s cloud {}".format(k8s_cloud))
-        await self.add_k8s(k8s_cloud, k8s_creds)
-
-        # Bootstrap Juju controller
-        self.log.debug("Bootstrapping...")
-        await self.bootstrap(k8s_cloud, cluster_uuid, loadbalancer)
-        self.log.debug("Bootstrap done.")
-
-        # Get the controller information
-
-        # Parse ~/.local/share/juju/controllers.yaml
-        # controllers.testing.api-endpoints|ca-cert|uuid
-        self.log.debug("Getting controller endpoints")
-        with open(os.path.expanduser("~/.local/share/juju/controllers.yaml")) as f:
-            controllers = yaml.load(f, Loader=yaml.Loader)
-            controller = controllers["controllers"][cluster_uuid]
-            endpoints = controller["api-endpoints"]
-            juju_endpoint = endpoints[0]
-            juju_ca_cert = controller["ca-cert"]
-
-        # Parse ~/.local/share/juju/accounts
-        # controllers.testing.user|password
-        self.log.debug("Getting accounts")
-        with open(os.path.expanduser("~/.local/share/juju/accounts.yaml")) as f:
-            controllers = yaml.load(f, Loader=yaml.Loader)
-            controller = controllers["controllers"][cluster_uuid]
-
-            juju_user = controller["user"]
-            juju_secret = controller["password"]
-
-        config = {
-            "endpoint": juju_endpoint,
-            "username": juju_user,
-            "secret": juju_secret,
-            "cacert": juju_ca_cert,
-            "loadbalancer": loadbalancer,
-        }
-
-        # Store the cluster configuration so it
-        # can be used for subsequent calls
-        self.log.debug("Setting config")
-        await self.set_config(cluster_uuid, config)
-
-        # Test connection
-        controller = await self.get_controller(cluster_uuid)
-        await controller.disconnect()
+        kubecfg = tempfile.NamedTemporaryFile()
+        with open(kubecfg.name, "w") as kubecfg_file:
+            kubecfg_file.write(k8s_creds)
+        kubectl = Kubectl(config_file=kubecfg.name)
 
-        # TODO: Remove these commented lines
-        # raise Exception("EOL")
-        # self.juju_public_key = None
-        # Login to the k8s cluster
-        # if not self.authenticated:
-        #     await self.login(cluster_uuid)
+        # CREATING RESOURCES IN K8S
+        rbac_id = generate_rbac_id()
+        metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id)
+        labels = {RBAC_STACK_PREFIX: rbac_id}
+
+        # Create cleanup dictionary to clean up created resources
+        # if it fails in the middle of the process
+        cleanup_data = []
+        try:
+            self._create_cluster_role(
+                kubectl,
+                name=metadata_name,
+                labels=labels,
+            )
+            cleanup_data.append(
+                {
+                    "delete": self._delete_cluster_role,
+                    "args": (kubectl, metadata_name),
+                }
+            )
 
-        # We're creating a new cluster
-        # print("Getting model {}".format(self.get_namespace(cluster_uuid),
-        #    cluster_uuid=cluster_uuid))
-        # model = await self.get_model(
-        #    self.get_namespace(cluster_uuid),
-        #    cluster_uuid=cluster_uuid
-        # )
+            self._create_service_account(
+                kubectl,
+                name=metadata_name,
+                labels=labels,
+            )
+            cleanup_data.append(
+                {
+                    "delete": self._delete_service_account,
+                    "args": (kubectl, metadata_name),
+                }
+            )
 
-        # Disconnect from the model
-        # if model and model.is_connected():
-        #    await model.disconnect()
+            self._create_cluster_role_binding(
+                kubectl,
+                name=metadata_name,
+                labels=labels,
+            )
+            cleanup_data.append(
+                {
+                    "delete": self._delete_cluster_role_binding,
+                    "args": (kubectl, metadata_name),
+                }
+            )
+            token, client_cert_data = await self._get_secret_data(
+                kubectl,
+                metadata_name,
+            )
 
-        return cluster_uuid, True
+            default_storage_class = kubectl.get_default_storage_class()
+            await self.libjuju.add_k8s(
+                name=cluster_uuid,
+                rbac_id=rbac_id,
+                token=token,
+                client_cert_data=client_cert_data,
+                configuration=kubectl.configuration,
+                storage_class=default_storage_class,
+                credential_name=self._get_credential_name(cluster_uuid),
+            )
+            return cluster_uuid, True
+        except Exception as e:
+            self.log.error("Error initializing k8scluster: {}".format(e))
+            if len(cleanup_data) > 0:
+                self.log.debug("Cleaning up created resources in k8s cluster...")
+                for item in cleanup_data:
+                    delete_function = item["delete"]
+                    delete_args = item["args"]
+                    delete_function(*delete_args)
+                self.log.debug("Cleanup finished")
+            raise e
 
     """Repo Management"""
 
     async def repo_add(
-        self, name: str, url: str, _type: str = "charm",
+        self,
+        name: str,
+        url: str,
+        _type: str = "charm",
     ):
         raise MethodNotImplemented()
 
@@ -218,7 +233,8 @@ class K8sJujuConnector(K8sConnector):
         raise MethodNotImplemented()
 
     async def repo_remove(
-        self, name: str,
+        self,
+        name: str,
     ):
         raise MethodNotImplemented()
 
@@ -242,46 +258,43 @@ class K8sJujuConnector(K8sConnector):
         """
 
         try:
+            self.log.debug("[reset] Removing k8s cloud")
 
-            # Remove k8scluster from database
-            self.log.debug("[reset] Removing k8scluster from juju database")
-            juju_db = self.db.get_one("admin", {"_id": "juju"})
-
-            for k in juju_db["k8sclusters"]:
-                if k["_id"] == cluster_uuid:
-                    juju_db["k8sclusters"].remove(k)
-                    self.db.set_one(
-                        table="admin",
-                        q_filter={"_id": "juju"},
-                        update_dict={"k8sclusters": juju_db["k8sclusters"]},
-                    )
-                    break
+            cloud_creds = await self.libjuju.get_cloud_credentials(
+                cluster_uuid,
+                self._get_credential_name(cluster_uuid),
+            )
 
-            # Destroy the controller (via CLI)
-            self.log.debug("[reset] Destroying controller")
-            await self.destroy_controller(cluster_uuid)
+            await self.libjuju.remove_cloud(cluster_uuid)
 
-            self.log.debug("[reset] Removing k8s cloud")
-            k8s_cloud = "k8s-{}".format(cluster_uuid)
-            await self.remove_cloud(k8s_cloud)
+            kubecfg = self.get_credentials(cluster_uuid=cluster_uuid)
 
-        except Exception as ex:
-            self.log.debug("Caught exception during reset: {}".format(ex))
-        return True
-        # TODO: Remove these commented lines
-        #     if not self.authenticated:
-        #         await self.login(cluster_uuid)
+            kubecfg_file = tempfile.NamedTemporaryFile()
+            with open(kubecfg_file.name, "w") as f:
+                f.write(kubecfg)
+            kubectl = Kubectl(config_file=kubecfg_file.name)
+
+            delete_functions = [
+                self._delete_cluster_role_binding,
+                self._delete_service_account,
+                self._delete_cluster_role,
+            ]
 
-        #     if self.controller.is_connected():
-        #         # Destroy the model
-        #         namespace = self.get_namespace(cluster_uuid)
-        #         if await self.has_model(namespace):
-        #             self.log.debug("[reset] Destroying model")
-        #             await self.controller.destroy_model(namespace, destroy_storage=True)
+            credential_attrs = cloud_creds[0].result["attrs"]
+            if RBAC_LABEL_KEY_NAME in credential_attrs:
+                rbac_id = credential_attrs[RBAC_LABEL_KEY_NAME]
+                metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id)
+                delete_args = (kubectl, metadata_name)
+                for delete_func in delete_functions:
+                    try:
+                        delete_func(*delete_args)
+                    except Exception as e:
+                        self.log.warning("Cannot remove resource in K8s {}".format(e))
 
-        #         # Disconnect from the controller
-        #         self.log.debug("[reset] Disconnecting controller")
-        #         await self.logout()
+        except Exception as e:
+            self.log.debug("Caught exception during reset: {}".format(e))
+            raise e
+        return True
 
     """Deployment"""
 
@@ -289,8 +302,9 @@ class K8sJujuConnector(K8sConnector):
         self,
         cluster_uuid: str,
         kdu_model: str,
+        kdu_instance: str,
         atomic: bool = True,
-        timeout: float = 300,
+        timeout: float = 1800,
         params: dict = None,
         db_dict: dict = None,
         kdu_name: str = None,
@@ -300,6 +314,7 @@ class K8sJujuConnector(K8sConnector):
 
         :param cluster_uuid str: The UUID of the cluster to install to
         :param kdu_model str: The name or path of a bundle to install
+        :param kdu_instance: Kdu instance name
         :param atomic bool: If set, waits until the model is active and resets
                             the cluster on failure.
         :param timeout int: The time, in seconds, to wait for the install
@@ -310,99 +325,55 @@ class K8sJujuConnector(K8sConnector):
 
         :return: If successful, returns ?
         """
+        bundle = kdu_model
 
-        controller = await self.get_controller(cluster_uuid)
+        if not db_dict:
+            raise K8sException("db_dict must be set")
+        if not bundle:
+            raise K8sException("bundle must be set")
 
-        ##
-        # Get or create the model, based on the NS
-        # uuid.
-        if kdu_name:
-            kdu_instance = "{}-{}".format(kdu_name, db_dict["filter"]["_id"])
+        if bundle.startswith("cs:"):
+            pass
+        elif bundle.startswith("http"):
+            # Download the file
+            pass
         else:
-            kdu_instance = db_dict["filter"]["_id"]
+            new_workdir = kdu_model.strip(kdu_model.split("/")[-1])
+            os.chdir(new_workdir)
+            bundle = "local:{}".format(kdu_model)
 
         self.log.debug("Checking for model named {}".format(kdu_instance))
 
         # Create the new model
         self.log.debug("Adding model: {}".format(kdu_instance))
-        model = await self.add_model(
-            kdu_instance, cluster_uuid=cluster_uuid, controller=controller
+        await self.libjuju.add_model(
+            model_name=kdu_instance,
+            cloud_name=cluster_uuid,
+            credential_name=self._get_credential_name(cluster_uuid),
         )
 
-        if model:
-            # TODO: Instantiation parameters
+        # TODO: Instantiation parameters
 
-            """
-            "Juju bundle that models the KDU, in any of the following ways:
-                - <juju-repo>/<juju-bundle>
-                - <juju-bundle folder under k8s_models folder in the package>
-                - <juju-bundle tgz file (w/ or w/o extension) under k8s_models folder
-                    in the package>
-                - <URL_where_to_fetch_juju_bundle>
-            """
-            try:
-                previous_workdir = os.getcwd()
-            except FileNotFoundError:
-                previous_workdir = "/app/storage"
-
-            bundle = kdu_model
-            if kdu_model.startswith("cs:"):
-                bundle = kdu_model
-            elif kdu_model.startswith("http"):
-                # Download the file
-                pass
-            else:
-                new_workdir = kdu_model.strip(kdu_model.split("/")[-1])
-
-                os.chdir(new_workdir)
-
-                bundle = "local:{}".format(kdu_model)
-
-            if not bundle:
-                # Raise named exception that the bundle could not be found
-                raise Exception()
-
-            self.log.debug("[install] deploying {}".format(bundle))
-            await model.deploy(bundle)
-
-            # Get the application
-            if atomic:
-                # applications = model.applications
-                self.log.debug("[install] Applications: {}".format(model.applications))
-                for name in model.applications:
-                    self.log.debug("[install] Waiting for {} to settle".format(name))
-                    application = model.applications[name]
-                    try:
-                        # It's not enough to wait for all units to be active;
-                        # the application status needs to be active as well.
-                        self.log.debug("Waiting for all units to be active...")
-                        await model.block_until(
-                            lambda: all(
-                                unit.agent_status == "idle"
-                                and application.status in ["active", "unknown"]
-                                and unit.workload_status in ["active", "unknown"]
-                                for unit in application.units
-                            ),
-                            timeout=timeout,
-                        )
-                        self.log.debug("All units active.")
-
-                    # TODO use asyncio.TimeoutError
-                    except concurrent.futures._base.TimeoutError:
-                        os.chdir(previous_workdir)
-                        self.log.debug("[install] Timeout exceeded; resetting cluster")
-                        await self.reset(cluster_uuid)
-                        return False
-
-            # Wait for the application to be active
-            if model.is_connected():
-                self.log.debug("[install] Disconnecting model")
-                await model.disconnect()
-            await controller.disconnect()
-            os.chdir(previous_workdir)
-
-            return kdu_instance
-        raise Exception("Unable to install")
+        """
+        "Juju bundle that models the KDU, in any of the following ways:
+            - <juju-repo>/<juju-bundle>
+            - <juju-bundle folder under k8s_models folder in the package>
+            - <juju-bundle tgz file (w/ or w/o extension) under k8s_models folder
+                in the package>
+            - <URL_where_to_fetch_juju_bundle>
+        """
+        try:
+            previous_workdir = os.getcwd()
+        except FileNotFoundError:
+            previous_workdir = "/app/storage"
+
+        self.log.debug("[install] deploying {}".format(bundle))
+        await self.libjuju.deploy(
+            bundle, model_name=kdu_instance, wait=atomic, timeout=timeout
+        )
+        os.chdir(previous_workdir)
+        return True
 
     async def instances_list(self, cluster_uuid: str) -> list:
         """
@@ -445,64 +416,14 @@ class K8sJujuConnector(K8sConnector):
         initial release.
         """
         raise MethodNotImplemented()
-        # TODO: Remove these commented lines
-
-        # model = await self.get_model(namespace, cluster_uuid=cluster_uuid)
-
-        # model = None
-        # namespace = self.get_namespace(cluster_uuid)
-        # controller = await self.get_controller(cluster_uuid)
-
-        # try:
-        #     if namespace not in await controller.list_models():
-        #         raise N2VCNotFound(message="Model {} does not exist".format(namespace))
-
-        #     model = await controller.get_model(namespace)
-        #     with open(kdu_model, "r") as f:
-        #         bundle = yaml.safe_load(f)
-
-        #         """
-        #         {
-        #             'description': 'Test bundle',
-        #             'bundle': 'kubernetes',
-        #             'applications': {
-        #                 'mariadb-k8s': {
-        #                     'charm': 'cs:~charmed-osm/mariadb-k8s-20',
-        #                     'scale': 1,
-        #                     'options': {
-        #                         'password': 'manopw',
-        #                         'root_password': 'osm4u',
-        #                         'user': 'mano'
-        #                     },
-        #                     'series': 'kubernetes'
-        #                 }
-        #             }
-        #         }
-        #         """
-        #         # TODO: This should be returned in an agreed-upon format
-        #         for name in bundle["applications"]:
-        #             self.log.debug(model.applications)
-        #             application = model.applications[name]
-        #             self.log.debug(application)
-
-        #             path = bundle["applications"][name]["charm"]
-
-        #             try:
-        #                 await application.upgrade_charm(switch=path)
-        #             except juju.errors.JujuError as ex:
-        #                 if "already running charm" in str(ex):
-        #                     # We're already running this version
-        #                     pass
-        # finally:
-        #     if model:
-        #         await model.disconnect()
-        #     await controller.disconnect()
-        # return True
 
     """Rollback"""
 
     async def rollback(
-        self, cluster_uuid: str, kdu_instance: str, revision: int = 0,
+        self,
+        cluster_uuid: str,
+        kdu_instance: str,
+        revision: int = 0,
     ) -> str:
         """Rollback a model
 
@@ -527,14 +448,12 @@ class K8sJujuConnector(K8sConnector):
         :return: Returns True if successful, or raises an exception
         """
 
-        controller = await self.get_controller(cluster_uuid)
-
         self.log.debug("[uninstall] Destroying model")
 
-        await controller.destroy_models(kdu_instance)
+        await self.libjuju.destroy_model(kdu_instance, total_timeout=3600)
 
-        self.log.debug("[uninstall] Model destroyed and disconnecting")
-        await controller.disconnect()
+        self.log.debug("[uninstall] Model destroyed")
 
         return True
         # TODO: Remove these commented lines
@@ -563,8 +482,6 @@ class K8sJujuConnector(K8sConnector):
         :return: Returns the output of the action
         """
 
-        controller = await self.get_controller(cluster_uuid)
-
         if not params or "application-name" not in params:
             raise K8sException(
                 "Missing application-name argument, \
@@ -575,33 +492,12 @@ class K8sJujuConnector(K8sConnector):
                 "[exec_primitive] Getting model "
                 "kdu_instance: {}".format(kdu_instance)
             )
-
-            model = await self.get_model(kdu_instance, controller=controller)
-
             application_name = params["application-name"]
-            application = model.applications[application_name]
-
-            actions = await application.get_actions()
+            actions = await self.libjuju.get_actions(application_name, kdu_instance)
             if primitive_name not in actions:
                 raise K8sException("Primitive {} not found".format(primitive_name))
-
-            unit = None
-            for u in application.units:
-                if await u.is_leader_from_status():
-                    unit = u
-                    break
-
-            if unit is None:
-                raise K8sException("No leader unit found to execute action")
-
-            self.log.debug("[exec_primitive] Running action: {}".format(primitive_name))
-            action = await unit.run_action(primitive_name, **params)
-
-            output = await model.get_action_output(action_uuid=action.entity_id)
-            status = await model.get_action_status(uuid_or_prefix=action.entity_id)
-
-            status = (
-                status[action.entity_id] if action.entity_id in status else "failed"
+            output, status = await self.libjuju.execute_action(
+                application_name, kdu_instance, primitive_name, **params
             )
 
             if status != "completed":
@@ -615,16 +511,13 @@ class K8sJujuConnector(K8sConnector):
             error_msg = "Error executing primitive {}: {}".format(primitive_name, e)
             self.log.error(error_msg)
             raise K8sException(message=error_msg)
-        finally:
-            await controller.disconnect()
-        # TODO: Remove these commented lines:
-        # if not self.authenticated:
-        #     self.log.debug("[exec_primitive] Connecting to controller")
-        #     await self.login(cluster_uuid)
 
     """Introspection"""
 
-    async def inspect_kdu(self, kdu_model: str,) -> dict:
+    async def inspect_kdu(
+        self,
+        kdu_model: str,
+    ) -> dict:
         """Inspect a KDU
 
         Inspects a bundle and returns a dictionary of config parameters and
@@ -637,8 +530,11 @@ class K8sJujuConnector(K8sConnector):
         """
 
         kdu = {}
+        if not os.path.exists(kdu_model):
+            raise K8sException("file {} not found".format(kdu_model))
+
         with open(kdu_model, "r") as f:
-            bundle = yaml.safe_load(f)
+            bundle = yaml.safe_load(f.read())
 
             """
             {
@@ -663,7 +559,10 @@ class K8sJujuConnector(K8sConnector):
 
         return kdu
 
-    async def help_kdu(self, kdu_model: str,) -> str:
+    async def help_kdu(
+        self,
+        kdu_model: str,
+    ) -> str:
         """View the README
 
         If available, returns the README of the bundle.
@@ -684,7 +583,11 @@ class K8sJujuConnector(K8sConnector):
 
         return readme
 
-    async def status_kdu(self, cluster_uuid: str, kdu_instance: str,) -> dict:
+    async def status_kdu(
+        self,
+        cluster_uuid: str,
+        kdu_instance: str,
+    ) -> dict:
         """Get the status of the KDU
 
         Get the current status of the KDU instance.
@@ -696,19 +599,11 @@ class K8sJujuConnector(K8sConnector):
                  and deployment_time.
         """
         status = {}
-        controller = await self.get_controller(cluster_uuid)
-        model = await self.get_model(kdu_instance, controller=controller)
-
-        model_status = await model.get_status()
-        status = model_status.applications
-
+        model_status = await self.libjuju.get_model_status(kdu_instance)
         for name in model_status.applications:
             application = model_status.applications[name]
             status[name] = {"status": application["status"]["status"]}
 
-        await model.disconnect()
-        await controller.disconnect()
-
         return status
 
     async def get_services(
@@ -718,15 +613,11 @@ class K8sJujuConnector(K8sConnector):
 
         credentials = self.get_credentials(cluster_uuid=cluster_uuid)
 
-        config_path = "/tmp/{}".format(cluster_uuid)
-        config_file = "{}/config".format(config_path)
+        kubecfg = tempfile.NamedTemporaryFile()
+        with open(kubecfg.name, "w") as kubecfg_file:
+            kubecfg_file.write(credentials)
+        kubectl = Kubectl(config_file=kubecfg.name)
 
-        if not os.path.exists(config_path):
-            os.makedirs(config_path)
-        with open(config_file, "w") as f:
-            f.write(credentials)
-
-        kubectl = Kubectl(config_file=config_file)
         return kubectl.get_services(
             field_selector="metadata.namespace={}".format(kdu_instance)
         )
@@ -738,15 +629,10 @@ class K8sJujuConnector(K8sConnector):
 
         credentials = self.get_credentials(cluster_uuid=cluster_uuid)
 
-        config_path = "/tmp/{}".format(cluster_uuid)
-        config_file = "{}/config".format(config_path)
-
-        if not os.path.exists(config_path):
-            os.makedirs(config_path)
-        with open(config_file, "w") as f:
-            f.write(credentials)
-
-        kubectl = Kubectl(config_file=config_file)
+        kubecfg = tempfile.NamedTemporaryFile()
+        with open(kubecfg.name, "w") as kubecfg_file:
+            kubecfg_file.write(credentials)
+        kubectl = Kubectl(config_file=kubecfg.name)
 
         return kubectl.get_services(
             field_selector="metadata.name={},metadata.namespace={}".format(
@@ -754,156 +640,6 @@ class K8sJujuConnector(K8sConnector):
             )
         )[0]
 
-    # Private methods
-    async def add_k8s(self, cloud_name: str, credentials: str,) -> bool:
-        """Add a k8s cloud to Juju
-
-        Adds a Kubernetes cloud to Juju, so it can be bootstrapped with a
-        Juju Controller.
-
-        :param cloud_name str: The name of the cloud to add.
-        :param credentials dict: A dictionary representing the output of
-            `kubectl config view --raw`.
-
-        :returns: True if successful, otherwise raises an exception.
-        """
-
-        cmd = [self.juju_command, "add-k8s", "--local", cloud_name]
-        self.log.debug(cmd)
-
-        process = await asyncio.create_subprocess_exec(
-            *cmd,
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
-            stdin=asyncio.subprocess.PIPE,
-        )
-
-        # Feed the process the credentials
-        process.stdin.write(credentials.encode("utf-8"))
-        await process.stdin.drain()
-        process.stdin.close()
-
-        _stdout, stderr = await process.communicate()
-
-        return_code = process.returncode
-
-        self.log.debug("add-k8s return code: {}".format(return_code))
-
-        if return_code > 0:
-            raise Exception(stderr)
-
-        return True
-
-    async def add_model(
-        self, model_name: str, cluster_uuid: str, controller: Controller
-    ) -> Model:
-        """Adds a model to the controller
-
-        Adds a new model to the Juju controller
-
-        :param model_name str: The name of the model to add.
-        :param cluster_uuid str: ID of the cluster.
-        :param controller: Controller object in which the model will be added
-        :returns: The juju.model.Model object of the new model upon success or
-                  raises an exception.
-        """
-
-        self.log.debug(
-            "Adding model '{}' to cluster_uuid '{}'".format(model_name, cluster_uuid)
-        )
-        model = None
-        try:
-            if self.juju_public_key is not None:
-                model = await controller.add_model(
-                    model_name, config={"authorized-keys": self.juju_public_key}
-                )
-            else:
-                model = await controller.add_model(model_name)
-        except Exception as ex:
-            self.log.debug(ex)
-            self.log.debug("Caught exception: {}".format(ex))
-            pass
-
-        return model
-
-    async def bootstrap(
-        self, cloud_name: str, cluster_uuid: str, loadbalancer: bool
-    ) -> bool:
-        """Bootstrap a Kubernetes controller
-
-        Bootstrap a Juju controller inside the Kubernetes cluster
-
-        :param cloud_name str: The name of the cloud.
-        :param cluster_uuid str: The UUID of the cluster to bootstrap.
-        :param loadbalancer bool: If the controller should use loadbalancer or not.
-        :returns: True upon success or raises an exception.
-        """
-
-        if not loadbalancer:
-            cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid]
-        else:
-            """
-            For public clusters, specify that the controller service is using a
-            LoadBalancer.
-            """
-            cmd = [
-                self.juju_command,
-                "bootstrap",
-                cloud_name,
-                cluster_uuid,
-                "--config",
-                "controller-service-type=loadbalancer",
-            ]
-
-        self.log.debug(
-            "Bootstrapping controller {} in cloud {}".format(cluster_uuid, cloud_name)
-        )
-
-        process = await asyncio.create_subprocess_exec(
-            *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
-        )
-
-        _stdout, stderr = await process.communicate()
-
-        return_code = process.returncode
-
-        if return_code > 0:
-            #
-            if b"already exists" not in stderr:
-                raise Exception(stderr)
-
-        return True
-
-    async def destroy_controller(self, cluster_uuid: str) -> bool:
-        """Destroy a Kubernetes controller
-
-        Destroy an existing Kubernetes controller.
-
-        :param cluster_uuid str: The UUID of the cluster to bootstrap.
-        :returns: True upon success or raises an exception.
-        """
-        cmd = [
-            self.juju_command,
-            "destroy-controller",
-            "--destroy-all-models",
-            "--destroy-storage",
-            "-y",
-            cluster_uuid,
-        ]
-
-        process = await asyncio.create_subprocess_exec(
-            *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
-        )
-
-        _stdout, stderr = await process.communicate()
-
-        return_code = process.returncode
-
-        if return_code > 0:
-            #
-            if "already exists" not in stderr:
-                raise Exception(stderr)
-
     def get_credentials(self, cluster_uuid: str) -> str:
         """
         Get Cluster Kubeconfig
@@ -922,222 +658,155 @@ class K8sJujuConnector(K8sConnector):
 
         return yaml.safe_dump(k8scluster.get("credentials"))
 
-    def get_config(self, cluster_uuid: str,) -> dict:
-        """Get the cluster configuration
-
-        Gets the configuration of the cluster
-
-        :param cluster_uuid str: The UUID of the cluster.
-        :return: A dict upon success, or raises an exception.
+    def _get_credential_name(self, cluster_uuid: str) -> str:
         """
+        Get credential name for a k8s cloud
 
-        juju_db = self.db.get_one("admin", {"_id": "juju"})
-        config = None
-        for k in juju_db["k8sclusters"]:
-            if k["_id"] == cluster_uuid:
-                config = k["config"]
-                self.db.encrypt_decrypt_fields(
-                    config,
-                    "decrypt",
-                    ["secret", "cacert"],
-                    schema_version="1.1",
-                    salt=k["_id"],
-                )
-                break
-        if not config:
-            raise Exception(
-                "Unable to locate configuration for cluster {}".format(cluster_uuid)
-            )
-        return config
-
-    async def get_model(self, model_name: str, controller: Controller) -> Model:
-        """Get a model from the Juju Controller.
+        We cannot use the cluster_uuid for the credential name directly,
+        because it cannot start with a number, it must start with a letter.
+        Therefore, the k8s cloud credential name will be "cred-" followed
+        by the cluster uuid.
 
-        Note: Model objects returned must call disconnected() before it goes
-        out of scope.
+        :param: cluster_uuid:   Cluster UUID of the kubernetes cloud (=cloud_name)
 
-        :param model_name str: The name of the model to get
-        :param controller Controller: Controller object
-        :return The juju.model.Model object if found, or None.
+        :return:                Name to use for the credential name.
         """
+        return "cred-{}".format(cluster_uuid)
 
-        models = await controller.list_models()
-        if model_name not in models:
-            raise N2VCNotFound("Model {} not found".format(model_name))
-        self.log.debug("Found model: {}".format(model_name))
-        return await controller.get_model(model_name)
-
-    def get_namespace(self, cluster_uuid: str,) -> str:
+    def get_namespace(
+        self,
+        cluster_uuid: str,
+    ) -> str:
         """Get the namespace UUID
         Gets the namespace's unique name
 
         :param cluster_uuid str: The UUID of the cluster
         :returns: The namespace UUID, or raises an exception
         """
-        config = self.get_config(cluster_uuid)
-
-        # Make sure the name is in the config
-        if "namespace" not in config:
-            raise Exception("Namespace not found.")
-
-        # TODO: We want to make sure this is unique to the cluster, in case
-        # the cluster is being reused.
-        # Consider pre/appending the cluster id to the namespace string
-        return config["namespace"]
-
-    # TODO: Remove these lines of code
-    # async def has_model(self, model_name: str) -> bool:
-    #     """Check if a model exists in the controller
+        pass
 
-    #     Checks to see if a model exists in the connected Juju controller.
-
-    #     :param model_name str: The name of the model
-    #     :return: A boolean indicating if the model exists
-    #     """
-    #     models = await self.controller.list_models()
-
-    #     if model_name in models:
-    #         return True
-    #     return False
-
-    def is_local_k8s(self, credentials: str,) -> bool:
-        """Check if a cluster is local
-
-        Checks if a cluster is running in the local host
-
-        :param credentials dict: A dictionary containing the k8s credentials
-        :returns: A boolean if the cluster is running locally
-        """
-
-        creds = yaml.safe_load(credentials)
-
-        if creds and os.getenv("OSMLCM_VCA_APIPROXY"):
-            for cluster in creds["clusters"]:
-                if "server" in cluster["cluster"]:
-                    if os.getenv("OSMLCM_VCA_APIPROXY") in cluster["cluster"]["server"]:
-                        return True
-
-        return False
-
-    async def get_controller(self, cluster_uuid):
-        """Login to the Juju controller."""
-
-        config = self.get_config(cluster_uuid)
-
-        juju_endpoint = config["endpoint"]
-        juju_user = config["username"]
-        juju_secret = config["secret"]
-        juju_ca_cert = config["cacert"]
-
-        controller = Controller()
+    def _create_cluster_role(
+        self,
+        kubectl: Kubectl,
+        name: str,
+        labels: Dict[str, str],
+    ):
+        cluster_roles = kubectl.clients[RBAC_CLIENT].list_cluster_role(
+            field_selector="metadata.name={}".format(name)
+        )
 
-        if juju_secret:
-            self.log.debug(
-                "Connecting to controller... ws://{} as {}".format(
-                    juju_endpoint, juju_user,
-                )
+        if len(cluster_roles.items) > 0:
+            raise Exception(
+                "Cluster role with metadata.name={} already exists".format(name)
             )
-            try:
-                await controller.connect(
-                    endpoint=juju_endpoint,
-                    username=juju_user,
-                    password=juju_secret,
-                    cacert=juju_ca_cert,
-                )
-                self.log.debug("JujuApi: Logged into controller")
-                return controller
-            except Exception as ex:
-                self.log.debug(ex)
-                self.log.debug("Caught exception: {}".format(ex))
-        else:
-            self.log.fatal("VCA credentials not configured.")
-
-    # TODO: Remove these commented lines
-    #         self.authenticated = False
-    # if self.authenticated:
-    #         return
-
-    #     self.connecting = True
-    #     juju_public_key = None
-    #     self.authenticated = True
-    #     Test: Make sure we have the credentials loaded
-    # async def logout(self):
-    #     """Logout of the Juju controller."""
-    #     self.log.debug("[logout]")
-    #     if not self.authenticated:
-    #         return False
-
-    #     for model in self.models:
-    #         self.log.debug("Logging out of model {}".format(model))
-    #         await self.models[model].disconnect()
-
-    #     if self.controller:
-    #         self.log.debug("Disconnecting controller {}".format(self.controller))
-    #         await self.controller.disconnect()
-    #         self.controller = None
-
-    #     self.authenticated = False
-
-    async def remove_cloud(self, cloud_name: str,) -> bool:
-        """Remove a k8s cloud from Juju
-
-        Removes a Kubernetes cloud from Juju.
-
-        :param cloud_name str: The name of the cloud to add.
 
-        :returns: True if successful, otherwise raises an exception.
-        """
-
-        # Remove the bootstrapped controller
-        cmd = [self.juju_command, "remove-k8s", "--client", cloud_name]
-        process = await asyncio.create_subprocess_exec(
-            *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
+        metadata = V1ObjectMeta(name=name, labels=labels, namespace=ADMIN_NAMESPACE)
+        # Cluster role
+        cluster_role = V1ClusterRole(
+            metadata=metadata,
+            rules=[
+                V1PolicyRule(api_groups=["*"], resources=["*"], verbs=["*"]),
+                V1PolicyRule(non_resource_ur_ls=["*"], verbs=["*"]),
+            ],
         )
 
-        _stdout, stderr = await process.communicate()
+        kubectl.clients[RBAC_CLIENT].create_cluster_role(cluster_role)
 
-        return_code = process.returncode
+    def _delete_cluster_role(self, kubectl: Kubectl, name: str):
+        kubectl.clients[RBAC_CLIENT].delete_cluster_role(name)
 
-        if return_code > 0:
-            raise Exception(stderr)
-
-        # Remove the cloud from the local config
-        cmd = [self.juju_command, "remove-cloud", "--client", cloud_name]
-        process = await asyncio.create_subprocess_exec(
-            *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
+    def _create_service_account(
+        self,
+        kubectl: Kubectl,
+        name: str,
+        labels: Dict[str, str],
+    ):
+        service_accounts = kubectl.clients[CORE_CLIENT].list_namespaced_service_account(
+            ADMIN_NAMESPACE, field_selector="metadata.name={}".format(name)
         )
+        if len(service_accounts.items) > 0:
+            raise Exception(
+                "Service account with metadata.name={} already exists".format(name)
+            )
 
-        _stdout, stderr = await process.communicate()
+        metadata = V1ObjectMeta(name=name, labels=labels, namespace=ADMIN_NAMESPACE)
+        service_account = V1ServiceAccount(metadata=metadata)
 
-        return_code = process.returncode
+        kubectl.clients[CORE_CLIENT].create_namespaced_service_account(
+            ADMIN_NAMESPACE, service_account
+        )
 
-        if return_code > 0:
-            raise Exception(stderr)
+    def _delete_service_account(self, kubectl: Kubectl, name: str):
+        kubectl.clients[CORE_CLIENT].delete_namespaced_service_account(
+            name, ADMIN_NAMESPACE
+        )
 
-        return True
+    def _create_cluster_role_binding(
+        self,
+        kubectl: Kubectl,
+        name: str,
+        labels: Dict[str, str],
+    ):
+        role_bindings = kubectl.clients[RBAC_CLIENT].list_cluster_role_binding(
+            field_selector="metadata.name={}".format(name)
+        )
+        if len(role_bindings.items) > 0:
+            raise Exception("Generated rbac id already exists")
+
+        role_binding = V1ClusterRoleBinding(
+            metadata=V1ObjectMeta(name=name, labels=labels),
+            role_ref=V1RoleRef(kind="ClusterRole", name=name, api_group=""),
+            subjects=[
+                V1Subject(kind="ServiceAccount", name=name, namespace=ADMIN_NAMESPACE)
+            ],
+        )
+        kubectl.clients[RBAC_CLIENT].create_cluster_role_binding(role_binding)
 
-    async def set_config(self, cluster_uuid: str, config: dict,) -> bool:
-        """Save the cluster configuration
+    def _delete_cluster_role_binding(self, kubectl: Kubectl, name: str):
+        kubectl.clients[RBAC_CLIENT].delete_cluster_role_binding(name)
 
-        Saves the cluster information to the Mongo database
+    async def _get_secret_data(self, kubectl: Kubectl, name: str) -> (str, str):
+        v1_core = kubectl.clients[CORE_CLIENT]
 
-        :param cluster_uuid str: The UUID of the cluster
-        :param config dict: A dictionary containing the cluster configuration
-        """
+        retries_limit = 10
+        secret_name = None
+        while True:
+            retries_limit -= 1
+            service_accounts = v1_core.list_namespaced_service_account(
+                ADMIN_NAMESPACE, field_selector="metadata.name={}".format(name)
+            )
+            if len(service_accounts.items) == 0:
+                raise Exception(
+                    "Service account not found with metadata.name={}".format(name)
+                )
+            service_account = service_accounts.items[0]
+            if service_account.secrets and len(service_account.secrets) > 0:
+                secret_name = service_account.secrets[0].name
+            if secret_name is not None or not retries_limit:
+                break
+        if not secret_name:
+            raise Exception(
+                "Failed getting the secret from service account {}".format(name)
+            )
+        secret = v1_core.list_namespaced_secret(
+            ADMIN_NAMESPACE,
+            field_selector="metadata.name={}".format(secret_name),
+        ).items[0]
 
-        juju_db = self.db.get_one("admin", {"_id": "juju"})
+        token = secret.data[SERVICE_ACCOUNT_TOKEN_KEY]
+        client_certificate_data = secret.data[SERVICE_ACCOUNT_ROOT_CA_KEY]
 
-        k8sclusters = juju_db["k8sclusters"] if "k8sclusters" in juju_db else []
-        self.db.encrypt_decrypt_fields(
-            config,
-            "encrypt",
-            ["secret", "cacert"],
-            schema_version="1.1",
-            salt=cluster_uuid,
-        )
-        k8sclusters.append({"_id": cluster_uuid, "config": config})
-        self.db.set_one(
-            table="admin",
-            q_filter={"_id": "juju"},
-            update_dict={"k8sclusters": k8sclusters},
+        return (
+            base64.b64decode(token).decode("utf-8"),
+            base64.b64decode(client_certificate_data).decode("utf-8"),
         )
+
+    @staticmethod
+    def generate_kdu_instance_name(**kwargs):
+        db_dict = kwargs.get("db_dict")
+        kdu_name = kwargs.get("kdu_name", None)
+        if kdu_name:
+            kdu_instance = "{}-{}".format(kdu_name, db_dict["filter"]["_id"])
+        else:
+            kdu_instance = db_dict["filter"]["_id"]
+        return kdu_instance