Bug 1609 fix
[osm/N2VC.git] / n2vc / k8s_juju_conn.py
index 9bc5d40..149947d 100644 (file)
 #     limitations under the License.
 
 import asyncio
-import concurrent
-from .exceptions import NotImplemented
+import os
+import uuid
+import yaml
+import tempfile
+import binascii
 
-import io
-import juju
-# from juju.bundle import BundleHandler
-from juju.controller import Controller
-from juju.model import Model
-from juju.errors import JujuAPIError, JujuError
+from n2vc.config import EnvironConfig
+from n2vc.exceptions import K8sException
+from n2vc.k8s_conn import K8sConnector
+from n2vc.kubectl import Kubectl
+from .exceptions import MethodNotImplemented
+from n2vc.libjuju import Libjuju
+from n2vc.utils import obj_to_dict, obj_to_yaml
+from n2vc.store import MotorStore
+from n2vc.vca.cloud import Cloud
+from n2vc.vca.connection import get_connection
 
-import logging
 
-from n2vc.k8s_conn import K8sConnector
+RBAC_LABEL_KEY_NAME = "rbac-id"
+RBAC_STACK_PREFIX = "juju-credential"
 
-import os
-# import re
-# import ssl
-# from .vnf import N2VC
 
-import uuid
-import yaml
+def generate_rbac_id():
+    return binascii.hexlify(os.urandom(4)).decode()
 
 
 class K8sJujuConnector(K8sConnector):
+    libjuju = None
 
     def __init__(
-            self,
-            fs: object,
-            db: object,
-            kubectl_command: str = '/usr/bin/kubectl',
-            juju_command: str = '/usr/bin/juju',
-            log=None,
-            on_update_db=None,
+        self,
+        fs: object,
+        db: object,
+        kubectl_command: str = "/usr/bin/kubectl",
+        juju_command: str = "/usr/bin/juju",
+        log: object = None,
+        loop: object = None,
+        on_update_db=None,
     ):
         """
-
+        :param fs: file system for kubernetes and helm configuration
+        :param db: Database object
         :param kubectl_command: path to kubectl executable
         :param helm_command: path to helm executable
-        :param fs: file system for kubernetes and helm configuration
         :param log: logger
+        :param: loop: Asyncio loop
         """
 
         # parent class
@@ -64,234 +70,195 @@ class K8sJujuConnector(K8sConnector):
         )
 
         self.fs = fs
-        self.info('Initializing K8S Juju connector')
+        self.loop = loop or asyncio.get_event_loop()
+        self.log.debug("Initializing K8S Juju connector")
 
-        self.authenticated = False
-        self.models = {}
-        self.log = logging.getLogger(__name__)
+        db_uri = EnvironConfig(prefixes=["OSMLCM_", "OSMMON_"]).get("database_uri")
+        self._store = MotorStore(db_uri)
+        self.loading_libjuju = asyncio.Lock(loop=self.loop)
 
-        self.juju_command = juju_command
-        self.juju_secret = ""
-
-        self.info('K8S Juju connector initialized')
+        self.log.debug("K8S Juju connector initialized")
+        # TODO: Remove these commented lines:
+        # self.authenticated = False
+        # self.models = {}
+        # self.juju_secret = ""
 
     """Initialization"""
+
     async def init_env(
         self,
         k8s_creds: str,
-        namespace: str = 'kube-system',
+        namespace: str = "kube-system",
         reuse_cluster_uuid: str = None,
+        **kwargs,
     ) -> (str, bool):
         """
         It prepares a given K8s cluster environment to run Juju bundles.
 
-        :param k8s_creds: credentials to access a given K8s cluster, i.e. a valid '.kube/config'
-        :param namespace: optional namespace to be used for juju. By default, 'kube-system' will be used
+        :param k8s_creds: credentials to access a given K8s cluster, i.e. a valid
+            '.kube/config'
+        :param namespace: optional namespace to be used for juju. By default,
+            'kube-system' will be used
         :param reuse_cluster_uuid: existing cluster uuid for reuse
-        :return: uuid of the K8s cluster and True if connector has installed some software in the cluster
-        (on error, an exception will be raised)
-        """
-
-        """Bootstrapping
+        :param: kwargs: Additional parameters
+            vca_id (str): VCA ID
 
-        Bootstrapping cannot be done, by design, through the API. We need to
-        use the CLI tools.
+        :return: uuid of the K8s cluster and True if connector has installed some
+            software in the cluster
+            (on error, an exception will be raised)
         """
+        libjuju = await self._get_libjuju(kwargs.get("vca_id"))
 
-        """
-        WIP: Workflow
-
-        1. Has the environment already been bootstrapped?
-        - Check the database to see if we have a record for this env
-
-        2. If this is a new env, create it
-        - Add the k8s cloud to Juju
-        - Bootstrap
-        - Record it in the database
+        cluster_uuid = reuse_cluster_uuid or str(uuid.uuid4())
+        kubectl = self._get_kubectl(k8s_creds)
 
-        3. Connect to the Juju controller for this cloud
-
-        """
-        # cluster_uuid = reuse_cluster_uuid
-        # if not cluster_uuid:
-        #     cluster_uuid = str(uuid4())
-
-        ##################################################
-        # TODO: Pull info from db based on the namespace #
-        ##################################################
-
-        ###################################################
-        # TODO: Make it idempotent, calling add-k8s and   #
-        # bootstrap whenever reuse_cluster_uuid is passed #
-        # as parameter                                    #
-        # `init_env` is called to initialize the K8s      #
-        # cluster for juju. If this initialization fails, #
-        # it can be called again by LCM with the param    #
-        # reuse_cluster_uuid, e.g. to try to fix it.       #
-        ###################################################
-
-        if not reuse_cluster_uuid:
-            # This is a new cluster, so bootstrap it
-
-            cluster_uuid = str(uuid.uuid4())
-
-            # Is a local k8s cluster?
-            localk8s = self.is_local_k8s(k8s_creds)
-
-            # If the k8s is external, the juju controller needs a loadbalancer
-            loadbalancer = False if localk8s else True
-
-            # Name the new k8s cloud
-            k8s_cloud = "k8s-{}".format(cluster_uuid)
-
-            print("Adding k8s cloud {}".format(k8s_cloud))
-            await self.add_k8s(k8s_cloud, k8s_creds)
-
-            # Bootstrap Juju controller
-            print("Bootstrapping...")
-            await self.bootstrap(k8s_cloud, cluster_uuid, loadbalancer)
-            print("Bootstrap done.")
-
-            # Get the controller information
-
-            # Parse ~/.local/share/juju/controllers.yaml
-            # controllers.testing.api-endpoints|ca-cert|uuid
-            print("Getting controller endpoints")
-            with open(os.path.expanduser(
-                "~/.local/share/juju/controllers.yaml"
-            )) as f:
-                controllers = yaml.load(f, Loader=yaml.Loader)
-                controller = controllers['controllers'][cluster_uuid]
-                endpoints = controller['api-endpoints']
-                self.juju_endpoint = endpoints[0]
-                self.juju_ca_cert = controller['ca-cert']
-
-            # Parse ~/.local/share/juju/accounts
-            # controllers.testing.user|password
-            print("Getting accounts")
-            with open(os.path.expanduser(
-                "~/.local/share/juju/accounts.yaml"
-            )) as f:
-                controllers = yaml.load(f, Loader=yaml.Loader)
-                controller = controllers['controllers'][cluster_uuid]
-
-                self.juju_user = controller['user']
-                self.juju_secret = controller['password']
-
-            print("user: {}".format(self.juju_user))
-            print("secret: {}".format(self.juju_secret))
-            print("endpoint: {}".format(self.juju_endpoint))
-            print("ca-cert: {}".format(self.juju_ca_cert))
-
-            # raise Exception("EOL")
-
-            self.juju_public_key = None
-
-            config = {
-                'endpoint': self.juju_endpoint,
-                'username': self.juju_user,
-                'secret': self.juju_secret,
-                'cacert': self.juju_ca_cert,
-                'namespace': namespace,
-                'loadbalancer': loadbalancer,
-            }
-
-            # Store the cluster configuration so it
-            # can be used for subsequent calls
-            print("Setting config")
-            await self.set_config(cluster_uuid, config)
-
-        else:
-            # This is an existing cluster, so get its config
-            cluster_uuid = reuse_cluster_uuid
+        # CREATING RESOURCES IN K8S
+        rbac_id = generate_rbac_id()
+        metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id)
+        labels = {RBAC_STACK_PREFIX: rbac_id}
 
-            config = self.get_config(cluster_uuid)
-
-            self.juju_endpoint = config['endpoint']
-            self.juju_user = config['username']
-            self.juju_secret = config['secret']
-            self.juju_ca_cert = config['cacert']
-            self.juju_public_key = None
-
-        # Login to the k8s cluster
-        if not self.authenticated:
-            await self.login(cluster_uuid)
+        # Create cleanup dictionary to clean up created resources
+        # if it fails in the middle of the process
+        cleanup_data = []
+        try:
+            kubectl.create_cluster_role(
+                name=metadata_name,
+                labels=labels,
+            )
+            cleanup_data.append(
+                {
+                    "delete": kubectl.delete_cluster_role,
+                    "args": (metadata_name,),
+                }
+            )
 
-        # We're creating a new cluster
-        #print("Getting model {}".format(self.get_namespace(cluster_uuid), cluster_uuid=cluster_uuid))
-        #model = await self.get_model(
-        #    self.get_namespace(cluster_uuid),
-        #    cluster_uuid=cluster_uuid
-        #)
+            kubectl.create_service_account(
+                name=metadata_name,
+                labels=labels,
+            )
+            cleanup_data.append(
+                {
+                    "delete": kubectl.delete_service_account,
+                    "args": (metadata_name,),
+                }
+            )
 
-        ## Disconnect from the model
-        #if model and model.is_connected():
-        #    await model.disconnect()
+            kubectl.create_cluster_role_binding(
+                name=metadata_name,
+                labels=labels,
+            )
+            cleanup_data.append(
+                {
+                    "delete": kubectl.delete_cluster_role_binding,
+                    "args": (metadata_name,),
+                }
+            )
+            token, client_cert_data = await kubectl.get_secret_data(
+                metadata_name,
+            )
 
-        return cluster_uuid, True
+            default_storage_class = kubectl.get_default_storage_class()
+            await libjuju.add_k8s(
+                name=cluster_uuid,
+                rbac_id=rbac_id,
+                token=token,
+                client_cert_data=client_cert_data,
+                configuration=kubectl.configuration,
+                storage_class=default_storage_class,
+                credential_name=self._get_credential_name(cluster_uuid),
+            )
+            return cluster_uuid, True
+        except Exception as e:
+            self.log.error("Error initializing k8scluster: {}".format(e))
+            if len(cleanup_data) > 0:
+                self.log.debug("Cleaning up created resources in k8s cluster...")
+                for item in cleanup_data:
+                    delete_function = item["delete"]
+                    delete_args = item["args"]
+                    delete_function(*delete_args)
+                self.log.debug("Cleanup finished")
+            raise e
 
     """Repo Management"""
+
     async def repo_add(
         self,
         name: str,
         url: str,
-        type: str = "charm",
+        _type: str = "charm",
     ):
-        raise NotImplemented()
+        raise MethodNotImplemented()
 
     async def repo_list(self):
-        raise NotImplemented()
+        raise MethodNotImplemented()
 
     async def repo_remove(
         self,
         name: str,
     ):
-        raise NotImplemented()
+        raise MethodNotImplemented()
+
+    async def synchronize_repos(self, cluster_uuid: str, name: str):
+        """
+        Returns None as currently add_repo is not implemented
+        """
+        return None
 
     """Reset"""
+
     async def reset(
-            self,
-            cluster_uuid: str,
-            force: bool = False,
-            uninstall_sw: bool = False
+        self,
+        cluster_uuid: str,
+        force: bool = False,
+        uninstall_sw: bool = False,
+        **kwargs,
     ) -> bool:
         """Reset a cluster
 
         Resets the Kubernetes cluster by removing the model that represents it.
 
         :param cluster_uuid str: The UUID of the cluster to reset
+        :param force: Force reset
+        :param uninstall_sw: Boolean to uninstall sw
+        :param: kwargs: Additional parameters
+            vca_id (str): VCA ID
+
         :return: Returns True if successful or raises an exception.
         """
 
         try:
-            if not self.authenticated:
-                await self.login(cluster_uuid)
-
-            if self.controller.is_connected():
-                # Destroy the model
-                namespace = self.get_namespace(cluster_uuid)
-                if await self.has_model(namespace):
-                    print("[reset] Destroying model")
-                    await self.controller.destroy_model(
-                        namespace,
-                        destroy_storage=True
-                    )
-
-                # Disconnect from the controller
-                print("[reset] Disconnecting controller")
-                await self.logout()
-
-                # Destroy the controller (via CLI)
-                print("[reset] Destroying controller")
-                await self.destroy_controller(cluster_uuid)
-
-                print("[reset] Removing k8s cloud")
-                k8s_cloud = "k8s-{}".format(cluster_uuid)
-                await self.remove_cloud(k8s_cloud)
-
-        except Exception as ex:
-            print("Caught exception during reset: {}".format(ex))
+            self.log.debug("[reset] Removing k8s cloud")
+            libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+
+            cloud = Cloud(cluster_uuid, self._get_credential_name(cluster_uuid))
+
+            cloud_creds = await libjuju.get_cloud_credentials(cloud)
 
+            await libjuju.remove_cloud(cluster_uuid)
+
+            credentials = self.get_credentials(cluster_uuid=cluster_uuid)
+
+            kubectl = self._get_kubectl(credentials)
+
+            delete_functions = [
+                kubectl.delete_cluster_role_binding,
+                kubectl.delete_service_account,
+                kubectl.delete_cluster_role,
+            ]
+
+            credential_attrs = cloud_creds[0].result["attrs"]
+            if RBAC_LABEL_KEY_NAME in credential_attrs:
+                rbac_id = credential_attrs[RBAC_LABEL_KEY_NAME]
+                metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id)
+                for delete_func in delete_functions:
+                    try:
+                        delete_func(metadata_name)
+                    except Exception as e:
+                        self.log.warning("Cannot remove resource in K8s {}".format(e))
+
+        except Exception as e:
+            self.log.debug("Caught exception during reset: {}".format(e))
+            raise e
         return True
 
     """Deployment"""
@@ -300,116 +267,150 @@ class K8sJujuConnector(K8sConnector):
         self,
         cluster_uuid: str,
         kdu_model: str,
+        kdu_instance: str,
         atomic: bool = True,
-        timeout: float = 300,
+        timeout: float = 1800,
         params: dict = None,
         db_dict: dict = None,
-        kdu_name: str = None
+        kdu_name: str = None,
+        namespace: str = None,
+        **kwargs,
     ) -> bool:
         """Install a bundle
 
         :param cluster_uuid str: The UUID of the cluster to install to
         :param kdu_model str: The name or path of a bundle to install
+        :param kdu_instance: Kdu instance name
         :param atomic bool: If set, waits until the model is active and resets
                             the cluster on failure.
         :param timeout int: The time, in seconds, to wait for the install
                             to finish
         :param params dict: Key-value pairs of instantiation parameters
         :param kdu_name: Name of the KDU instance to be installed
+        :param namespace: K8s namespace to use for the KDU instance
+        :param kwargs: Additional parameters
+            vca_id (str): VCA ID
 
         :return: If successful, returns ?
         """
+        libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+        bundle = kdu_model
 
-        if not self.authenticated:
-            print("[install] Logging in to the controller")
-            await self.login(cluster_uuid)
+        if not db_dict:
+            raise K8sException("db_dict must be set")
+        if not bundle:
+            raise K8sException("bundle must be set")
 
-        ##
-        # Get or create the model, based on the NS
-        # uuid.
-        if kdu_name:
-            kdu_instance = "{}-{}".format(kdu_name, db_dict["filter"]["_id"])
+        if bundle.startswith("cs:"):
+            pass
+        elif bundle.startswith("http"):
+            # Download the file
+            pass
         else:
-            kdu_instance = db_dict["filter"]["_id"]
+            new_workdir = kdu_model.strip(kdu_model.split("/")[-1])
+            os.chdir(new_workdir)
+            bundle = "local:{}".format(kdu_model)
 
         self.log.debug("Checking for model named {}".format(kdu_instance))
 
         # Create the new model
         self.log.debug("Adding model: {}".format(kdu_instance))
-        model = await self.add_model(kdu_instance, cluster_uuid=cluster_uuid)
+        cloud = Cloud(cluster_uuid, self._get_credential_name(cluster_uuid))
+        await libjuju.add_model(kdu_instance, cloud)
 
-        if model:
-            # TODO: Instantiation parameters
+
+        # TODO: Instantiation parameters
 
-            """
-            "Juju bundle that models the KDU, in any of the following ways:
-                - <juju-repo>/<juju-bundle>
-                - <juju-bundle folder under k8s_models folder in the package>
-                - <juju-bundle tgz file (w/ or w/o extension) under k8s_models folder in the package>
-                - <URL_where_to_fetch_juju_bundle>
-            """
+        """
+        "Juju bundle that models the KDU, in any of the following ways:
+            - <juju-repo>/<juju-bundle>
+            - <juju-bundle folder under k8s_models folder in the package>
+            - <juju-bundle tgz file (w/ or w/o extension) under k8s_models folder
+                in the package>
+            - <URL_where_to_fetch_juju_bundle>
+        """
+        try:
+            previous_workdir = os.getcwd()
+        except FileNotFoundError:
+            previous_workdir = "/app/storage"
 
-            bundle = kdu_model
-            if kdu_model.startswith("cs:"):
-                bundle = kdu_model
-            elif kdu_model.startswith("http"):
-                # Download the file
-                pass
-            else:
-                # Local file
-
-                # if kdu_model.endswith(".tar.gz") or kdu_model.endswith(".tgz")
-                # Uncompress temporarily
-                # bundle = <uncompressed file>
-                pass
-
-            if not bundle:
-                # Raise named exception that the bundle could not be found
-                raise Exception()
-
-            print("[install] deploying {}".format(bundle))
-            await model.deploy(bundle)
-
-            # Get the application
-            if atomic:
-                # applications = model.applications
-                print("[install] Applications: {}".format(model.applications))
-                for name in model.applications:
-                    print("[install] Waiting for {} to settle".format(name))
-                    application = model.applications[name]
-                    try:
-                        # It's not enough to wait for all units to be active;
-                        # the application status needs to be active as well.
-                        print("Waiting for all units to be active...")
-                        await model.block_until(
-                            lambda: all(
-                                unit.agent_status == 'idle'
-                                and application.status in ['active', 'unknown']
-                                and unit.workload_status in [
-                                    'active', 'unknown'
-                                ] for unit in application.units
-                            ),
-                            timeout=timeout
-                        )
-                        print("All units active.")
-
-                    except concurrent.futures._base.TimeoutError:
-                        print("[install] Timeout exceeded; resetting cluster")
-                        await self.reset(cluster_uuid)
-                        return False
-
-            # Wait for the application to be active
-            if model.is_connected():
-                print("[install] Disconnecting model")
-                await model.disconnect()
-
-            return kdu_instance
-        raise Exception("Unable to install")
-
-    async def instances_list(
-            self,
-            cluster_uuid: str
-    ) -> list:
+        self.log.debug("[install] deploying {}".format(bundle))
+        await libjuju.deploy(
+            bundle, model_name=kdu_instance, wait=atomic, timeout=timeout
+        )
+        os.chdir(previous_workdir)
+        if self.on_update_db:
+            await self.on_update_db(
+                cluster_uuid,
+                kdu_instance,
+                filter=db_dict["filter"],
+                vca_id=kwargs.get("vca_id"),
+            )
+        return True
+
+    async def scale(
+        self,
+        kdu_instance: str,
+        scale: int,
+        resource_name: str,
+        total_timeout: float = 1800,
+        **kwargs,
+    ) -> bool:
+        """Scale an application in a model
+
+        :param: kdu_instance str:        KDU instance name
+        :param: scale int:               Scale to which to set this application
+        :param: resource_name str:       Resource name (Application name)
+        :param: timeout float:           The time, in seconds, to wait for the install
+                                         to finish
+        :param kwargs:                   Additional parameters
+                                            vca_id (str): VCA ID
+
+        :return: If successful, returns True
+        """
+
+        try:
+            libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+            await libjuju.scale_application(
+                model_name=kdu_instance,
+                application_name=resource_name,
+                scale=scale,
+                total_timeout=total_timeout,
+            )
+        except Exception as e:
+            error_msg = "Error scaling application {} in kdu instance {}: {}".format(
+                resource_name, kdu_instance, e
+            )
+            self.log.error(error_msg)
+            raise K8sException(message=error_msg)
+        return True
+
+    async def get_scale_count(
+        self,
+        resource_name: str,
+        kdu_instance: str,
+        **kwargs,
+    ) -> int:
+        """Get an application scale count
+
+        :param: resource_name str:       Resource name (Application name)
+        :param: kdu_instance str:        KDU instance name
+        :param kwargs:                   Additional parameters
+                                            vca_id (str): VCA ID
+        :return: Return application instance count
+        """
+        try:
+            libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+            status = await libjuju.get_model_status(kdu_instance)
+            return len(status.applications[resource_name].units)
+        except Exception as e:
+            error_msg = "Error getting scale count from application {} in kdu instance {}: {}".format(
+                resource_name, kdu_instance, e
+            )
+            self.log.error(error_msg)
+            raise K8sException(message=error_msg)
+
+    async def instances_list(self, cluster_uuid: str) -> list:
         """
         returns a list of deployed releases in a cluster
 
@@ -449,51 +450,10 @@ class K8sJujuConnector(K8sConnector):
         storage would require a redeployment of the service, at least in this
         initial release.
         """
-        namespace = self.get_namespace(cluster_uuid)
-        model = await self.get_model(namespace, cluster_uuid=cluster_uuid)
-
-        with open(kdu_model, 'r') as f:
-            bundle = yaml.safe_load(f)
-
-            """
-            {
-                'description': 'Test bundle',
-                'bundle': 'kubernetes',
-                'applications': {
-                    'mariadb-k8s': {
-                        'charm': 'cs:~charmed-osm/mariadb-k8s-20',
-                        'scale': 1,
-                        'options': {
-                            'password': 'manopw',
-                            'root_password': 'osm4u',
-                            'user': 'mano'
-                        },
-                        'series': 'kubernetes'
-                    }
-                }
-            }
-            """
-            # TODO: This should be returned in an agreed-upon format
-            for name in bundle['applications']:
-                print(model.applications)
-                application = model.applications[name]
-                print(application)
-
-                path = bundle['applications'][name]['charm']
-
-                try:
-                    await application.upgrade_charm(switch=path)
-                except juju.errors.JujuError as ex:
-                    if 'already running charm' in str(ex):
-                        # We're already running this version
-                        pass
-
-        await model.disconnect()
-
-        return True
-        raise NotImplemented()
+        raise MethodNotImplemented()
 
     """Rollback"""
+
     async def rollback(
         self,
         cluster_uuid: str,
@@ -510,35 +470,101 @@ class K8sJujuConnector(K8sConnector):
         :return: If successful, returns the revision of active KDU instance,
                  or raises an exception
         """
-        raise NotImplemented()
+        raise MethodNotImplemented()
 
     """Deletion"""
+
     async def uninstall(
         self,
         cluster_uuid: str,
-        kdu_instance: str
+        kdu_instance: str,
+        **kwargs,
     ) -> bool:
         """Uninstall a KDU instance
 
         :param cluster_uuid str: The UUID of the cluster
         :param kdu_instance str: The unique name of the KDU instance
+        :param kwargs: Additional parameters
+            vca_id (str): VCA ID
 
         :return: Returns True if successful, or raises an exception
         """
-        if not self.authenticated:
-            self.log.debug("[uninstall] Connecting to controller")
-            await self.login(cluster_uuid)
 
         self.log.debug("[uninstall] Destroying model")
+        libjuju = await self._get_libjuju(kwargs.get("vca_id"))
 
-        await self.controller.destroy_models(kdu_instance)
+        await libjuju.destroy_model(kdu_instance, total_timeout=3600)
 
-        self.log.debug("[uninstall] Model destroyed and disconnecting")
-        await self.logout()
+        self.log.debug("[uninstall] Model destroyed and disconnecting")
+        # await controller.disconnect()
 
         return True
+        # TODO: Remove these commented lines
+        # if not self.authenticated:
+        #     self.log.debug("[uninstall] Connecting to controller")
+        #     await self.login(cluster_uuid)
+
+    async def exec_primitive(
+        self,
+        cluster_uuid: str = None,
+        kdu_instance: str = None,
+        primitive_name: str = None,
+        timeout: float = 300,
+        params: dict = None,
+        db_dict: dict = None,
+        **kwargs,
+    ) -> str:
+        """Exec primitive (Juju action)
+
+        :param cluster_uuid str: The UUID of the cluster
+        :param kdu_instance str: The unique name of the KDU instance
+        :param primitive_name: Name of action that will be executed
+        :param timeout: Timeout for action execution
+        :param params: Dictionary of all the parameters needed for the action
+        :param db_dict: Dictionary for any additional data
+        :param kwargs: Additional parameters
+            vca_id (str): VCA ID
+
+        :return: Returns the output of the action
+        """
+        libjuju = await self._get_libjuju(kwargs.get("vca_id"))
+
+        if not params or "application-name" not in params:
+            raise K8sException(
+                "Missing application-name argument, \
+                                argument needed for K8s actions"
+            )
+        try:
+            self.log.debug(
+                "[exec_primitive] Getting model "
+                "kdu_instance: {}".format(kdu_instance)
+            )
+            application_name = params["application-name"]
+            actions = await libjuju.get_actions(application_name, kdu_instance)
+            if primitive_name not in actions:
+                raise K8sException("Primitive {} not found".format(primitive_name))
+            output, status = await libjuju.execute_action(
+                application_name, kdu_instance, primitive_name, **params
+            )
+
+            if status != "completed":
+                raise K8sException(
+                    "status is not completed: {} output: {}".format(status, output)
+                )
+            if self.on_update_db:
+                await self.on_update_db(
+                    cluster_uuid, kdu_instance, filter=db_dict["filter"]
+                )
+
+            return output
+
+        except Exception as e:
+            error_msg = "Error executing primitive {}: {}".format(primitive_name, e)
+            self.log.error(error_msg)
+            raise K8sException(message=error_msg)
 
     """Introspection"""
+
     async def inspect_kdu(
         self,
         kdu_model: str,
@@ -555,8 +581,11 @@ class K8sJujuConnector(K8sConnector):
         """
 
         kdu = {}
-        with open(kdu_model, 'r') as f:
-            bundle = yaml.safe_load(f)
+        if not os.path.exists(kdu_model):
+            raise K8sException("file {} not found".format(kdu_model))
+
+        with open(kdu_model, "r") as f:
+            bundle = yaml.safe_load(f.read())
 
             """
             {
@@ -577,7 +606,7 @@ class K8sJujuConnector(K8sConnector):
             }
             """
             # TODO: This should be returned in an agreed-upon format
-            kdu = bundle['applications']
+            kdu = bundle["applications"]
 
         return kdu
 
@@ -595,11 +624,11 @@ class K8sJujuConnector(K8sConnector):
         """
         readme = None
 
-        files = ['README', 'README.txt', 'README.md']
+        files = ["README", "README.txt", "README.md"]
         path = os.path.dirname(kdu_model)
         for file in os.listdir(path):
             if file in files:
-                with open(file, 'r') as f:
+                with open(os.path.join(path, file), "r") as f:
                     readme = f.read()
                     break
 
@@ -609,6 +638,9 @@ class K8sJujuConnector(K8sConnector):
         self,
         cluster_uuid: str,
         kdu_instance: str,
+        complete_status: bool = False,
+        yaml_format: bool = False,
+        **kwargs,
     ) -> dict:
         """Get the status of the KDU
 
@@ -616,231 +648,119 @@ class K8sJujuConnector(K8sConnector):
 
         :param cluster_uuid str: The UUID of the cluster
         :param kdu_instance str: The unique id of the KDU instance
+        :param complete_status: To get the complete_status of the KDU
+        :param yaml_format: To get the status in proper format for NSR record
+        :param: kwargs: Additional parameters
+            vca_id (str): VCA ID
 
         :return: Returns a dictionary containing namespace, state, resources,
-                 and deployment_time.
+                 and deployment_time. If complete_status is True, the full model status is returned instead.
         """
+        libjuju = await self._get_libjuju(kwargs.get("vca_id"))
         status = {}
 
-        model = await self.get_model(self.get_namespace(cluster_uuid), cluster_uuid=cluster_uuid)
-
-        # model = await self.get_model_by_uuid(cluster_uuid)
-        if model:
-            model_status = await model.get_status()
-            status = model_status.applications
+        model_status = await libjuju.get_model_status(kdu_instance)
 
+        if not complete_status:
             for name in model_status.applications:
                 application = model_status.applications[name]
-                status[name] = {
-                    'status': application['status']['status']
-                }
-
-            if model.is_connected():
-                await model.disconnect()
+                status[name] = {"status": application["status"]["status"]}
+        else:
+            if yaml_format:
+                return obj_to_yaml(model_status)
+            else:
+                return obj_to_dict(model_status)
 
         return status
 
-    # Private methods
-    async def add_k8s(
-        self,
-        cloud_name: str,
-        credentials: str,
-    ) -> bool:
-        """Add a k8s cloud to Juju
-
-        Adds a Kubernetes cloud to Juju, so it can be bootstrapped with a
-        Juju Controller.
-
-        :param cloud_name str: The name of the cloud to add.
-        :param credentials dict: A dictionary representing the output of
-            `kubectl config view --raw`.
-
-        :returns: True if successful, otherwise raises an exception.
+    async def update_vca_status(self, vcastatus: dict, kdu_instance: str, **kwargs):
         """
+        Add all configs, actions, executed actions of all applications in a model to vcastatus dict
 
-        cmd = [self.juju_command, "add-k8s", "--local", cloud_name]
-        print(cmd)
-
-        process = await asyncio.create_subprocess_exec(
-            *cmd,
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
-            stdin=asyncio.subprocess.PIPE,
-        )
-
-        # Feed the process the credentials
-        process.stdin.write(credentials.encode("utf-8"))
-        await process.stdin.drain()
-        process.stdin.close()
-
-        stdout, stderr = await process.communicate()
-
-        return_code = process.returncode
-
-        print("add-k8s return code: {}".format(return_code))
-
-        if return_code > 0:
-            raise Exception(stderr)
-
-        return True
-
-    async def add_model(
-        self,
-        model_name: str,
-        cluster_uuid: str,
-    ) -> juju.model.Model:
-        """Adds a model to the controller
-
-        Adds a new model to the Juju controller
+        :param vcastatus dict: dict containing vcastatus
+        :param kdu_instance str: The unique id of the KDU instance
+        :param: kwargs: Additional parameters
+            vca_id (str): VCA ID
 
-        :param model_name str: The name of the model to add.
-        :returns: The juju.model.Model object of the new model upon success or
-                  raises an exception.
+        :return: None
         """
-        if not self.authenticated:
-            await self.login(cluster_uuid)
-
-        self.log.debug("Adding model '{}' to cluster_uuid '{}'".format(model_name, cluster_uuid))
+        libjuju = await self._get_libjuju(kwargs.get("vca_id"))
         try:
-            model = await self.controller.add_model(
-                model_name,
-                config={'authorized-keys': self.juju_public_key}
-            )
-        except Exception as ex:
-            self.log.debug(ex)
-            self.log.debug("Caught exception: {}".format(ex))
-            pass
-
-        return model
-
-    async def bootstrap(
-        self,
-        cloud_name: str,
-        cluster_uuid: str,
-        loadbalancer: bool
-    ) -> bool:
-        """Bootstrap a Kubernetes controller
-
-        Bootstrap a Juju controller inside the Kubernetes cluster
-
-        :param cloud_name str: The name of the cloud.
-        :param cluster_uuid str: The UUID of the cluster to bootstrap.
-        :param loadbalancer bool: If the controller should use loadbalancer or not.
-        :returns: True upon success or raises an exception.
-        """
-
-        if not loadbalancer:
-            cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid]
-        else:
-            """
-            For public clusters, specify that the controller service is using a LoadBalancer.
-            """
-            cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid, "--config", "controller-service-type=loadbalancer"]
-
-        print("Bootstrapping controller {} in cloud {}".format(
-            cluster_uuid, cloud_name
-        ))
+            for model_name in vcastatus:
+                # Adding executed actions
+                vcastatus[model_name][
+                    "executedActions"
+                ] = await libjuju.get_executed_actions(kdu_instance)
+
+                for application in vcastatus[model_name]["applications"]:
+                    # Adding application actions
+                    vcastatus[model_name]["applications"][application][
+                        "actions"
+                    ] = await libjuju.get_actions(application, kdu_instance)
+                    # Adding application configs
+                    vcastatus[model_name]["applications"][application][
+                        "configs"
+                    ] = await libjuju.get_application_configs(kdu_instance, application)
+
+        except Exception as e:
+            self.log.debug("Error in updating vca status: {}".format(str(e)))
+
+    async def get_services(
+        self, cluster_uuid: str, kdu_instance: str, namespace: str
+    ) -> list:
+        """Return a list of services of a kdu_instance"""
 
-        process = await asyncio.create_subprocess_exec(
-            *cmd,
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
+        credentials = self.get_credentials(cluster_uuid=cluster_uuid)
+        kubectl = self._get_kubectl(credentials)
+        return kubectl.get_services(
+            field_selector="metadata.namespace={}".format(kdu_instance)
         )
 
-        stdout, stderr = await process.communicate()
-
-        return_code = process.returncode
-
-        if return_code > 0:
-            #
-            if b'already exists' not in stderr:
-                raise Exception(stderr)
-
-        return True
-
-    async def destroy_controller(
-        self,
-        cluster_uuid: str
-    ) -> bool:
-        """Destroy a Kubernetes controller
+    async def get_service(
+        self, cluster_uuid: str, service_name: str, namespace: str
+    ) -> object:
+        """Return data for a specific service inside a namespace"""
 
-        Destroy an existing Kubernetes controller.
+        credentials = self.get_credentials(cluster_uuid=cluster_uuid)
+        kubectl = self._get_kubectl(credentials)
+        return kubectl.get_services(
+            field_selector="metadata.name={},metadata.namespace={}".format(
+                service_name, namespace
+            )
+        )[0]
 
-        :param cluster_uuid str: The UUID of the cluster to bootstrap.
-        :returns: True upon success or raises an exception.
+    def get_credentials(self, cluster_uuid: str) -> str:
+        """
+        Get Cluster Kubeconfig
         """
-        cmd = [
-            self.juju_command,
-            "destroy-controller",
-            "--destroy-all-models",
-            "--destroy-storage",
-            "-y",
-            cluster_uuid
-        ]
-
-        process = await asyncio.create_subprocess_exec(
-            *cmd,
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
+        k8scluster = self.db.get_one(
+            "k8sclusters", q_filter={"_id": cluster_uuid}, fail_on_empty=False
         )
 
-        stdout, stderr = await process.communicate()
-
-        return_code = process.returncode
-
-        if return_code > 0:
-            #
-            if 'already exists' not in stderr:
-                raise Exception(stderr)
-
-    def get_config(
-        self,
-        cluster_uuid: str,
-    ) -> dict:
-        """Get the cluster configuration
+        self.db.encrypt_decrypt_fields(
+            k8scluster.get("credentials"),
+            "decrypt",
+            ["password", "secret"],
+            schema_version=k8scluster["schema_version"],
+            salt=k8scluster["_id"],
+        )
 
-        Gets the configuration of the cluster
+        return yaml.safe_dump(k8scluster.get("credentials"))
 
-        :param cluster_uuid str: The UUID of the cluster.
-        :return: A dict upon success, or raises an exception.
+    def _get_credential_name(self, cluster_uuid: str) -> str:
         """
-        cluster_config = "{}/{}.yaml".format(self.fs.path, cluster_uuid)
-        if os.path.exists(cluster_config):
-            with open(cluster_config, 'r') as f:
-                config = yaml.safe_load(f.read())
-                return config
-        else:
-            raise Exception(
-                "Unable to locate configuration for cluster {}".format(
-                    cluster_uuid
-                )
-            )
+        Get credential name for a k8s cloud
 
-    async def get_model(
-        self,
-        model_name: str,
-        cluster_uuid: str,
-    ) -> juju.model.Model:
-        """Get a model from the Juju Controller.
+        We cannot use the cluster_uuid for the credential name directly,
+        because it cannot start with a number, it must start with a letter.
+        Therefore, the k8s cloud credential name will be "cred-" followed
+        by the cluster uuid.
 
-        Note: Model objects returned must call disconnected() before it goes
-        out of scope.
+        :param: cluster_uuid:   Cluster UUID of the kubernetes cloud (=cloud_name)
 
-        :param model_name str: The name of the model to get
-        :return The juju.model.Model object if found, or None.
+        :return:                Name to use for the credential name.
         """
-        if not self.authenticated:
-            await self.login(cluster_uuid)
-
-        model = None
-        models = await self.controller.list_models()
-        self.log.debug(models)
-        if model_name in models:
-            self.log.debug("Found model: {}".format(model_name))
-            model = await self.controller.get_model(
-                model_name
-            )
-        return model
+        return "cred-{}".format(cluster_uuid)
 
     def get_namespace(
         self,
@@ -852,183 +772,50 @@ class K8sJujuConnector(K8sConnector):
         :param cluster_uuid str: The UUID of the cluster
         :returns: The namespace UUID, or raises an exception
         """
-        config = self.get_config(cluster_uuid)
+        pass
 
-        # Make sure the name is in the config
-        if 'namespace' not in config:
-            raise Exception("Namespace not found.")
-
-        # TODO: We want to make sure this is unique to the cluster, in case
-        # the cluster is being reused.
-        # Consider pre/appending the cluster id to the namespace string
-        return config['namespace']
-
-    async def has_model(
-        self,
-        model_name: str
-    ) -> bool:
-        """Check if a model exists in the controller
-
-        Checks to see if a model exists in the connected Juju controller.
+    @staticmethod
+    def generate_kdu_instance_name(**kwargs):
+        db_dict = kwargs.get("db_dict")
+        kdu_name = kwargs.get("kdu_name", None)
+        if kdu_name:
+            kdu_instance = "{}-{}".format(kdu_name, db_dict["filter"]["_id"])
+        else:
+            kdu_instance = db_dict["filter"]["_id"]
+        return kdu_instance
 
-        :param model_name str: The name of the model
-        :return: A boolean indicating if the model exists
+    async def _get_libjuju(self, vca_id: str = None) -> Libjuju:
         """
-        models = await self.controller.list_models()
-
-        if model_name in models:
-            return True
-        return False
-
-    def is_local_k8s(
-        self,
-        credentials: str,
-    ) -> bool:
-        """Check if a cluster is local
+        Get libjuju object
 
-        Checks if a cluster is running in the local host
-
-        :param credentials dict: A dictionary containing the k8s credentials
-        :returns: A boolean if the cluster is running locally
+        :param: vca_id: VCA ID
+                        If None, get a libjuju object with a Connection to the default VCA
+                        Else, get a libjuju object with a Connection to the specified VCA
         """
-        creds = yaml.safe_load(credentials)
-        if os.getenv("OSMLCM_VCA_APIPROXY"):
-            host_ip = os.getenv("OSMLCM_VCA_APIPROXY")
-
-        if creds and host_ip:
-            for cluster in creds['clusters']:
-                if 'server' in cluster['cluster']:
-                    if host_ip in cluster['cluster']['server']:
-                        return True
-
-        return False
-
-    async def login(self, cluster_uuid):
-        """Login to the Juju controller."""
-
-        if self.authenticated:
-            return
-
-        self.connecting = True
-
-        # Test: Make sure we have the credentials loaded
-        config = self.get_config(cluster_uuid)
-
-        self.juju_endpoint = config['endpoint']
-        self.juju_user = config['username']
-        self.juju_secret = config['secret']
-        self.juju_ca_cert = config['cacert']
-        self.juju_public_key = None
-
-        self.controller = Controller()
-
-        if self.juju_secret:
-            self.log.debug(
-                "Connecting to controller... ws://{} as {}/{}".format(
-                    self.juju_endpoint,
-                    self.juju_user,
-                    self.juju_secret,
-                )
-            )
-            try:
-                await self.controller.connect(
-                    endpoint=self.juju_endpoint,
-                    username=self.juju_user,
-                    password=self.juju_secret,
-                    cacert=self.juju_ca_cert,
-                )
-                self.authenticated = True
-                self.log.debug("JujuApi: Logged into controller")
-            except Exception as ex:
-                print(ex)
-                self.log.debug("Caught exception: {}".format(ex))
-                pass
+        if not vca_id:
+            while self.loading_libjuju.locked():
+                await asyncio.sleep(0.1)
+            if not self.libjuju:
+                async with self.loading_libjuju:
+                    vca_connection = await get_connection(self._store)
+                    self.libjuju = Libjuju(vca_connection, loop=self.loop, log=self.log)
+            return self.libjuju
         else:
-            self.log.fatal("VCA credentials not configured.")
-            self.authenticated = False
-
-    async def logout(self):
-        """Logout of the Juju controller."""
-        print("[logout]")
-        if not self.authenticated:
-            return False
-
-        for model in self.models:
-            print("Logging out of model {}".format(model))
-            await self.models[model].disconnect()
-
-        if self.controller:
-            self.log.debug("Disconnecting controller {}".format(
-                self.controller
-            ))
-            await self.controller.disconnect()
-            self.controller = None
-
-        self.authenticated = False
-
-    async def remove_cloud(
-        self,
-        cloud_name: str,
-    ) -> bool:
-        """Remove a k8s cloud from Juju
-
-        Removes a Kubernetes cloud from Juju.
-
-        :param cloud_name str: The name of the cloud to add.
+            vca_connection = await get_connection(self._store, vca_id)
+            return Libjuju(
+                vca_connection,
+                loop=self.loop,
+                log=self.log,
+                n2vc=self,
+            )
 
-        :returns: True if successful, otherwise raises an exception.
+    def _get_kubectl(self, credentials: str) -> Kubectl:
         """
+        Get Kubectl object
 
-        # Remove the bootstrapped controller
-        cmd = [self.juju_command, "remove-k8s", "--client", cloud_name]
-        process = await asyncio.create_subprocess_exec(
-            *cmd,
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
-        )
-
-        stdout, stderr = await process.communicate()
-
-        return_code = process.returncode
-
-        if return_code > 0:
-            raise Exception(stderr)
-
-        # Remove the cloud from the local config
-        cmd = [self.juju_command, "remove-cloud", "--client", cloud_name]
-        process = await asyncio.create_subprocess_exec(
-            *cmd,
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE,
-        )
-
-        stdout, stderr = await process.communicate()
-
-        return_code = process.returncode
-
-        if return_code > 0:
-            raise Exception(stderr)
-
-        return True
-
-    async def set_config(
-        self,
-        cluster_uuid: str,
-        config: dict,
-    ) -> bool:
-        """Save the cluster configuration
-
-        Saves the cluster information to the file store
-
-        :param cluster_uuid str: The UUID of the cluster
-        :param config dict: A dictionary containing the cluster configuration
-        :returns: Boolean upon success or raises an exception.
+        :param: credentials: Kubeconfig credentials (contents of the kubeconfig file)
         """
-
-        cluster_config = "{}/{}.yaml".format(self.fs.path, cluster_uuid)
-        if not os.path.exists(cluster_config):
-            print("Writing config to {}".format(cluster_config))
-            with open(cluster_config, 'w') as f:
-                f.write(yaml.dump(config, Dumper=yaml.Dumper))
-
-        return True
+        kubecfg = tempfile.NamedTemporaryFile()
+        with open(kubecfg.name, "w") as kubecfg_file:
+            kubecfg_file.write(credentials)
+        return Kubectl(config_file=kubecfg.name)