diff --git a/n2vc/k8s_juju_conn.py b/n2vc/k8s_juju_conn.py
index b2ef079..5b158d7 100644
--- a/n2vc/k8s_juju_conn.py
+++ b/n2vc/k8s_juju_conn.py
@@ -13,24 +13,50 @@
 # limitations under the License.
 
 import asyncio
-import concurrent
 import os
 import uuid
 import yaml
+import tempfile
+import binascii
+import base64
 
-import juju
-from juju.controller import Controller
-from juju.model import Model
-from n2vc.exceptions import K8sException
+from n2vc.config import ModelConfig
+from n2vc.exceptions import K8sException, N2VCBadArgumentsException
 from n2vc.k8s_conn import K8sConnector
-
+from n2vc.kubectl import Kubectl, CORE_CLIENT, RBAC_CLIENT
 from .exceptions import MethodNotImplemented
+from n2vc.utils import base64_to_cacert
+from n2vc.libjuju import Libjuju
+
+from kubernetes.client.models import (
+    V1ClusterRole,
+    V1ObjectMeta,
+    V1PolicyRule,
+    V1ServiceAccount,
+    V1ClusterRoleBinding,
+    V1RoleRef,
+    V1Subject,
+)
+
+from typing import Dict
+
+SERVICE_ACCOUNT_TOKEN_KEY = "token"
+SERVICE_ACCOUNT_ROOT_CA_KEY = "ca.crt"
+RBAC_LABEL_KEY_NAME = "rbac-id"
+ADMIN_NAMESPACE = "kube-system"
+RBAC_STACK_PREFIX = "juju-credential"
 
 # from juju.bundle import BundleHandler
 # import re
 # import ssl
 # from .vnf import N2VC
+
+
+def generate_rbac_id():
+    return binascii.hexlify(os.urandom(4)).decode()
+
+
 class K8sJujuConnector(K8sConnector):
     def __init__(
         self,
@@ -39,31 +65,65 @@ class K8sJujuConnector(K8sConnector):
         kubectl_command: str = "/usr/bin/kubectl",
         juju_command: str = "/usr/bin/juju",
         log: object = None,
+        loop: object = None,
         on_update_db=None,
+        vca_config: dict = None,
     ):
         """
-
+        :param fs: file system for kubernetes and helm configuration
+        :param db: Database object
         :param kubectl_command: path to kubectl executable
         :param helm_command: path to helm executable
-        :param fs: file system for kubernetes and helm configuration
         :param log: logger
+        :param loop: Asyncio loop
         """
 
         # parent class
         K8sConnector.__init__(
-            self, db, log=log, on_update_db=on_update_db,
+            self,
+            db,
+            log=log,
+            on_update_db=on_update_db,
         )
 
         self.fs = fs
+        self.loop = loop or asyncio.get_event_loop()
         self.log.debug("Initializing K8S Juju connector")
 
-        self.authenticated = False
-        self.models = {}
-
-        self.juju_command = juju_command
-        self.juju_secret = ""
-
+        required_vca_config = [
+            "host",
+            "user",
+            "secret",
+            "ca_cert",
+        ]
+        if not vca_config or not all(k in vca_config for k in required_vca_config):
+            raise N2VCBadArgumentsException(
+                message="Missing arguments in vca_config: {}".format(vca_config),
+                bad_args=required_vca_config,
+            )
+        port = vca_config["port"] if "port" in vca_config else 17070
+        url = "{}:{}".format(vca_config["host"], port)
+        model_config = ModelConfig(vca_config)
+        username = vca_config["user"]
+        secret = vca_config["secret"]
+        ca_cert = base64_to_cacert(vca_config["ca_cert"])
+
+        self.libjuju = Libjuju(
+            endpoint=url,
+            api_proxy=None,  # Not needed for k8s charms
+            model_config=model_config,
+            username=username,
+            password=secret,
+            cacert=ca_cert,
+            loop=self.loop,
+            log=self.log,
+            db=self.db,
+        )
         self.log.debug("K8S Juju connector initialized")
+        # TODO: Remove these commented lines:
+        # self.authenticated = False
+        # self.models = {}
+        # self.juju_secret = ""
 
     """Initialization"""
 
@@ -86,26 +146,26 @@ class K8sJujuConnector(K8sConnector):
         (on error, an exception will be raised)
         """
 
-        """Bootstrapping
+        # """Bootstrapping
 
-        Bootstrapping cannot be done, by design, through the API. We need to
-        use the CLI tools.
-        """
+        # Bootstrapping cannot be done, by design, through the API. We need to
+        # use the CLI tools.
+        # """
 
-        """
-        WIP: Workflow
+        # """
+        # WIP: Workflow
 
-        1. Has the environment already been bootstrapped?
-        - Check the database to see if we have a record for this env
+        # 1. Has the environment already been bootstrapped?
+        # - Check the database to see if we have a record for this env
 
-        2. If this is a new env, create it
-        - Add the k8s cloud to Juju
-        - Bootstrap
-        - Record it in the database
+        # 2. If this is a new env, create it
+        # - Add the k8s cloud to Juju
+        # - Bootstrap
+        # - Record it in the database
 
-        3. Connect to the Juju controller for this cloud
+        # 3. Connect to the Juju controller for this cloud
 
-        """
+        # """
         # cluster_uuid = reuse_cluster_uuid
         # if not cluster_uuid:
         #     cluster_uuid = str(uuid4())
@@ -124,102 +184,168 @@ class K8sJujuConnector(K8sConnector):
         #     reuse_cluster_uuid, e.g. to try to fix it.
         ###################################################
 
-        if not reuse_cluster_uuid:
-            # This is a new cluster, so bootstrap it
-
-            cluster_uuid = str(uuid.uuid4())
-
-            # Is a local k8s cluster?
-            localk8s = self.is_local_k8s(k8s_creds)
-
-            # If the k8s is external, the juju controller needs a loadbalancer
-            loadbalancer = False if localk8s else True
-
-            # Name the new k8s cloud
-            k8s_cloud = "k8s-{}".format(cluster_uuid)
-
-            self.log.debug("Adding k8s cloud {}".format(k8s_cloud))
-            await self.add_k8s(k8s_cloud, k8s_creds)
-
-            # Bootstrap Juju controller
-            self.log.debug("Bootstrapping...")
-            await self.bootstrap(k8s_cloud, cluster_uuid, loadbalancer)
-            self.log.debug("Bootstrap done.")
+        # This is a new cluster, so bootstrap it
+
+        cluster_uuid = reuse_cluster_uuid or str(uuid.uuid4())
+
+        # Is a local k8s cluster?
+        # localk8s = self.is_local_k8s(k8s_creds)
+
+        # If the k8s is external, the juju controller needs a loadbalancer
+        # loadbalancer = False if localk8s else True
+
+        # Name the new k8s cloud
+        # k8s_cloud = "k8s-{}".format(cluster_uuid)
+
+        # self.log.debug("Adding k8s cloud {}".format(k8s_cloud))
+        # await self.add_k8s(k8s_cloud, k8s_creds)
+
+        # Bootstrap Juju controller
+        # self.log.debug("Bootstrapping...")
+        # await self.bootstrap(k8s_cloud, cluster_uuid, loadbalancer)
+        # self.log.debug("Bootstrap done.")
+
+        # Get the controller information
+
+        # Parse ~/.local/share/juju/controllers.yaml
+        # controllers.testing.api-endpoints|ca-cert|uuid
+        # self.log.debug("Getting controller endpoints")
+        # with open(os.path.expanduser("~/.local/share/juju/controllers.yaml")) as f:
+        #     controllers = yaml.load(f, Loader=yaml.Loader)
+        #     controller = controllers["controllers"][cluster_uuid]
+        #     endpoints = controller["api-endpoints"]
+        #     juju_endpoint = endpoints[0]
+        #     juju_ca_cert = controller["ca-cert"]
+
+        # Parse ~/.local/share/juju/accounts
+        # controllers.testing.user|password
+        # self.log.debug("Getting accounts")
+        # with open(os.path.expanduser("~/.local/share/juju/accounts.yaml")) as f:
+        #     controllers = yaml.load(f, Loader=yaml.Loader)
+        #     controller = controllers["controllers"][cluster_uuid]
+
+        #     juju_user = controller["user"]
+        #     juju_secret = controller["password"]
+
+        # config = {
+        #     "endpoint": juju_endpoint,
+        #     "username": juju_user,
+        #     "secret": juju_secret,
+        #     "cacert": juju_ca_cert,
+        #     "loadbalancer": loadbalancer,
+        # }
+
+        # Store the cluster configuration so it
+        # can be used for subsequent calls
+        kubecfg = tempfile.NamedTemporaryFile()
+        with open(kubecfg.name, "w") as kubecfg_file:
+            kubecfg_file.write(k8s_creds)
+        kubectl = Kubectl(config_file=kubecfg.name)
+
+        # CREATING RESOURCES IN K8S
+        rbac_id = generate_rbac_id()
+        metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id)
+        labels = {RBAC_STACK_PREFIX: rbac_id}
+
+        # Create cleanup dictionary to clean up created resources
+        # if it fails in the middle of the process
+        cleanup_data = []
+        try:
+            self._create_cluster_role(
+                kubectl,
+                name=metadata_name,
+                labels=labels,
+            )
+            cleanup_data.append(
+                {
+                    "delete": self._delete_cluster_role,
+                    "args": (kubectl, metadata_name),
+                }
+            )
 
-            # Get the controller information
+            self._create_service_account(
+                kubectl,
+                name=metadata_name,
+                labels=labels,
+            )
+            cleanup_data.append(
+                {
+                    "delete": self._delete_service_account,
+                    "args": (kubectl, metadata_name),
+                }
+            )
 
-            # Parse ~/.local/share/juju/controllers.yaml
-            # controllers.testing.api-endpoints|ca-cert|uuid
-            self.log.debug("Getting controller endpoints")
-            with open(os.path.expanduser("~/.local/share/juju/controllers.yaml")) as f:
-                controllers = yaml.load(f, Loader=yaml.Loader)
-                controller = controllers["controllers"][cluster_uuid]
-                endpoints = controller["api-endpoints"]
-                self.juju_endpoint = endpoints[0]
-                self.juju_ca_cert = controller["ca-cert"]
+            self._create_cluster_role_binding(
+                kubectl,
+                name=metadata_name,
+                labels=labels,
+            )
+            cleanup_data.append(
+                {
+                    # Note: must delete the role binding itself here, not the
+                    # service account (fixed copy-paste slip)
+                    "delete": self._delete_cluster_role_binding,
+                    "args": (kubectl, metadata_name),
+                }
+            )
+            token, client_cert_data = await self._get_secret_data(
+                kubectl,
+                metadata_name,
+            )
 
-            # Parse ~/.local/share/juju/accounts
-            # controllers.testing.user|password
-            self.log.debug("Getting accounts")
-            with open(os.path.expanduser("~/.local/share/juju/accounts.yaml")) as f:
-                controllers = yaml.load(f, Loader=yaml.Loader)
-                controller = controllers["controllers"][cluster_uuid]
+            default_storage_class = kubectl.get_default_storage_class()
+            await self.libjuju.add_k8s(
+                name=cluster_uuid,
+                rbac_id=rbac_id,
+                token=token,
+                client_cert_data=client_cert_data,
+                configuration=kubectl.configuration,
+                storage_class=default_storage_class,
+                credential_name=self._get_credential_name(cluster_uuid),
+            )
+            # self.log.debug("Setting config")
+            # await self.set_config(cluster_uuid, config)
 
-                self.juju_user = controller["user"]
-                self.juju_secret = controller["password"]
+            # Test connection
+            # controller = await self.get_controller(cluster_uuid)
+            # await controller.disconnect()
 
+            # TODO: Remove these commented lines
             # raise Exception("EOL")
-
-            self.juju_public_key = None
-
-            config = {
-                "endpoint": self.juju_endpoint,
-                "username": self.juju_user,
-                "secret": self.juju_secret,
-                "cacert": self.juju_ca_cert,
-                "namespace": namespace,
-                "loadbalancer": loadbalancer,
-            }
-
-            # Store the cluster configuration so it
-            # can be used for subsequent calls
-            self.log.debug("Setting config")
-            await self.set_config(cluster_uuid, config)
-
-        else:
-            # This is an existing cluster, so get its config
-            cluster_uuid = reuse_cluster_uuid
-
-            config = self.get_config(cluster_uuid)
-
-            self.juju_endpoint = config["endpoint"]
-            self.juju_user = config["username"]
-            self.juju_secret = config["secret"]
-            self.juju_ca_cert = config["cacert"]
-            self.juju_public_key = None
-
-        # Login to the k8s cluster
-        if not self.authenticated:
-            await self.login(cluster_uuid)
-
-        # We're creating a new cluster
-        # print("Getting model {}".format(self.get_namespace(cluster_uuid),
-        # cluster_uuid=cluster_uuid))
-        # model = await self.get_model(
-        #     self.get_namespace(cluster_uuid),
-        #     cluster_uuid=cluster_uuid
-        # )
-
-        # Disconnect from the model
-        # if model and model.is_connected():
-        #     await model.disconnect()
-
-        return cluster_uuid, True
+            # self.juju_public_key = None
+            # Login to the k8s cluster
+            # if not self.authenticated:
+            #     await self.login(cluster_uuid)
+
+            # We're creating a new cluster
+            # print("Getting model {}".format(self.get_namespace(cluster_uuid),
+            # cluster_uuid=cluster_uuid))
+            # model = await self.get_model(
+            #     self.get_namespace(cluster_uuid),
+            #     cluster_uuid=cluster_uuid
+            # )
+
+            # Disconnect from the model
+            # if model and model.is_connected():
+            #     await model.disconnect()
+
+            return cluster_uuid, True
+        except Exception as e:
+            self.log.error("Error initializing k8scluster: {}".format(e))
+            if len(cleanup_data) > 0:
+                self.log.debug("Cleaning up created resources in k8s cluster...")
+                for item in cleanup_data:
+                    delete_function = item["delete"]
+                    delete_args = item["args"]
+                    delete_function(*delete_args)
+                self.log.debug("Cleanup finished")
+            raise e
 
     """Repo Management"""
 
     async def repo_add(
-        self, name: str, url: str, _type: str = "charm",
+        self,
+        name: str,
+        url: str,
+        _type: str = "charm",
     ):
         raise MethodNotImplemented()
 
@@ -227,7 +353,8 @@ class K8sJujuConnector(K8sConnector):
         raise MethodNotImplemented()
 
     async def repo_remove(
-        self, name: str,
+        self,
+        name: str,
     ):
         raise MethodNotImplemented()
 
@@ -251,32 +378,76 @@ class K8sJujuConnector(K8sConnector):
         """
 
         try:
-            if not self.authenticated:
-                await self.login(cluster_uuid)
+            # Remove k8scluster from database
+            # self.log.debug("[reset] Removing k8scluster from juju database")
+            # juju_db = self.db.get_one("admin", {"_id": "juju"})
+
+            # for k in juju_db["k8sclusters"]:
+            #     if k["_id"] == cluster_uuid:
+            #         juju_db["k8sclusters"].remove(k)
+            #         self.db.set_one(
table="admin", + # q_filter={"_id": "juju"}, + # update_dict={"k8sclusters": juju_db["k8sclusters"]}, + # ) + # break + + # Destroy the controller (via CLI) + # self.log.debug("[reset] Destroying controller") + # await self.destroy_controller(cluster_uuid) + self.log.debug("[reset] Removing k8s cloud") + # k8s_cloud = "k8s-{}".format(cluster_uuid) + # await self.remove_cloud(k8s_cloud) + + cloud_creds = await self.libjuju.get_cloud_credentials( + cluster_uuid, + self._get_credential_name(cluster_uuid), + ) - if self.controller.is_connected(): - # Destroy the model - namespace = self.get_namespace(cluster_uuid) - if await self.has_model(namespace): - self.log.debug("[reset] Destroying model") - await self.controller.destroy_model(namespace, destroy_storage=True) + await self.libjuju.remove_cloud(cluster_uuid) - # Disconnect from the controller - self.log.debug("[reset] Disconnecting controller") - await self.logout() + kubecfg = self.get_credentials(cluster_uuid=cluster_uuid) - # Destroy the controller (via CLI) - self.log.debug("[reset] Destroying controller") - await self.destroy_controller(cluster_uuid) + kubecfg_file = tempfile.NamedTemporaryFile() + with open(kubecfg_file.name, "w") as f: + f.write(kubecfg) + kubectl = Kubectl(config_file=kubecfg_file.name) - self.log.debug("[reset] Removing k8s cloud") - k8s_cloud = "k8s-{}".format(cluster_uuid) - await self.remove_cloud(k8s_cloud) + delete_functions = [ + self._delete_cluster_role_binding, + self._delete_service_account, + self._delete_cluster_role, + ] - except Exception as ex: - self.log.debug("Caught exception during reset: {}".format(ex)) + credential_attrs = cloud_creds[0].result["attrs"] + if RBAC_LABEL_KEY_NAME in credential_attrs: + rbac_id = credential_attrs[RBAC_LABEL_KEY_NAME] + metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id) + delete_args = (kubectl, metadata_name) + for delete_func in delete_functions: + try: + delete_func(*delete_args) + except Exception as e: + self.log.warning("Cannot remove resource in K8s {}".format(e)) + except Exception as e: + self.log.debug("Caught exception during reset: {}".format(e)) + raise e return True + # TODO: Remove these commented lines + # if not self.authenticated: + # await self.login(cluster_uuid) + + # if self.controller.is_connected(): + # # Destroy the model + # namespace = self.get_namespace(cluster_uuid) + # if await self.has_model(namespace): + # self.log.debug("[reset] Destroying model") + # await self.controller.destroy_model(namespace, destroy_storage=True) + + # # Disconnect from the controller + # self.log.debug("[reset] Disconnecting controller") + # await self.logout() """Deployment""" @@ -284,8 +455,9 @@ class K8sJujuConnector(K8sConnector): self, cluster_uuid: str, kdu_model: str, + kdu_instance: str, atomic: bool = True, - timeout: float = 300, + timeout: float = 1800, params: dict = None, db_dict: dict = None, kdu_name: str = None, @@ -295,6 +467,7 @@ class K8sJujuConnector(K8sConnector): :param cluster_uuid str: The UUID of the cluster to install to :param kdu_model str: The name or path of a bundle to install + :param kdu_instance: Kdu instance name :param atomic bool: If set, waits until the model is active and resets the cluster on failure. :param timeout int: The time, in seconds, to wait for the install @@ -305,99 +478,96 @@ class K8sJujuConnector(K8sConnector): :return: If successful, returns ? 
""" + bundle = kdu_model - if not self.authenticated: - self.log.debug("[install] Logging in to the controller") - await self.login(cluster_uuid) + # controller = await self.get_controller(cluster_uuid) ## # Get or create the model, based on the NS # uuid. - if kdu_name: - kdu_instance = "{}-{}".format(kdu_name, db_dict["filter"]["_id"]) + + if not db_dict: + raise K8sException("db_dict must be set") + if not bundle: + raise K8sException("bundle must be set") + + if bundle.startswith("cs:"): + pass + elif bundle.startswith("http"): + # Download the file + pass else: - kdu_instance = db_dict["filter"]["_id"] + new_workdir = kdu_model.strip(kdu_model.split("/")[-1]) + os.chdir(new_workdir) + bundle = "local:{}".format(kdu_model) self.log.debug("Checking for model named {}".format(kdu_instance)) # Create the new model self.log.debug("Adding model: {}".format(kdu_instance)) - model = await self.add_model(kdu_instance, cluster_uuid=cluster_uuid) + await self.libjuju.add_model( + model_name=kdu_instance, + cloud_name=cluster_uuid, + credential_name=self._get_credential_name(cluster_uuid), + ) - if model: - # TODO: Instantiation parameters + # if model: + # TODO: Instantiation parameters - """ - "Juju bundle that models the KDU, in any of the following ways: - - / - - - - - - - """ - try: - previous_workdir = os.getcwd() - except FileNotFoundError: - previous_workdir = "/app/storage" - - bundle = kdu_model - if kdu_model.startswith("cs:"): - bundle = kdu_model - elif kdu_model.startswith("http"): - # Download the file - pass - else: - new_workdir = kdu_model.strip(kdu_model.split("/")[-1]) - - os.chdir(new_workdir) - - bundle = "local:{}".format(kdu_model) - - if not bundle: - # Raise named exception that the bundle could not be found - raise Exception() - - self.log.debug("[install] deploying {}".format(bundle)) - await model.deploy(bundle) - - # Get the application - if atomic: - # applications = model.applications - self.log.debug("[install] Applications: {}".format(model.applications)) - for name in model.applications: - self.log.debug("[install] Waiting for {} to settle".format(name)) - application = model.applications[name] - try: - # It's not enough to wait for all units to be active; - # the application status needs to be active as well. 
- self.log.debug("Waiting for all units to be active...") - await model.block_until( - lambda: all( - unit.agent_status == "idle" - and application.status in ["active", "unknown"] - and unit.workload_status in ["active", "unknown"] - for unit in application.units - ), - timeout=timeout, - ) - self.log.debug("All units active.") - - # TODO use asyncio.TimeoutError - except concurrent.futures._base.TimeoutError: - os.chdir(previous_workdir) - self.log.debug("[install] Timeout exceeded; resetting cluster") - await self.reset(cluster_uuid) - return False - - # Wait for the application to be active - if model.is_connected(): - self.log.debug("[install] Disconnecting model") - await model.disconnect() - - os.chdir(previous_workdir) - - return kdu_instance - raise Exception("Unable to install") + """ + "Juju bundle that models the KDU, in any of the following ways: + - / + - + - + - + """ + try: + previous_workdir = os.getcwd() + except FileNotFoundError: + previous_workdir = "/app/storage" + + self.log.debug("[install] deploying {}".format(bundle)) + await self.libjuju.deploy( + bundle, model_name=kdu_instance, wait=atomic, timeout=timeout + ) + + # Get the application + # if atomic: + # # applications = model.applications + # self.log.debug("[install] Applications: {}".format(model.applications)) + # for name in model.applications: + # self.log.debug("[install] Waiting for {} to settle".format(name)) + # application = model.applications[name] + # try: + # # It's not enough to wait for all units to be active; + # # the application status needs to be active as well. + # self.log.debug("Waiting for all units to be active...") + # await model.block_until( + # lambda: all( + # unit.agent_status == "idle" + # and application.status in ["active", "unknown"] + # and unit.workload_status in ["active", "unknown"] + # for unit in application.units + # ), + # timeout=timeout, + # ) + # self.log.debug("All units active.") + + # # TODO use asyncio.TimeoutError + # except concurrent.futures._base.TimeoutError: + # os.chdir(previous_workdir) + # self.log.debug("[install] Timeout exceeded; resetting cluster") + # await self.reset(cluster_uuid) + # return False + + # Wait for the application to be active + # if model.is_connected(): + # self.log.debug("[install] Disconnecting model") + # await model.disconnect() + # await controller.disconnect() + os.chdir(previous_workdir) + return True async def instances_list(self, cluster_uuid: str) -> list: """ @@ -439,54 +609,68 @@ class K8sJujuConnector(K8sConnector): storage would require a redeployment of the service, at least in this initial release. 
""" - namespace = self.get_namespace(cluster_uuid) - model = await self.get_model(namespace, cluster_uuid=cluster_uuid) - - with open(kdu_model, "r") as f: - bundle = yaml.safe_load(f) - - """ - { - 'description': 'Test bundle', - 'bundle': 'kubernetes', - 'applications': { - 'mariadb-k8s': { - 'charm': 'cs:~charmed-osm/mariadb-k8s-20', - 'scale': 1, - 'options': { - 'password': 'manopw', - 'root_password': 'osm4u', - 'user': 'mano' - }, - 'series': 'kubernetes' - } - } - } - """ - # TODO: This should be returned in an agreed-upon format - for name in bundle["applications"]: - self.log.debug(model.applications) - application = model.applications[name] - self.log.debug(application) - - path = bundle["applications"][name]["charm"] - - try: - await application.upgrade_charm(switch=path) - except juju.errors.JujuError as ex: - if "already running charm" in str(ex): - # We're already running this version - pass - - await model.disconnect() - - return True raise MethodNotImplemented() + # TODO: Remove these commented lines + + # model = await self.get_model(namespace, cluster_uuid=cluster_uuid) + + # model = None + # namespace = self.get_namespace(cluster_uuid) + # controller = await self.get_controller(cluster_uuid) + + # try: + # if namespace not in await controller.list_models(): + # raise N2VCNotFound(message="Model {} does not exist".format(namespace)) + + # model = await controller.get_model(namespace) + # with open(kdu_model, "r") as f: + # bundle = yaml.safe_load(f) + + # """ + # { + # 'description': 'Test bundle', + # 'bundle': 'kubernetes', + # 'applications': { + # 'mariadb-k8s': { + # 'charm': 'cs:~charmed-osm/mariadb-k8s-20', + # 'scale': 1, + # 'options': { + # 'password': 'manopw', + # 'root_password': 'osm4u', + # 'user': 'mano' + # }, + # 'series': 'kubernetes' + # } + # } + # } + # """ + # # TODO: This should be returned in an agreed-upon format + # for name in bundle["applications"]: + # self.log.debug(model.applications) + # application = model.applications[name] + # self.log.debug(application) + + # path = bundle["applications"][name]["charm"] + + # try: + # await application.upgrade_charm(switch=path) + # except juju.errors.JujuError as ex: + # if "already running charm" in str(ex): + # # We're already running this version + # pass + # finally: + # if model: + # await model.disconnect() + # await controller.disconnect() + # return True """Rollback""" async def rollback( - self, cluster_uuid: str, kdu_instance: str, revision: int = 0, + self, + cluster_uuid: str, + kdu_instance: str, + revision: int = 0, ) -> str: """Rollback a model @@ -510,18 +694,21 @@ class K8sJujuConnector(K8sConnector): :return: Returns True if successful, or raises an exception """ - if not self.authenticated: - self.log.debug("[uninstall] Connecting to controller") - await self.login(cluster_uuid) + + # controller = await self.get_controller(cluster_uuid) self.log.debug("[uninstall] Destroying model") - await self.controller.destroy_models(kdu_instance) + await self.libjuju.destroy_model(kdu_instance, total_timeout=3600) - self.log.debug("[uninstall] Model destroyed and disconnecting") - await self.logout() + # self.log.debug("[uninstall] Model destroyed and disconnecting") + # await controller.disconnect() return True + # TODO: Remove these commented lines + # if not self.authenticated: + # self.log.debug("[uninstall] Connecting to controller") + # await self.login(cluster_uuid) async def exec_primitive( self, @@ -543,9 +730,8 @@ class K8sJujuConnector(K8sConnector): :return: Returns the output of 
the action """ - if not self.authenticated: - self.log.debug("[exec_primitive] Connecting to controller") - await self.login(cluster_uuid) + + # controller = await self.get_controller(cluster_uuid) if not params or "application-name" not in params: raise K8sException( @@ -557,34 +743,40 @@ class K8sJujuConnector(K8sConnector): "[exec_primitive] Getting model " "kdu_instance: {}".format(kdu_instance) ) - - model = await self.get_model(kdu_instance, cluster_uuid) - application_name = params["application-name"] - application = model.applications[application_name] - - actions = await application.get_actions() + actions = await self.libjuju.get_actions(application_name, kdu_instance) if primitive_name not in actions: raise K8sException("Primitive {} not found".format(primitive_name)) + output, status = await self.libjuju.execute_action( + application_name, kdu_instance, primitive_name, **params + ) + # model = await self.get_model(kdu_instance, controller=controller) - unit = None - for u in application.units: - if await u.is_leader_from_status(): - unit = u - break + # application_name = params["application-name"] + # application = model.applications[application_name] - if unit is None: - raise K8sException("No leader unit found to execute action") + # actions = await application.get_actions() + # if primitive_name not in actions: + # raise K8sException("Primitive {} not found".format(primitive_name)) - self.log.debug("[exec_primitive] Running action: {}".format(primitive_name)) - action = await unit.run_action(primitive_name, **params) + # unit = None + # for u in application.units: + # if await u.is_leader_from_status(): + # unit = u + # break - output = await model.get_action_output(action_uuid=action.entity_id) - status = await model.get_action_status(uuid_or_prefix=action.entity_id) + # if unit is None: + # raise K8sException("No leader unit found to execute action") - status = ( - status[action.entity_id] if action.entity_id in status else "failed" - ) + # self.log.debug("[exec_primitive] Running action: {}".format(primitive_name)) + # action = await unit.run_action(primitive_name, **params) + + # output = await model.get_action_output(action_uuid=action.entity_id) + # status = await model.get_action_status(uuid_or_prefix=action.entity_id) + + # status = ( + # status[action.entity_id] if action.entity_id in status else "failed" + # ) if status != "completed": raise K8sException( @@ -597,10 +789,19 @@ class K8sJujuConnector(K8sConnector): error_msg = "Error executing primitive {}: {}".format(primitive_name, e) self.log.error(error_msg) raise K8sException(message=error_msg) + # finally: + # await controller.disconnect() + # TODO: Remove these commented lines: + # if not self.authenticated: + # self.log.debug("[exec_primitive] Connecting to controller") + # await self.login(cluster_uuid) """Introspection""" - async def inspect_kdu(self, kdu_model: str,) -> dict: + async def inspect_kdu( + self, + kdu_model: str, + ) -> dict: """Inspect a KDU Inspects a bundle and returns a dictionary of config parameters and @@ -613,8 +814,11 @@ class K8sJujuConnector(K8sConnector): """ kdu = {} + if not os.path.exists(kdu_model): + raise K8sException("file {} not found".format(kdu_model)) + with open(kdu_model, "r") as f: - bundle = yaml.safe_load(f) + bundle = yaml.safe_load(f.read()) """ { @@ -639,7 +843,10 @@ class K8sJujuConnector(K8sConnector): return kdu - async def help_kdu(self, kdu_model: str,) -> str: + async def help_kdu( + self, + kdu_model: str, + ) -> str: """View the README If available, 
returns the README of the bundle. @@ -660,7 +867,11 @@ class K8sJujuConnector(K8sConnector): return readme - async def status_kdu(self, cluster_uuid: str, kdu_instance: str,) -> dict: + async def status_kdu( + self, + cluster_uuid: str, + kdu_instance: str, + ) -> dict: """Get the status of the KDU Get the current status of the KDU instance. @@ -672,372 +883,602 @@ class K8sJujuConnector(K8sConnector): and deployment_time. """ status = {} + # controller = await self.get_controller(cluster_uuid) + # model = await self.get_model(kdu_instance, controller=controller) - model = await self.get_model( - self.get_namespace(cluster_uuid), cluster_uuid=cluster_uuid - ) - - # model = await self.get_model_by_uuid(cluster_uuid) - if model: - model_status = await model.get_status() - status = model_status.applications + # model_status = await model.get_status() + # status = model_status.applications + model_status = await self.libjuju.get_model_status(kdu_instance) + for name in model_status.applications: + application = model_status.applications[name] + status[name] = {"status": application["status"]["status"]} - for name in model_status.applications: - application = model_status.applications[name] - status[name] = {"status": application["status"]["status"]} - - if model.is_connected(): - await model.disconnect() + # await model.disconnect() + # await controller.disconnect() return status - # Private methods - async def add_k8s(self, cloud_name: str, credentials: str,) -> bool: - """Add a k8s cloud to Juju + async def get_services( + self, cluster_uuid: str, kdu_instance: str, namespace: str + ) -> list: + """Return a list of services of a kdu_instance""" - Adds a Kubernetes cloud to Juju, so it can be bootstrapped with a - Juju Controller. + credentials = self.get_credentials(cluster_uuid=cluster_uuid) - :param cloud_name str: The name of the cloud to add. - :param credentials dict: A dictionary representing the output of - `kubectl config view --raw`. + # config_path = "/tmp/{}".format(cluster_uuid) + # config_file = "{}/config".format(config_path) - :returns: True if successful, otherwise raises an exception. 
- """ + # if not os.path.exists(config_path): + # os.makedirs(config_path) + # with open(config_file, "w") as f: + # f.write(credentials) - cmd = [self.juju_command, "add-k8s", "--local", cloud_name] - self.log.debug(cmd) + kubecfg = tempfile.NamedTemporaryFile() + with open(kubecfg.name, "w") as kubecfg_file: + kubecfg_file.write(credentials) + kubectl = Kubectl(config_file=kubecfg.name) - process = await asyncio.create_subprocess_exec( - *cmd, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - stdin=asyncio.subprocess.PIPE, + return kubectl.get_services( + field_selector="metadata.namespace={}".format(kdu_instance) ) - # Feed the process the credentials - process.stdin.write(credentials.encode("utf-8")) - await process.stdin.drain() - process.stdin.close() + async def get_service( + self, cluster_uuid: str, service_name: str, namespace: str + ) -> object: + """Return data for a specific service inside a namespace""" - _stdout, stderr = await process.communicate() + credentials = self.get_credentials(cluster_uuid=cluster_uuid) - return_code = process.returncode + # config_path = "/tmp/{}".format(cluster_uuid) + # config_file = "{}/config".format(config_path) - self.log.debug("add-k8s return code: {}".format(return_code)) + # if not os.path.exists(config_path): + # os.makedirs(config_path) + # with open(config_file, "w") as f: + # f.write(credentials) - if return_code > 0: - raise Exception(stderr) + kubecfg = tempfile.NamedTemporaryFile() + with open(kubecfg.name, "w") as kubecfg_file: + kubecfg_file.write(credentials) + kubectl = Kubectl(config_file=kubecfg.name) - return True - - async def add_model(self, model_name: str, cluster_uuid: str,) -> Model: - """Adds a model to the controller - - Adds a new model to the Juju controller + return kubectl.get_services( + field_selector="metadata.name={},metadata.namespace={}".format( + service_name, namespace + ) + )[0] - :param model_name str: The name of the model to add. - :returns: The juju.model.Model object of the new model upon success or - raises an exception. + # Private methods + # async def add_k8s(self, cloud_name: str, credentials: str,) -> bool: + # """Add a k8s cloud to Juju + + # Adds a Kubernetes cloud to Juju, so it can be bootstrapped with a + # Juju Controller. + + # :param cloud_name str: The name of the cloud to add. + # :param credentials dict: A dictionary representing the output of + # `kubectl config view --raw`. + + # :returns: True if successful, otherwise raises an exception. + # """ + + # cmd = [self.juju_command, "add-k8s", "--local", cloud_name] + # self.log.debug(cmd) + + # process = await asyncio.create_subprocess_exec( + # *cmd, + # stdout=asyncio.subprocess.PIPE, + # stderr=asyncio.subprocess.PIPE, + # stdin=asyncio.subprocess.PIPE, + # ) + + # # Feed the process the credentials + # process.stdin.write(credentials.encode("utf-8")) + # await process.stdin.drain() + # process.stdin.close() + + # _stdout, stderr = await process.communicate() + + # return_code = process.returncode + + # self.log.debug("add-k8s return code: {}".format(return_code)) + + # if return_code > 0: + # raise Exception(stderr) + + # return True + + # async def add_model( + # self, model_name: str, cluster_uuid: str, controller: Controller + # ) -> Model: + # """Adds a model to the controller + + # Adds a new model to the Juju controller + + # :param model_name str: The name of the model to add. + # :param cluster_uuid str: ID of the cluster. 
+ # :param controller: Controller object in which the model will be added + # :returns: The juju.model.Model object of the new model upon success or + # raises an exception. + # """ + + # self.log.debug( + # "Adding model '{}' to cluster_uuid '{}'".format(model_name, cluster_uuid) + # ) + # model = None + # try: + # if self.juju_public_key is not None: + # model = await controller.add_model( + # model_name, config={"authorized-keys": self.juju_public_key} + # ) + # else: + # model = await controller.add_model(model_name) + # except Exception as ex: + # self.log.debug(ex) + # self.log.debug("Caught exception: {}".format(ex)) + # pass + + # return model + + # async def bootstrap( + # self, cloud_name: str, cluster_uuid: str, loadbalancer: bool + # ) -> bool: + # """Bootstrap a Kubernetes controller + + # Bootstrap a Juju controller inside the Kubernetes cluster + + # :param cloud_name str: The name of the cloud. + # :param cluster_uuid str: The UUID of the cluster to bootstrap. + # :param loadbalancer bool: If the controller should use loadbalancer or not. + # :returns: True upon success or raises an exception. + # """ + + # if not loadbalancer: + # cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid] + # else: + # """ + # For public clusters, specify that the controller service is using a + # LoadBalancer. + # """ + # cmd = [ + # self.juju_command, + # "bootstrap", + # cloud_name, + # cluster_uuid, + # "--config", + # "controller-service-type=loadbalancer", + # ] + + # self.log.debug( + # "Bootstrapping controller {} in cloud {}".format(cluster_uuid, cloud_name) + # ) + + # process = await asyncio.create_subprocess_exec( + # *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, + # ) + + # _stdout, stderr = await process.communicate() + + # return_code = process.returncode + + # if return_code > 0: + # # + # if b"already exists" not in stderr: + # raise Exception(stderr) + + # return True + + # async def destroy_controller(self, cluster_uuid: str) -> bool: + # """Destroy a Kubernetes controller + + # Destroy an existing Kubernetes controller. + + # :param cluster_uuid str: The UUID of the cluster to bootstrap. + # :returns: True upon success or raises an exception. + # """ + # cmd = [ + # self.juju_command, + # "destroy-controller", + # "--destroy-all-models", + # "--destroy-storage", + # "-y", + # cluster_uuid, + # ] + + # process = await asyncio.create_subprocess_exec( + # *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, + # ) + + # _stdout, stderr = await process.communicate() + + # return_code = process.returncode + + # if return_code > 0: + # # + # if "already exists" not in stderr: + # raise Exception(stderr) + + def get_credentials(self, cluster_uuid: str) -> str: """ - if not self.authenticated: - await self.login(cluster_uuid) - - self.log.debug( - "Adding model '{}' to cluster_uuid '{}'".format(model_name, cluster_uuid) - ) - try: - if self.juju_public_key is not None: - model = await self.controller.add_model( - model_name, config={"authorized-keys": self.juju_public_key} - ) - else: - model = await self.controller.add_model(model_name) - except Exception as ex: - self.log.debug(ex) - self.log.debug("Caught exception: {}".format(ex)) - pass - - return model - - async def bootstrap( - self, cloud_name: str, cluster_uuid: str, loadbalancer: bool - ) -> bool: - """Bootstrap a Kubernetes controller - - Bootstrap a Juju controller inside the Kubernetes cluster - - :param cloud_name str: The name of the cloud. 
- :param cluster_uuid str: The UUID of the cluster to bootstrap. - :param loadbalancer bool: If the controller should use loadbalancer or not. - :returns: True upon success or raises an exception. + Get Cluster Kubeconfig """ - - if not loadbalancer: - cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid] - else: - """ - For public clusters, specify that the controller service is using a - LoadBalancer. - """ - cmd = [ - self.juju_command, - "bootstrap", - cloud_name, - cluster_uuid, - "--config", - "controller-service-type=loadbalancer", - ] - - self.log.debug( - "Bootstrapping controller {} in cloud {}".format(cluster_uuid, cloud_name) + k8scluster = self.db.get_one( + "k8sclusters", q_filter={"_id": cluster_uuid}, fail_on_empty=False ) - process = await asyncio.create_subprocess_exec( - *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, + self.db.encrypt_decrypt_fields( + k8scluster.get("credentials"), + "decrypt", + ["password", "secret"], + schema_version=k8scluster["schema_version"], + salt=k8scluster["_id"], ) - _stdout, stderr = await process.communicate() + return yaml.safe_dump(k8scluster.get("credentials")) - return_code = process.returncode - - if return_code > 0: - # - if b"already exists" not in stderr: - raise Exception(stderr) - - return True - - async def destroy_controller(self, cluster_uuid: str) -> bool: - """Destroy a Kubernetes controller - - Destroy an existing Kubernetes controller. - - :param cluster_uuid str: The UUID of the cluster to bootstrap. - :returns: True upon success or raises an exception. + def _get_credential_name(self, cluster_uuid: str) -> str: """ - cmd = [ - self.juju_command, - "destroy-controller", - "--destroy-all-models", - "--destroy-storage", - "-y", - cluster_uuid, - ] - - process = await asyncio.create_subprocess_exec( - *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, - ) + Get credential name for a k8s cloud - _stdout, stderr = await process.communicate() + We cannot use the cluster_uuid for the credential name directly, + because it cannot start with a number, it must start with a letter. + Therefore, the k8s cloud credential name will be "cred-" followed + by the cluster uuid. - return_code = process.returncode + :param: cluster_uuid: Cluster UUID of the kubernetes cloud (=cloud_name) - if return_code > 0: - # - if "already exists" not in stderr: - raise Exception(stderr) - - def get_config(self, cluster_uuid: str,) -> dict: - """Get the cluster configuration - - Gets the configuration of the cluster - - :param cluster_uuid str: The UUID of the cluster. - :return: A dict upon success, or raises an exception. - """ - cluster_config = "{}/{}.yaml".format(self.fs.path, cluster_uuid) - if os.path.exists(cluster_config): - with open(cluster_config, "r") as f: - config = yaml.safe_load(f.read()) - return config - else: - raise Exception( - "Unable to locate configuration for cluster {}".format(cluster_uuid) - ) - - async def get_model(self, model_name: str, cluster_uuid: str,) -> Model: - """Get a model from the Juju Controller. - - Note: Model objects returned must call disconnected() before it goes - out of scope. - - :param model_name str: The name of the model to get - :return The juju.model.Model object if found, or None. + :return: Name to use for the credential name. 
""" - if not self.authenticated: - await self.login(cluster_uuid) - - model = None - models = await self.controller.list_models() - if model_name in models: - self.log.debug("Found model: {}".format(model_name)) - model = await self.controller.get_model(model_name) - return model - - def get_namespace(self, cluster_uuid: str,) -> str: + return "cred-{}".format(cluster_uuid) + + # def get_config(self, cluster_uuid: str,) -> dict: + # """Get the cluster configuration + + # Gets the configuration of the cluster + + # :param cluster_uuid str: The UUID of the cluster. + # :return: A dict upon success, or raises an exception. + # """ + + # juju_db = self.db.get_one("admin", {"_id": "juju"}) + # config = None + # for k in juju_db["k8sclusters"]: + # if k["_id"] == cluster_uuid: + # config = k["config"] + # self.db.encrypt_decrypt_fields( + # config, + # "decrypt", + # ["secret", "cacert"], + # schema_version="1.1", + # salt=k["_id"], + # ) + # break + # if not config: + # raise Exception( + # "Unable to locate configuration for cluster {}".format(cluster_uuid) + # ) + # return config + + # async def get_model(self, model_name: str, controller: Controller) -> Model: + # """Get a model from the Juju Controller. + + # Note: Model objects returned must call disconnected() before it goes + # out of scope. + + # :param model_name str: The name of the model to get + # :param controller Controller: Controller object + # :return The juju.model.Model object if found, or None. + # """ + + # models = await controller.list_models() + # if model_name not in models: + # raise N2VCNotFound("Model {} not found".format(model_name)) + # self.log.debug("Found model: {}".format(model_name)) + # return await controller.get_model(model_name) + + def get_namespace( + self, + cluster_uuid: str, + ) -> str: """Get the namespace UUID Gets the namespace's unique name :param cluster_uuid str: The UUID of the cluster :returns: The namespace UUID, or raises an exception """ - config = self.get_config(cluster_uuid) + # config = self.get_config(cluster_uuid) # Make sure the name is in the config - if "namespace" not in config: - raise Exception("Namespace not found.") + # if "namespace" not in config: + # raise Exception("Namespace not found.") # TODO: We want to make sure this is unique to the cluster, in case # the cluster is being reused. # Consider pre/appending the cluster id to the namespace string - return config["namespace"] + pass + + # TODO: Remove these lines of code + # async def has_model(self, model_name: str) -> bool: + # """Check if a model exists in the controller + + # Checks to see if a model exists in the connected Juju controller. + + # :param model_name str: The name of the model + # :return: A boolean indicating if the model exists + # """ + # models = await self.controller.list_models() + + # if model_name in models: + # return True + # return False + + # def is_local_k8s(self, credentials: str,) -> bool: + # """Check if a cluster is local + + # Checks if a cluster is running in the local host + + # :param credentials dict: A dictionary containing the k8s credentials + # :returns: A boolean if the cluster is running locally + # """ - async def has_model(self, model_name: str) -> bool: - """Check if a model exists in the controller + # creds = yaml.safe_load(credentials) - Checks to see if a model exists in the connected Juju controller. 
+ # if creds and os.getenv("OSMLCM_VCA_APIPROXY"): + # for cluster in creds["clusters"]: + # if "server" in cluster["cluster"]: + # if os.getenv("OSMLCM_VCA_APIPROXY") in cluster["cluster"]["server"]: + # return True - :param model_name str: The name of the model - :return: A boolean indicating if the model exists - """ - models = await self.controller.list_models() + # return False - if model_name in models: - return True - return False + # async def get_controller(self, cluster_uuid): + # """Login to the Juju controller.""" - def is_local_k8s(self, credentials: str,) -> bool: - """Check if a cluster is local + # config = self.get_config(cluster_uuid) - Checks if a cluster is running in the local host + # juju_endpoint = config["endpoint"] + # juju_user = config["username"] + # juju_secret = config["secret"] + # juju_ca_cert = config["cacert"] - :param credentials dict: A dictionary containing the k8s credentials - :returns: A boolean if the cluster is running locally - """ - creds = yaml.safe_load(credentials) - if os.getenv("OSMLCM_VCA_APIPROXY"): - host_ip = os.getenv("OSMLCM_VCA_APIPROXY") + # controller = Controller() - if creds and host_ip: - for cluster in creds["clusters"]: - if "server" in cluster["cluster"]: - if host_ip in cluster["cluster"]["server"]: - return True + # if juju_secret: + # self.log.debug( + # "Connecting to controller... ws://{} as {}".format( + # juju_endpoint, juju_user, + # ) + # ) + # try: + # await controller.connect( + # endpoint=juju_endpoint, + # username=juju_user, + # password=juju_secret, + # cacert=juju_ca_cert, + # ) + # self.log.debug("JujuApi: Logged into controller") + # return controller + # except Exception as ex: + # self.log.debug(ex) + # self.log.debug("Caught exception: {}".format(ex)) + # else: + # self.log.fatal("VCA credentials not configured.") - return False + # TODO: Remove these commented lines + # self.authenticated = False + # if self.authenticated: + # return - async def login(self, cluster_uuid): - """Login to the Juju controller.""" + # self.connecting = True + # juju_public_key = None + # self.authenticated = True + # Test: Make sure we have the credentials loaded + # async def logout(self): + # """Logout of the Juju controller.""" + # self.log.debug("[logout]") + # if not self.authenticated: + # return False - if self.authenticated: - return + # for model in self.models: + # self.log.debug("Logging out of model {}".format(model)) + # await self.models[model].disconnect() - self.connecting = True + # if self.controller: + # self.log.debug("Disconnecting controller {}".format(self.controller)) + # await self.controller.disconnect() + # self.controller = None - # Test: Make sure we have the credentials loaded - config = self.get_config(cluster_uuid) + # self.authenticated = False - self.juju_endpoint = config["endpoint"] - self.juju_user = config["username"] - self.juju_secret = config["secret"] - self.juju_ca_cert = config["cacert"] - self.juju_public_key = None + # async def remove_cloud(self, cloud_name: str,) -> bool: + # """Remove a k8s cloud from Juju - self.controller = Controller() + # Removes a Kubernetes cloud from Juju. - if self.juju_secret: - self.log.debug( - "Connecting to controller... 
ws://{} as {}/{}".format( - self.juju_endpoint, self.juju_user, self.juju_secret, - ) - ) - try: - await self.controller.connect( - endpoint=self.juju_endpoint, - username=self.juju_user, - password=self.juju_secret, - cacert=self.juju_ca_cert, - ) - self.authenticated = True - self.log.debug("JujuApi: Logged into controller") - except Exception as ex: - self.log.debug(ex) - self.log.debug("Caught exception: {}".format(ex)) - pass - else: - self.log.fatal("VCA credentials not configured.") - self.authenticated = False + # :param cloud_name str: The name of the cloud to add. - async def logout(self): - """Logout of the Juju controller.""" - self.log.debug("[logout]") - if not self.authenticated: - return False + # :returns: True if successful, otherwise raises an exception. + # """ - for model in self.models: - self.log.debug("Logging out of model {}".format(model)) - await self.models[model].disconnect() + # # Remove the bootstrapped controller + # cmd = [self.juju_command, "remove-k8s", "--client", cloud_name] + # process = await asyncio.create_subprocess_exec( + # *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, + # ) + + # _stdout, stderr = await process.communicate() - if self.controller: - self.log.debug("Disconnecting controller {}".format(self.controller)) - await self.controller.disconnect() - self.controller = None + # return_code = process.returncode + + # if return_code > 0: + # raise Exception(stderr) + + # # Remove the cloud from the local config + # cmd = [self.juju_command, "remove-cloud", "--client", cloud_name] + # process = await asyncio.create_subprocess_exec( + # *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, + # ) + + # _stdout, stderr = await process.communicate() + + # return_code = process.returncode + + # if return_code > 0: + # raise Exception(stderr) + + # return True + + # async def set_config(self, cluster_uuid: str, config: dict,) -> bool: + # """Save the cluster configuration + + # Saves the cluster information to the Mongo database + + # :param cluster_uuid str: The UUID of the cluster + # :param config dict: A dictionary containing the cluster configuration + # """ + + # juju_db = self.db.get_one("admin", {"_id": "juju"}) + + # k8sclusters = juju_db["k8sclusters"] if "k8sclusters" in juju_db else [] + # self.db.encrypt_decrypt_fields( + # config, + # "encrypt", + # ["secret", "cacert"], + # schema_version="1.1", + # salt=cluster_uuid, + # ) + # k8sclusters.append({"_id": cluster_uuid, "config": config}) + # self.db.set_one( + # table="admin", + # q_filter={"_id": "juju"}, + # update_dict={"k8sclusters": k8sclusters}, + # ) + + # Private methods to create/delete needed resources in the + # Kubernetes cluster to create the K8s cloud in Juju - self.authenticated = False + def _create_cluster_role( + self, + kubectl: Kubectl, + name: str, + labels: Dict[str, str], + ): + cluster_roles = kubectl.clients[RBAC_CLIENT].list_cluster_role( + field_selector="metadata.name={}".format(name) + ) - async def remove_cloud(self, cloud_name: str,) -> bool: - """Remove a k8s cloud from Juju + if len(cluster_roles.items) > 0: + raise Exception( + "Cluster role with metadata.name={} already exists".format(name) + ) - Removes a Kubernetes cloud from Juju. 
+ metadata = V1ObjectMeta(name=name, labels=labels, namespace=ADMIN_NAMESPACE) + # Cluster role + cluster_role = V1ClusterRole( + metadata=metadata, + rules=[ + V1PolicyRule(api_groups=["*"], resources=["*"], verbs=["*"]), + V1PolicyRule(non_resource_ur_ls=["*"], verbs=["*"]), + ], + ) - :param cloud_name str: The name of the cloud to add. + kubectl.clients[RBAC_CLIENT].create_cluster_role(cluster_role) - :returns: True if successful, otherwise raises an exception. - """ + def _delete_cluster_role(self, kubectl: Kubectl, name: str): + kubectl.clients[RBAC_CLIENT].delete_cluster_role(name) - # Remove the bootstrapped controller - cmd = [self.juju_command, "remove-k8s", "--client", cloud_name] - process = await asyncio.create_subprocess_exec( - *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, + def _create_service_account( + self, + kubectl: Kubectl, + name: str, + labels: Dict[str, str], + ): + service_accounts = kubectl.clients[CORE_CLIENT].list_namespaced_service_account( + ADMIN_NAMESPACE, field_selector="metadata.name={}".format(name) ) + if len(service_accounts.items) > 0: + raise Exception( + "Service account with metadata.name={} already exists".format(name) + ) - _stdout, stderr = await process.communicate() - - return_code = process.returncode - - if return_code > 0: - raise Exception(stderr) + metadata = V1ObjectMeta(name=name, labels=labels, namespace=ADMIN_NAMESPACE) + service_account = V1ServiceAccount(metadata=metadata) - # Remove the cloud from the local config - cmd = [self.juju_command, "remove-cloud", "--client", cloud_name] - process = await asyncio.create_subprocess_exec( - *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, + kubectl.clients[CORE_CLIENT].create_namespaced_service_account( + ADMIN_NAMESPACE, service_account ) - _stdout, stderr = await process.communicate() + def _delete_service_account(self, kubectl: Kubectl, name: str): + kubectl.clients[CORE_CLIENT].delete_namespaced_service_account( + name, ADMIN_NAMESPACE + ) - return_code = process.returncode + def _create_cluster_role_binding( + self, + kubectl: Kubectl, + name: str, + labels: Dict[str, str], + ): + role_bindings = kubectl.clients[RBAC_CLIENT].list_cluster_role_binding( + field_selector="metadata.name={}".format(name) + ) + if len(role_bindings.items) > 0: + raise Exception("Generated rbac id already exists") + + role_binding = V1ClusterRoleBinding( + metadata=V1ObjectMeta(name=name, labels=labels), + role_ref=V1RoleRef(kind="ClusterRole", name=name, api_group=""), + subjects=[ + V1Subject(kind="ServiceAccount", name=name, namespace=ADMIN_NAMESPACE) + ], + ) + kubectl.clients[RBAC_CLIENT].create_cluster_role_binding(role_binding) - if return_code > 0: - raise Exception(stderr) + def _delete_cluster_role_binding(self, kubectl: Kubectl, name: str): + kubectl.clients[RBAC_CLIENT].delete_cluster_role_binding(name) - return True + async def _get_secret_data(self, kubectl: Kubectl, name: str) -> (str, str): + v1_core = kubectl.clients[CORE_CLIENT] - async def set_config(self, cluster_uuid: str, config: dict,) -> bool: - """Save the cluster configuration - - Saves the cluster information to the file store + retries_limit = 10 + secret_name = None + while True: + retries_limit -= 1 + service_accounts = v1_core.list_namespaced_service_account( + ADMIN_NAMESPACE, field_selector="metadata.name={}".format(name) + ) + if len(service_accounts.items) == 0: + raise Exception( + "Service account not found with metadata.name={}".format(name) + ) + service_account = 
service_accounts.items[0] + if service_account.secrets and len(service_account.secrets) > 0: + secret_name = service_account.secrets[0].name + if secret_name is not None or not retries_limit: + break + if not secret_name: + raise Exception( + "Failed getting the secret from service account {}".format(name) + ) + secret = v1_core.list_namespaced_secret( + ADMIN_NAMESPACE, + field_selector="metadata.name={}".format(secret_name), + ).items[0] - :param cluster_uuid str: The UUID of the cluster - :param config dict: A dictionary containing the cluster configuration - :returns: Boolean upon success or raises an exception. - """ + token = secret.data[SERVICE_ACCOUNT_TOKEN_KEY] + client_certificate_data = secret.data[SERVICE_ACCOUNT_ROOT_CA_KEY] - cluster_config = "{}/{}.yaml".format(self.fs.path, cluster_uuid) - if not os.path.exists(cluster_config): - self.log.debug("Writing config to {}".format(cluster_config)) - with open(cluster_config, "w") as f: - f.write(yaml.dump(config, Dumper=yaml.Dumper)) + return ( + base64.b64decode(token).decode("utf-8"), + base64.b64decode(client_certificate_data).decode("utf-8"), + ) - return True + @staticmethod + def generate_kdu_instance_name(**kwargs): + db_dict = kwargs.get("db_dict") + kdu_name = kwargs.get("kdu_name", None) + if kdu_name: + kdu_instance = "{}-{}".format(kdu_name, db_dict["filter"]["_id"]) + else: + kdu_instance = db_dict["filter"]["_id"] + return kdu_instance
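
Reviewer note: the sketch below is a minimal, hypothetical usage of the new
constructor contract introduced by this change; it is not part of the diff.
K8sJujuConnector now requires a vca_config dict carrying "host", "user",
"secret" and a base64-encoded "ca_cert" (plus an optional "port", defaulting
to 17070), and init_env() creates the juju-credential-<rbac_id>
ServiceAccount, ClusterRole and ClusterRoleBinding in kube-system before
registering the cluster through libjuju.add_k8s(). All values below are
illustrative placeholders, the db/fs arguments stand in for the OSM common
database and filesystem objects, and the init_env(k8s_creds, ...) call
assumes the signature implied by the method body above.

    import logging

    from n2vc.k8s_juju_conn import K8sJujuConnector

    # Hypothetical VCA configuration; every value here is a placeholder.
    vca_config = {
        "host": "10.0.0.10",
        "user": "admin",
        "secret": "s3cr3t",
        "ca_cert": "<base64-encoded CA certificate>",
        # "port" may be omitted; the connector falls back to 17070.
    }

    async def register_cluster(db, fs, kubeconfig: str) -> str:
        """Register a k8s cluster as a Juju cloud (db/fs are OSM common objects)."""
        connector = K8sJujuConnector(
            fs=fs,
            db=db,
            log=logging.getLogger("n2vc.k8s_juju"),
            vca_config=vca_config,
        )
        # init_env() returns (cluster_uuid, True); on failure it deletes the
        # RBAC resources it created, and reset() removes them again later via
        # the "rbac-id" attribute stored in the cloud credential.
        cluster_uuid, _ = await connector.init_env(kubeconfig)
        return cluster_uuid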