X-Git-Url: https://osm.etsi.org/gitweb/?p=osm%2FN2VC.git;a=blobdiff_plain;f=n2vc%2Fk8s_juju_conn.py;h=1704ec0cc93c83f24a5ab9c97bbef5276827b1c1;hp=3d58385ede494b7e0378c0e3605a1bad2bb4808d;hb=47f653875c18b0bfe00627de5c554ad5d480ad1d;hpb=f6e9b00b6f7cd35e45ace4c84b53fe8d12b2438c diff --git a/n2vc/k8s_juju_conn.py b/n2vc/k8s_juju_conn.py index 3d58385..1704ec0 100644 --- a/n2vc/k8s_juju_conn.py +++ b/n2vc/k8s_juju_conn.py @@ -18,45 +18,31 @@ import uuid import yaml import tempfile import binascii -import base64 -from n2vc.exceptions import K8sException, N2VCBadArgumentsException +from n2vc.config import EnvironConfig +from n2vc.definitions import RelationEndpoint +from n2vc.exceptions import K8sException from n2vc.k8s_conn import K8sConnector -from n2vc.kubectl import Kubectl, CORE_CLIENT, RBAC_CLIENT +from n2vc.kubectl import Kubectl from .exceptions import MethodNotImplemented -from n2vc.utils import base64_to_cacert from n2vc.libjuju import Libjuju +from n2vc.utils import obj_to_dict, obj_to_yaml +from n2vc.store import MotorStore +from n2vc.vca.cloud import Cloud +from n2vc.vca.connection import get_connection -from kubernetes.client.models import ( - V1ClusterRole, - V1ObjectMeta, - V1PolicyRule, - V1ServiceAccount, - V1ClusterRoleBinding, - V1RoleRef, - V1Subject, -) -from typing import Dict - -SERVICE_ACCOUNT_TOKEN_KEY = "token" -SERVICE_ACCOUNT_ROOT_CA_KEY = "ca.crt" RBAC_LABEL_KEY_NAME = "rbac-id" - -ADMIN_NAMESPACE = "kube-system" RBAC_STACK_PREFIX = "juju-credential" -# from juju.bundle import BundleHandler -# import re -# import ssl -# from .vnf import N2VC - def generate_rbac_id(): return binascii.hexlify(os.urandom(4)).decode() class K8sJujuConnector(K8sConnector): + libjuju = None + def __init__( self, fs: object, @@ -66,7 +52,6 @@ class K8sJujuConnector(K8sConnector): log: object = None, loop: object = None, on_update_db=None, - vca_config: dict = None, ): """ :param fs: file system for kubernetes and helm configuration @@ -89,37 +74,10 @@ class K8sJujuConnector(K8sConnector): self.loop = loop or asyncio.get_event_loop() self.log.debug("Initializing K8S Juju connector") - required_vca_config = [ - "host", - "user", - "secret", - "ca_cert", - ] - if not vca_config or not all(k in vca_config for k in required_vca_config): - raise N2VCBadArgumentsException( - message="Missing arguments in vca_config: {}".format(vca_config), - bad_args=required_vca_config, - ) - port = vca_config["port"] if "port" in vca_config else 17070 - url = "{}:{}".format(vca_config["host"], port) - enable_os_upgrade = vca_config.get("enable_os_upgrade", True) - apt_mirror = vca_config.get("apt_mirror", None) - username = vca_config["user"] - secret = vca_config["secret"] - ca_cert = base64_to_cacert(vca_config["ca_cert"]) - - self.libjuju = Libjuju( - endpoint=url, - api_proxy=None, # Not needed for k8s charms - enable_os_upgrade=enable_os_upgrade, - apt_mirror=apt_mirror, - username=username, - password=secret, - cacert=ca_cert, - loop=self.loop, - log=self.log, - db=self.db, - ) + db_uri = EnvironConfig(prefixes=["OSMLCM_", "OSMMON_"]).get("database_uri") + self._store = MotorStore(db_uri) + self.loading_libjuju = asyncio.Lock(loop=self.loop) + self.log.debug("K8S Juju connector initialized") # TODO: Remove these commented lines: # self.authenticated = False @@ -133,6 +91,7 @@ class K8sJujuConnector(K8sConnector): k8s_creds: str, namespace: str = "kube-system", reuse_cluster_uuid: str = None, + **kwargs, ) -> (str, bool): """ It prepares a given K8s cluster environment to run Juju bundles. 
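# A minimal, self-contained sketch of how the RBAC identifiers introduced in this
# module are built: generate_rbac_id() (defined above) yields 8 hex characters, and
# the resources created for Juju are named "<RBAC_STACK_PREFIX>-<rbac_id>". The
# concrete values below are illustrative only.
import binascii
import os

RBAC_STACK_PREFIX = "juju-credential"


def generate_rbac_id():
    return binascii.hexlify(os.urandom(4)).decode()


rbac_id = generate_rbac_id()                                  # e.g. "a1b2c3d4"
metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id)
# -> "juju-credential-a1b2c3d4": the name given to the ClusterRole, ServiceAccount
#    and ClusterRoleBinding that init_env() creates for the Juju credential.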
@@ -142,106 +101,17 @@ class K8sJujuConnector(K8sConnector): :param namespace: optional namespace to be used for juju. By default, 'kube-system' will be used :param reuse_cluster_uuid: existing cluster uuid for reuse + :param: kwargs: Additional parameters + vca_id (str): VCA ID + :return: uuid of the K8s cluster and True if connector has installed some software in the cluster (on error, an exception will be raised) """ - - # """Bootstrapping - - # Bootstrapping cannot be done, by design, through the API. We need to - # use the CLI tools. - # """ - - # """ - # WIP: Workflow - - # 1. Has the environment already been bootstrapped? - # - Check the database to see if we have a record for this env - - # 2. If this is a new env, create it - # - Add the k8s cloud to Juju - # - Bootstrap - # - Record it in the database - - # 3. Connect to the Juju controller for this cloud - - # """ - # cluster_uuid = reuse_cluster_uuid - # if not cluster_uuid: - # cluster_uuid = str(uuid4()) - - ################################################## - # TODO: Pull info from db based on the namespace # - ################################################## - - ################################################### - # TODO: Make it idempotent, calling add-k8s and # - # bootstrap whenever reuse_cluster_uuid is passed # - # as parameter # - # `init_env` is called to initialize the K8s # - # cluster for juju. If this initialization fails, # - # it can be called again by LCM with the param # - # reuse_cluster_uuid, e.g. to try to fix it. # - ################################################### - - # This is a new cluster, so bootstrap it + libjuju = await self._get_libjuju(kwargs.get("vca_id")) cluster_uuid = reuse_cluster_uuid or str(uuid.uuid4()) - - # Is a local k8s cluster? - # localk8s = self.is_local_k8s(k8s_creds) - - # If the k8s is external, the juju controller needs a loadbalancer - # loadbalancer = False if localk8s else True - - # Name the new k8s cloud - # k8s_cloud = "k8s-{}".format(cluster_uuid) - - # self.log.debug("Adding k8s cloud {}".format(k8s_cloud)) - # await self.add_k8s(k8s_cloud, k8s_creds) - - # Bootstrap Juju controller - # self.log.debug("Bootstrapping...") - # await self.bootstrap(k8s_cloud, cluster_uuid, loadbalancer) - # self.log.debug("Bootstrap done.") - - # Get the controller information - - # Parse ~/.local/share/juju/controllers.yaml - # controllers.testing.api-endpoints|ca-cert|uuid - # self.log.debug("Getting controller endpoints") - # with open(os.path.expanduser("~/.local/share/juju/controllers.yaml")) as f: - # controllers = yaml.load(f, Loader=yaml.Loader) - # controller = controllers["controllers"][cluster_uuid] - # endpoints = controller["api-endpoints"] - # juju_endpoint = endpoints[0] - # juju_ca_cert = controller["ca-cert"] - - # Parse ~/.local/share/juju/accounts - # controllers.testing.user|password - # self.log.debug("Getting accounts") - # with open(os.path.expanduser("~/.local/share/juju/accounts.yaml")) as f: - # controllers = yaml.load(f, Loader=yaml.Loader) - # controller = controllers["controllers"][cluster_uuid] - - # juju_user = controller["user"] - # juju_secret = controller["password"] - - # config = { - # "endpoint": juju_endpoint, - # "username": juju_user, - # "secret": juju_secret, - # "cacert": juju_ca_cert, - # "loadbalancer": loadbalancer, - # } - - # Store the cluster configuration so it - # can be used for subsequent calls - kubecfg = tempfile.NamedTemporaryFile() - with open(kubecfg.name, "w") as kubecfg_file: - kubecfg_file.write(k8s_creds) - kubectl = 
Kubectl(config_file=kubecfg.name) + kubectl = self._get_kubectl(k8s_creds) # CREATING RESOURCES IN K8S rbac_id = generate_rbac_id() @@ -252,48 +122,49 @@ class K8sJujuConnector(K8sConnector): # if it fails in the middle of the process cleanup_data = [] try: - self._create_cluster_role( - kubectl, + self.log.debug("Initializing K8s cluster for juju") + kubectl.create_cluster_role( name=metadata_name, labels=labels, ) + self.log.debug("Cluster role created") cleanup_data.append( { - "delete": self._delete_cluster_role, - "args": (kubectl, metadata_name), + "delete": kubectl.delete_cluster_role, + "args": (metadata_name,), } ) - self._create_service_account( - kubectl, + kubectl.create_service_account( name=metadata_name, labels=labels, ) + self.log.debug("Service account created") cleanup_data.append( { - "delete": self._delete_service_account, - "args": (kubectl, metadata_name), + "delete": kubectl.delete_service_account, + "args": (metadata_name,), } ) - self._create_cluster_role_binding( - kubectl, + kubectl.create_cluster_role_binding( name=metadata_name, labels=labels, ) + self.log.debug("Role binding created") cleanup_data.append( { - "delete": self._delete_service_account, - "args": (kubectl, metadata_name), + "delete": kubectl.delete_service_account, + "args": (metadata_name,), } ) - token, client_cert_data = await self._get_secret_data( - kubectl, + token, client_cert_data = await kubectl.get_secret_data( metadata_name, ) default_storage_class = kubectl.get_default_storage_class() - await self.libjuju.add_k8s( + self.log.debug("Default storage class: {}".format(default_storage_class)) + await libjuju.add_k8s( name=cluster_uuid, rbac_id=rbac_id, token=token, @@ -302,35 +173,10 @@ class K8sJujuConnector(K8sConnector): storage_class=default_storage_class, credential_name=self._get_credential_name(cluster_uuid), ) - # self.log.debug("Setting config") - # await self.set_config(cluster_uuid, config) - - # Test connection - # controller = await self.get_controller(cluster_uuid) - # await controller.disconnect() - - # TODO: Remove these commented lines - # raise Exception("EOL") - # self.juju_public_key = None - # Login to the k8s cluster - # if not self.authenticated: - # await self.login(cluster_uuid) - - # We're creating a new cluster - # print("Getting model {}".format(self.get_namespace(cluster_uuid), - # cluster_uuid=cluster_uuid)) - # model = await self.get_model( - # self.get_namespace(cluster_uuid), - # cluster_uuid=cluster_uuid - # ) - - # Disconnect from the model - # if model and model.is_connected(): - # await model.disconnect() - + self.log.debug("K8s cluster added to juju controller") return cluster_uuid, True except Exception as e: - self.log.error("Error initializing k8scluster: {}".format(e)) + self.log.error("Error initializing k8scluster: {}".format(e), exc_info=True) if len(cleanup_data) > 0: self.log.debug("Cleaning up created resources in k8s cluster...") for item in cleanup_data: @@ -368,66 +214,52 @@ class K8sJujuConnector(K8sConnector): """Reset""" async def reset( - self, cluster_uuid: str, force: bool = False, uninstall_sw: bool = False + self, + cluster_uuid: str, + force: bool = False, + uninstall_sw: bool = False, + **kwargs, ) -> bool: """Reset a cluster Resets the Kubernetes cluster by removing the model that represents it. 
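# A minimal, self-contained model (with dummy delete functions) of the cleanup-stack
# pattern used by init_env() above: each created resource pushes its own delete
# callable plus arguments, and on any failure the stack is replayed so nothing
# created so far is left behind in the cluster. Names here are illustrative.
def _demo_cleanup_stack():
    def delete_cluster_role(name):
        print("deleting cluster role", name)

    def delete_service_account(name):
        print("deleting service account", name)

    metadata_name = "juju-credential-a1b2c3d4"
    cleanup_data = []
    try:
        # kubectl.create_cluster_role(...) would run here
        cleanup_data.append({"delete": delete_cluster_role, "args": (metadata_name,)})
        # kubectl.create_service_account(...) would run here
        cleanup_data.append({"delete": delete_service_account, "args": (metadata_name,)})
        raise RuntimeError("simulated failure while adding the cloud to Juju")
    except RuntimeError:
        for item in cleanup_data:
            item["delete"](*item["args"])   # unwind everything created so far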
:param cluster_uuid str: The UUID of the cluster to reset + :param force: Force reset + :param uninstall_sw: Boolean to uninstall sw + :param: kwargs: Additional parameters + vca_id (str): VCA ID + :return: Returns True if successful or raises an exception. """ try: - # Remove k8scluster from database - # self.log.debug("[reset] Removing k8scluster from juju database") - # juju_db = self.db.get_one("admin", {"_id": "juju"}) - - # for k in juju_db["k8sclusters"]: - # if k["_id"] == cluster_uuid: - # juju_db["k8sclusters"].remove(k) - # self.db.set_one( - # table="admin", - # q_filter={"_id": "juju"}, - # update_dict={"k8sclusters": juju_db["k8sclusters"]}, - # ) - # break - - # Destroy the controller (via CLI) - # self.log.debug("[reset] Destroying controller") - # await self.destroy_controller(cluster_uuid) self.log.debug("[reset] Removing k8s cloud") - # k8s_cloud = "k8s-{}".format(cluster_uuid) - # await self.remove_cloud(k8s_cloud) + libjuju = await self._get_libjuju(kwargs.get("vca_id")) - cloud_creds = await self.libjuju.get_cloud_credentials( - cluster_uuid, - self._get_credential_name(cluster_uuid), - ) + cloud = Cloud(cluster_uuid, self._get_credential_name(cluster_uuid)) + + cloud_creds = await libjuju.get_cloud_credentials(cloud) - await self.libjuju.remove_cloud(cluster_uuid) + await libjuju.remove_cloud(cluster_uuid) - kubecfg = self.get_credentials(cluster_uuid=cluster_uuid) + credentials = self.get_credentials(cluster_uuid=cluster_uuid) - kubecfg_file = tempfile.NamedTemporaryFile() - with open(kubecfg_file.name, "w") as f: - f.write(kubecfg) - kubectl = Kubectl(config_file=kubecfg_file.name) + kubectl = self._get_kubectl(credentials) delete_functions = [ - self._delete_cluster_role_binding, - self._delete_service_account, - self._delete_cluster_role, + kubectl.delete_cluster_role_binding, + kubectl.delete_service_account, + kubectl.delete_cluster_role, ] credential_attrs = cloud_creds[0].result["attrs"] if RBAC_LABEL_KEY_NAME in credential_attrs: rbac_id = credential_attrs[RBAC_LABEL_KEY_NAME] metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id) - delete_args = (kubectl, metadata_name) for delete_func in delete_functions: try: - delete_func(*delete_args) + delete_func(metadata_name) except Exception as e: self.log.warning("Cannot remove resource in K8s {}".format(e)) @@ -435,20 +267,6 @@ class K8sJujuConnector(K8sConnector): self.log.debug("Caught exception during reset: {}".format(e)) raise e return True - # TODO: Remove these commented lines - # if not self.authenticated: - # await self.login(cluster_uuid) - - # if self.controller.is_connected(): - # # Destroy the model - # namespace = self.get_namespace(cluster_uuid) - # if await self.has_model(namespace): - # self.log.debug("[reset] Destroying model") - # await self.controller.destroy_model(namespace, destroy_storage=True) - - # # Disconnect from the controller - # self.log.debug("[reset] Disconnecting controller") - # await self.logout() """Deployment""" @@ -456,17 +274,20 @@ class K8sJujuConnector(K8sConnector): self, cluster_uuid: str, kdu_model: str, + kdu_instance: str, atomic: bool = True, timeout: float = 1800, params: dict = None, db_dict: dict = None, kdu_name: str = None, namespace: str = None, + **kwargs, ) -> bool: """Install a bundle :param cluster_uuid str: The UUID of the cluster to install to :param kdu_model str: The name or path of a bundle to install + :param kdu_instance: Kdu instance name :param atomic bool: If set, waits until the model is active and resets the cluster on failure. 
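# A hedged sketch of how the new explicit `kdu_instance` argument of install() is
# expected to be produced; the rule mirrors K8sJujuConnector.generate_kdu_instance_name()
# added further down in this diff. The db_dict contents are illustrative.
def build_kdu_instance_name(db_dict, kdu_name=None):
    if kdu_name:
        return "{}-{}".format(kdu_name, db_dict["filter"]["_id"])
    return db_dict["filter"]["_id"]


example_db_dict = {"filter": {"_id": "0123456789abcdef"}}
print(build_kdu_instance_name(example_db_dict, kdu_name="squid"))
# -> "squid-0123456789abcdef": also used as the name of the Juju model the bundle
#    is deployed into.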
:param timeout int: The time, in seconds, to wait for the install @@ -474,23 +295,24 @@ class K8sJujuConnector(K8sConnector): :param params dict: Key-value pairs of instantiation parameters :param kdu_name: Name of the KDU instance to be installed :param namespace: K8s namespace to use for the KDU instance + :param kwargs: Additional parameters + vca_id (str): VCA ID :return: If successful, returns ? """ + libjuju = await self._get_libjuju(kwargs.get("vca_id")) bundle = kdu_model - # controller = await self.get_controller(cluster_uuid) - - ## - # Get or create the model, based on the NS - # uuid. - if not db_dict: raise K8sException("db_dict must be set") if not bundle: raise K8sException("bundle must be set") if bundle.startswith("cs:"): + # For Juju Bundles provided by the Charm Store + pass + elif bundle.startswith("ch:"): + # For Juju Bundles provided by the Charm Hub (this only works for juju version >= 2.9) pass elif bundle.startswith("http"): # Download the file @@ -500,20 +322,12 @@ class K8sJujuConnector(K8sConnector): os.chdir(new_workdir) bundle = "local:{}".format(kdu_model) - if kdu_name: - kdu_instance = "{}-{}".format(kdu_name, db_dict["filter"]["_id"]) - else: - kdu_instance = db_dict["filter"]["_id"] - self.log.debug("Checking for model named {}".format(kdu_instance)) # Create the new model self.log.debug("Adding model: {}".format(kdu_instance)) - await self.libjuju.add_model( - model_name=kdu_instance, - cloud_name=cluster_uuid, - credential_name=self._get_credential_name(cluster_uuid), - ) + cloud = Cloud(cluster_uuid, self._get_credential_name(cluster_uuid)) + await libjuju.add_model(kdu_instance, cloud) # if model: # TODO: Instantiation parameters @@ -532,47 +346,81 @@ class K8sJujuConnector(K8sConnector): previous_workdir = "/app/storage" self.log.debug("[install] deploying {}".format(bundle)) - await self.libjuju.deploy( + await libjuju.deploy( bundle, model_name=kdu_instance, wait=atomic, timeout=timeout ) - - # Get the application - # if atomic: - # # applications = model.applications - # self.log.debug("[install] Applications: {}".format(model.applications)) - # for name in model.applications: - # self.log.debug("[install] Waiting for {} to settle".format(name)) - # application = model.applications[name] - # try: - # # It's not enough to wait for all units to be active; - # # the application status needs to be active as well. 
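# An illustrative summary of how install() above classifies the kdu_model string;
# the "download the file" branch is elided in this diff, so the http case below is
# only a placeholder.
def classify_bundle(kdu_model):
    if kdu_model.startswith("cs:"):
        return "Charm Store bundle, deployed as-is"
    if kdu_model.startswith("ch:"):
        return "Charm Hub bundle (requires juju >= 2.9), deployed as-is"
    if kdu_model.startswith("http"):
        return "remote bundle, downloaded first (branch elided in the diff)"
    # Anything else is treated as a path inside the charm directory: install()
    # chdirs into it, deploys "local:<kdu_model>", then restores the previous
    # working directory.
    return "local bundle, deployed as local:{}".format(kdu_model)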
- # self.log.debug("Waiting for all units to be active...") - # await model.block_until( - # lambda: all( - # unit.agent_status == "idle" - # and application.status in ["active", "unknown"] - # and unit.workload_status in ["active", "unknown"] - # for unit in application.units - # ), - # timeout=timeout, - # ) - # self.log.debug("All units active.") - - # # TODO use asyncio.TimeoutError - # except concurrent.futures._base.TimeoutError: - # os.chdir(previous_workdir) - # self.log.debug("[install] Timeout exceeded; resetting cluster") - # await self.reset(cluster_uuid) - # return False - - # Wait for the application to be active - # if model.is_connected(): - # self.log.debug("[install] Disconnecting model") - # await model.disconnect() - # await controller.disconnect() os.chdir(previous_workdir) + if self.on_update_db: + await self.on_update_db( + cluster_uuid, + kdu_instance, + filter=db_dict["filter"], + vca_id=kwargs.get("vca_id"), + ) + return True - return kdu_instance + async def scale( + self, + kdu_instance: str, + scale: int, + resource_name: str, + total_timeout: float = 1800, + **kwargs, + ) -> bool: + """Scale an application in a model + + :param: kdu_instance str: KDU instance name + :param: scale int: Scale to which to set the application + :param: resource_name str: The application name in the Juju Bundle + :param: timeout float: The time, in seconds, to wait for the install + to finish + :param kwargs: Additional parameters + vca_id (str): VCA ID + + :return: If successful, returns True + """ + + try: + libjuju = await self._get_libjuju(kwargs.get("vca_id")) + await libjuju.scale_application( + model_name=kdu_instance, + application_name=resource_name, + scale=scale, + total_timeout=total_timeout, + ) + except Exception as e: + error_msg = "Error scaling application {} in kdu instance {}: {}".format( + resource_name, kdu_instance, e + ) + self.log.error(error_msg) + raise K8sException(message=error_msg) + return True + + async def get_scale_count( + self, + resource_name: str, + kdu_instance: str, + **kwargs, + ) -> int: + """Get an application scale count + + :param: resource_name str: The application name in the Juju Bundle + :param: kdu_instance str: KDU instance name + :param kwargs: Additional parameters + vca_id (str): VCA ID + :return: Return application instance count + """ + + try: + libjuju = await self._get_libjuju(kwargs.get("vca_id")) + status = await libjuju.get_model_status(kdu_instance) + return len(status.applications[resource_name].units) + except Exception as e: + error_msg = "Error getting scale count from application {} in kdu instance {}: {}".format( + resource_name, kdu_instance, e + ) + self.log.error(error_msg) + raise K8sException(message=error_msg) async def instances_list(self, cluster_uuid: str) -> list: """ @@ -615,59 +463,6 @@ class K8sJujuConnector(K8sConnector): initial release. 
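# A hedged usage sketch for the new scale()/get_scale_count() methods defined above.
# `connector` is assumed to be an initialized K8sJujuConnector; the instance and
# application names are illustrative.
async def scale_out_by_one(connector, kdu_instance="squid-0123456789abcdef",
                           application="squid", vca_id=None):
    current = await connector.get_scale_count(
        resource_name=application,
        kdu_instance=kdu_instance,
        vca_id=vca_id,
    )
    # scale() sets the absolute unit count of the application in the Juju model
    await connector.scale(
        kdu_instance=kdu_instance,
        scale=current + 1,
        resource_name=application,
        vca_id=vca_id,
    )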
""" raise MethodNotImplemented() - # TODO: Remove these commented lines - - # model = await self.get_model(namespace, cluster_uuid=cluster_uuid) - - # model = None - # namespace = self.get_namespace(cluster_uuid) - # controller = await self.get_controller(cluster_uuid) - - # try: - # if namespace not in await controller.list_models(): - # raise N2VCNotFound(message="Model {} does not exist".format(namespace)) - - # model = await controller.get_model(namespace) - # with open(kdu_model, "r") as f: - # bundle = yaml.safe_load(f) - - # """ - # { - # 'description': 'Test bundle', - # 'bundle': 'kubernetes', - # 'applications': { - # 'mariadb-k8s': { - # 'charm': 'cs:~charmed-osm/mariadb-k8s-20', - # 'scale': 1, - # 'options': { - # 'password': 'manopw', - # 'root_password': 'osm4u', - # 'user': 'mano' - # }, - # 'series': 'kubernetes' - # } - # } - # } - # """ - # # TODO: This should be returned in an agreed-upon format - # for name in bundle["applications"]: - # self.log.debug(model.applications) - # application = model.applications[name] - # self.log.debug(application) - - # path = bundle["applications"][name]["charm"] - - # try: - # await application.upgrade_charm(switch=path) - # except juju.errors.JujuError as ex: - # if "already running charm" in str(ex): - # # We're already running this version - # pass - # finally: - # if model: - # await model.disconnect() - # await controller.disconnect() - # return True """Rollback""" @@ -691,20 +486,26 @@ class K8sJujuConnector(K8sConnector): """Deletion""" - async def uninstall(self, cluster_uuid: str, kdu_instance: str) -> bool: + async def uninstall( + self, + cluster_uuid: str, + kdu_instance: str, + **kwargs, + ) -> bool: """Uninstall a KDU instance :param cluster_uuid str: The UUID of the cluster :param kdu_instance str: The unique name of the KDU instance + :param kwargs: Additional parameters + vca_id (str): VCA ID :return: Returns True if successful, or raises an exception """ - # controller = await self.get_controller(cluster_uuid) - self.log.debug("[uninstall] Destroying model") + libjuju = await self._get_libjuju(kwargs.get("vca_id")) - await self.libjuju.destroy_model(kdu_instance, total_timeout=3600) + await libjuju.destroy_model(kdu_instance, total_timeout=3600) # self.log.debug("[uninstall] Model destroyed and disconnecting") # await controller.disconnect() @@ -723,6 +524,7 @@ class K8sJujuConnector(K8sConnector): timeout: float = 300, params: dict = None, db_dict: dict = None, + **kwargs, ) -> str: """Exec primitive (Juju action) @@ -731,12 +533,13 @@ class K8sJujuConnector(K8sConnector): :param primitive_name: Name of action that will be executed :param timeout: Timeout for action execution :param params: Dictionary of all the parameters needed for the action - :db_dict: Dictionary for any additional data + :param db_dict: Dictionary for any additional data + :param kwargs: Additional parameters + vca_id (str): VCA ID :return: Returns the output of the action """ - - # controller = await self.get_controller(cluster_uuid) + libjuju = await self._get_libjuju(kwargs.get("vca_id")) if not params or "application-name" not in params: raise K8sException( @@ -749,44 +552,21 @@ class K8sJujuConnector(K8sConnector): "kdu_instance: {}".format(kdu_instance) ) application_name = params["application-name"] - actions = await self.libjuju.get_actions(application_name, kdu_instance) + actions = await libjuju.get_actions(application_name, kdu_instance) if primitive_name not in actions: raise K8sException("Primitive {} not 
found".format(primitive_name)) - output, status = await self.libjuju.execute_action( + output, status = await libjuju.execute_action( application_name, kdu_instance, primitive_name, **params ) - # model = await self.get_model(kdu_instance, controller=controller) - - # application_name = params["application-name"] - # application = model.applications[application_name] - - # actions = await application.get_actions() - # if primitive_name not in actions: - # raise K8sException("Primitive {} not found".format(primitive_name)) - - # unit = None - # for u in application.units: - # if await u.is_leader_from_status(): - # unit = u - # break - - # if unit is None: - # raise K8sException("No leader unit found to execute action") - - # self.log.debug("[exec_primitive] Running action: {}".format(primitive_name)) - # action = await unit.run_action(primitive_name, **params) - - # output = await model.get_action_output(action_uuid=action.entity_id) - # status = await model.get_action_status(uuid_or_prefix=action.entity_id) - - # status = ( - # status[action.entity_id] if action.entity_id in status else "failed" - # ) if status != "completed": raise K8sException( "status is not completed: {} output: {}".format(status, output) ) + if self.on_update_db: + await self.on_update_db( + cluster_uuid, kdu_instance, filter=db_dict["filter"] + ) return output @@ -794,12 +574,6 @@ class K8sJujuConnector(K8sConnector): error_msg = "Error executing primitive {}: {}".format(primitive_name, e) self.log.error(error_msg) raise K8sException(message=error_msg) - # finally: - # await controller.disconnect() - # TODO: Remove these commented lines: - # if not self.authenticated: - # self.log.debug("[exec_primitive] Connecting to controller") - # await self.login(cluster_uuid) """Introspection""" @@ -876,6 +650,9 @@ class K8sJujuConnector(K8sConnector): self, cluster_uuid: str, kdu_instance: str, + complete_status: bool = False, + yaml_format: bool = False, + **kwargs, ) -> dict: """Get the status of the KDU @@ -883,46 +660,117 @@ class K8sJujuConnector(K8sConnector): :param cluster_uuid str: The UUID of the cluster :param kdu_instance str: The unique id of the KDU instance + :param complete_status: To get the complete_status of the KDU + :param yaml_format: To get the status in proper format for NSR record + :param: kwargs: Additional parameters + vca_id (str): VCA ID :return: Returns a dictionary containing namespace, state, resources, - and deployment_time. 
+ and deployment_time and returns complete_status if complete_status is True """ + libjuju = await self._get_libjuju(kwargs.get("vca_id")) status = {} - # controller = await self.get_controller(cluster_uuid) - # model = await self.get_model(kdu_instance, controller=controller) - # model_status = await model.get_status() - # status = model_status.applications - model_status = await self.libjuju.get_model_status(kdu_instance) - for name in model_status.applications: - application = model_status.applications[name] - status[name] = {"status": application["status"]["status"]} + model_status = await libjuju.get_model_status(kdu_instance) - # await model.disconnect() - # await controller.disconnect() + if not complete_status: + for name in model_status.applications: + application = model_status.applications[name] + status[name] = {"status": application["status"]["status"]} + else: + if yaml_format: + return obj_to_yaml(model_status) + else: + return obj_to_dict(model_status) return status + async def add_relation( + self, + provider: RelationEndpoint, + requirer: RelationEndpoint, + ): + """ + Add relation between two charmed endpoints + + :param: provider: Provider relation endpoint + :param: requirer: Requirer relation endpoint + """ + self.log.debug(f"adding new relation between {provider} and {requirer}") + cross_model_relation = ( + provider.model_name != requirer.model_name + or requirer.vca_id != requirer.vca_id + ) + try: + if cross_model_relation: + # Cross-model relation + provider_libjuju = await self._get_libjuju(provider.vca_id) + requirer_libjuju = await self._get_libjuju(requirer.vca_id) + offer = await provider_libjuju.offer(provider) + if offer: + saas_name = await requirer_libjuju.consume( + requirer.model_name, offer, provider_libjuju + ) + await requirer_libjuju.add_relation( + requirer.model_name, + requirer.endpoint, + saas_name, + ) + else: + # Standard relation + vca_id = provider.vca_id + model = provider.model_name + libjuju = await self._get_libjuju(vca_id) + # add juju relations between two applications + await libjuju.add_relation( + model_name=model, + endpoint_1=provider.endpoint, + endpoint_2=requirer.endpoint, + ) + except Exception as e: + message = f"Error adding relation between {provider} and {requirer}: {e}" + self.log.error(message) + raise Exception(message=message) + + async def update_vca_status(self, vcastatus: dict, kdu_instance: str, **kwargs): + """ + Add all configs, actions, executed actions of all applications in a model to vcastatus dict + + :param vcastatus dict: dict containing vcastatus + :param kdu_instance str: The unique id of the KDU instance + :param: kwargs: Additional parameters + vca_id (str): VCA ID + + :return: None + """ + libjuju = await self._get_libjuju(kwargs.get("vca_id")) + try: + for model_name in vcastatus: + # Adding executed actions + vcastatus[model_name][ + "executedActions" + ] = await libjuju.get_executed_actions(kdu_instance) + + for application in vcastatus[model_name]["applications"]: + # Adding application actions + vcastatus[model_name]["applications"][application][ + "actions" + ] = await libjuju.get_actions(application, kdu_instance) + # Adding application configs + vcastatus[model_name]["applications"][application][ + "configs" + ] = await libjuju.get_application_configs(kdu_instance, application) + + except Exception as e: + self.log.debug("Error in updating vca status: {}".format(str(e))) + async def get_services( self, cluster_uuid: str, kdu_instance: str, namespace: str ) -> list: """Return a list of 
services of a kdu_instance""" credentials = self.get_credentials(cluster_uuid=cluster_uuid) - - # config_path = "/tmp/{}".format(cluster_uuid) - # config_file = "{}/config".format(config_path) - - # if not os.path.exists(config_path): - # os.makedirs(config_path) - # with open(config_file, "w") as f: - # f.write(credentials) - - kubecfg = tempfile.NamedTemporaryFile() - with open(kubecfg.name, "w") as kubecfg_file: - kubecfg_file.write(credentials) - kubectl = Kubectl(config_file=kubecfg.name) - + kubectl = self._get_kubectl(credentials) return kubectl.get_services( field_selector="metadata.namespace={}".format(kdu_instance) ) @@ -933,176 +781,13 @@ class K8sJujuConnector(K8sConnector): """Return data for a specific service inside a namespace""" credentials = self.get_credentials(cluster_uuid=cluster_uuid) - - # config_path = "/tmp/{}".format(cluster_uuid) - # config_file = "{}/config".format(config_path) - - # if not os.path.exists(config_path): - # os.makedirs(config_path) - # with open(config_file, "w") as f: - # f.write(credentials) - - kubecfg = tempfile.NamedTemporaryFile() - with open(kubecfg.name, "w") as kubecfg_file: - kubecfg_file.write(credentials) - kubectl = Kubectl(config_file=kubecfg.name) - + kubectl = self._get_kubectl(credentials) return kubectl.get_services( field_selector="metadata.name={},metadata.namespace={}".format( service_name, namespace ) )[0] - # Private methods - # async def add_k8s(self, cloud_name: str, credentials: str,) -> bool: - # """Add a k8s cloud to Juju - - # Adds a Kubernetes cloud to Juju, so it can be bootstrapped with a - # Juju Controller. - - # :param cloud_name str: The name of the cloud to add. - # :param credentials dict: A dictionary representing the output of - # `kubectl config view --raw`. - - # :returns: True if successful, otherwise raises an exception. - # """ - - # cmd = [self.juju_command, "add-k8s", "--local", cloud_name] - # self.log.debug(cmd) - - # process = await asyncio.create_subprocess_exec( - # *cmd, - # stdout=asyncio.subprocess.PIPE, - # stderr=asyncio.subprocess.PIPE, - # stdin=asyncio.subprocess.PIPE, - # ) - - # # Feed the process the credentials - # process.stdin.write(credentials.encode("utf-8")) - # await process.stdin.drain() - # process.stdin.close() - - # _stdout, stderr = await process.communicate() - - # return_code = process.returncode - - # self.log.debug("add-k8s return code: {}".format(return_code)) - - # if return_code > 0: - # raise Exception(stderr) - - # return True - - # async def add_model( - # self, model_name: str, cluster_uuid: str, controller: Controller - # ) -> Model: - # """Adds a model to the controller - - # Adds a new model to the Juju controller - - # :param model_name str: The name of the model to add. - # :param cluster_uuid str: ID of the cluster. - # :param controller: Controller object in which the model will be added - # :returns: The juju.model.Model object of the new model upon success or - # raises an exception. 
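# A minimal sketch of the field-selector strings built by get_services() and
# get_service() above; the model/namespace and service names are illustrative,
# the Kubectl calls themselves are unchanged from the diff.
kdu_instance = "squid-0123456789abcdef"   # the Juju model name doubles as the namespace
service_name = "squid"

all_services_selector = "metadata.namespace={}".format(kdu_instance)
single_service_selector = "metadata.name={},metadata.namespace={}".format(
    service_name, kdu_instance
)
# kubectl.get_services(field_selector=...) returns the matching Kubernetes services.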
- # """ - - # self.log.debug( - # "Adding model '{}' to cluster_uuid '{}'".format(model_name, cluster_uuid) - # ) - # model = None - # try: - # if self.juju_public_key is not None: - # model = await controller.add_model( - # model_name, config={"authorized-keys": self.juju_public_key} - # ) - # else: - # model = await controller.add_model(model_name) - # except Exception as ex: - # self.log.debug(ex) - # self.log.debug("Caught exception: {}".format(ex)) - # pass - - # return model - - # async def bootstrap( - # self, cloud_name: str, cluster_uuid: str, loadbalancer: bool - # ) -> bool: - # """Bootstrap a Kubernetes controller - - # Bootstrap a Juju controller inside the Kubernetes cluster - - # :param cloud_name str: The name of the cloud. - # :param cluster_uuid str: The UUID of the cluster to bootstrap. - # :param loadbalancer bool: If the controller should use loadbalancer or not. - # :returns: True upon success or raises an exception. - # """ - - # if not loadbalancer: - # cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid] - # else: - # """ - # For public clusters, specify that the controller service is using a - # LoadBalancer. - # """ - # cmd = [ - # self.juju_command, - # "bootstrap", - # cloud_name, - # cluster_uuid, - # "--config", - # "controller-service-type=loadbalancer", - # ] - - # self.log.debug( - # "Bootstrapping controller {} in cloud {}".format(cluster_uuid, cloud_name) - # ) - - # process = await asyncio.create_subprocess_exec( - # *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, - # ) - - # _stdout, stderr = await process.communicate() - - # return_code = process.returncode - - # if return_code > 0: - # # - # if b"already exists" not in stderr: - # raise Exception(stderr) - - # return True - - # async def destroy_controller(self, cluster_uuid: str) -> bool: - # """Destroy a Kubernetes controller - - # Destroy an existing Kubernetes controller. - - # :param cluster_uuid str: The UUID of the cluster to bootstrap. - # :returns: True upon success or raises an exception. - # """ - # cmd = [ - # self.juju_command, - # "destroy-controller", - # "--destroy-all-models", - # "--destroy-storage", - # "-y", - # cluster_uuid, - # ] - - # process = await asyncio.create_subprocess_exec( - # *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, - # ) - - # _stdout, stderr = await process.communicate() - - # return_code = process.returncode - - # if return_code > 0: - # # - # if "already exists" not in stderr: - # raise Exception(stderr) - def get_credentials(self, cluster_uuid: str) -> str: """ Get Cluster Kubeconfig @@ -1136,51 +821,6 @@ class K8sJujuConnector(K8sConnector): """ return "cred-{}".format(cluster_uuid) - # def get_config(self, cluster_uuid: str,) -> dict: - # """Get the cluster configuration - - # Gets the configuration of the cluster - - # :param cluster_uuid str: The UUID of the cluster. - # :return: A dict upon success, or raises an exception. 
- # """ - - # juju_db = self.db.get_one("admin", {"_id": "juju"}) - # config = None - # for k in juju_db["k8sclusters"]: - # if k["_id"] == cluster_uuid: - # config = k["config"] - # self.db.encrypt_decrypt_fields( - # config, - # "decrypt", - # ["secret", "cacert"], - # schema_version="1.1", - # salt=k["_id"], - # ) - # break - # if not config: - # raise Exception( - # "Unable to locate configuration for cluster {}".format(cluster_uuid) - # ) - # return config - - # async def get_model(self, model_name: str, controller: Controller) -> Model: - # """Get a model from the Juju Controller. - - # Note: Model objects returned must call disconnected() before it goes - # out of scope. - - # :param model_name str: The name of the model to get - # :param controller Controller: Controller object - # :return The juju.model.Model object if found, or None. - # """ - - # models = await controller.list_models() - # if model_name not in models: - # raise N2VCNotFound("Model {} not found".format(model_name)) - # self.log.debug("Found model: {}".format(model_name)) - # return await controller.get_model(model_name) - def get_namespace( self, cluster_uuid: str, @@ -1191,289 +831,50 @@ class K8sJujuConnector(K8sConnector): :param cluster_uuid str: The UUID of the cluster :returns: The namespace UUID, or raises an exception """ - # config = self.get_config(cluster_uuid) - - # Make sure the name is in the config - # if "namespace" not in config: - # raise Exception("Namespace not found.") - - # TODO: We want to make sure this is unique to the cluster, in case - # the cluster is being reused. - # Consider pre/appending the cluster id to the namespace string pass - # TODO: Remove these lines of code - # async def has_model(self, model_name: str) -> bool: - # """Check if a model exists in the controller - - # Checks to see if a model exists in the connected Juju controller. - - # :param model_name str: The name of the model - # :return: A boolean indicating if the model exists - # """ - # models = await self.controller.list_models() - - # if model_name in models: - # return True - # return False - - # def is_local_k8s(self, credentials: str,) -> bool: - # """Check if a cluster is local - - # Checks if a cluster is running in the local host - - # :param credentials dict: A dictionary containing the k8s credentials - # :returns: A boolean if the cluster is running locally - # """ - - # creds = yaml.safe_load(credentials) - - # if creds and os.getenv("OSMLCM_VCA_APIPROXY"): - # for cluster in creds["clusters"]: - # if "server" in cluster["cluster"]: - # if os.getenv("OSMLCM_VCA_APIPROXY") in cluster["cluster"]["server"]: - # return True - - # return False - - # async def get_controller(self, cluster_uuid): - # """Login to the Juju controller.""" - - # config = self.get_config(cluster_uuid) - - # juju_endpoint = config["endpoint"] - # juju_user = config["username"] - # juju_secret = config["secret"] - # juju_ca_cert = config["cacert"] - - # controller = Controller() - - # if juju_secret: - # self.log.debug( - # "Connecting to controller... 
ws://{} as {}".format( - # juju_endpoint, juju_user, - # ) - # ) - # try: - # await controller.connect( - # endpoint=juju_endpoint, - # username=juju_user, - # password=juju_secret, - # cacert=juju_ca_cert, - # ) - # self.log.debug("JujuApi: Logged into controller") - # return controller - # except Exception as ex: - # self.log.debug(ex) - # self.log.debug("Caught exception: {}".format(ex)) - # else: - # self.log.fatal("VCA credentials not configured.") - - # TODO: Remove these commented lines - # self.authenticated = False - # if self.authenticated: - # return - - # self.connecting = True - # juju_public_key = None - # self.authenticated = True - # Test: Make sure we have the credentials loaded - # async def logout(self): - # """Logout of the Juju controller.""" - # self.log.debug("[logout]") - # if not self.authenticated: - # return False - - # for model in self.models: - # self.log.debug("Logging out of model {}".format(model)) - # await self.models[model].disconnect() - - # if self.controller: - # self.log.debug("Disconnecting controller {}".format(self.controller)) - # await self.controller.disconnect() - # self.controller = None - - # self.authenticated = False - - # async def remove_cloud(self, cloud_name: str,) -> bool: - # """Remove a k8s cloud from Juju - - # Removes a Kubernetes cloud from Juju. - - # :param cloud_name str: The name of the cloud to add. - - # :returns: True if successful, otherwise raises an exception. - # """ - - # # Remove the bootstrapped controller - # cmd = [self.juju_command, "remove-k8s", "--client", cloud_name] - # process = await asyncio.create_subprocess_exec( - # *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, - # ) - - # _stdout, stderr = await process.communicate() - - # return_code = process.returncode - - # if return_code > 0: - # raise Exception(stderr) - - # # Remove the cloud from the local config - # cmd = [self.juju_command, "remove-cloud", "--client", cloud_name] - # process = await asyncio.create_subprocess_exec( - # *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, - # ) - - # _stdout, stderr = await process.communicate() - - # return_code = process.returncode - - # if return_code > 0: - # raise Exception(stderr) - - # return True - - # async def set_config(self, cluster_uuid: str, config: dict,) -> bool: - # """Save the cluster configuration - - # Saves the cluster information to the Mongo database - - # :param cluster_uuid str: The UUID of the cluster - # :param config dict: A dictionary containing the cluster configuration - # """ - - # juju_db = self.db.get_one("admin", {"_id": "juju"}) - - # k8sclusters = juju_db["k8sclusters"] if "k8sclusters" in juju_db else [] - # self.db.encrypt_decrypt_fields( - # config, - # "encrypt", - # ["secret", "cacert"], - # schema_version="1.1", - # salt=cluster_uuid, - # ) - # k8sclusters.append({"_id": cluster_uuid, "config": config}) - # self.db.set_one( - # table="admin", - # q_filter={"_id": "juju"}, - # update_dict={"k8sclusters": k8sclusters}, - # ) - - # Private methods to create/delete needed resources in the - # Kubernetes cluster to create the K8s cloud in Juju - - def _create_cluster_role( - self, - kubectl: Kubectl, - name: str, - labels: Dict[str, str], - ): - cluster_roles = kubectl.clients[RBAC_CLIENT].list_cluster_role( - field_selector="metadata.name={}".format(name) - ) - - if len(cluster_roles.items) > 0: - raise Exception( - "Cluster role with metadata.name={} already exists".format(name) - ) - - metadata = V1ObjectMeta(name=name, 
labels=labels, namespace=ADMIN_NAMESPACE) - # Cluster role - cluster_role = V1ClusterRole( - metadata=metadata, - rules=[ - V1PolicyRule(api_groups=["*"], resources=["*"], verbs=["*"]), - V1PolicyRule(non_resource_ur_ls=["*"], verbs=["*"]), - ], - ) - - kubectl.clients[RBAC_CLIENT].create_cluster_role(cluster_role) - - def _delete_cluster_role(self, kubectl: Kubectl, name: str): - kubectl.clients[RBAC_CLIENT].delete_cluster_role(name) - - def _create_service_account( - self, - kubectl: Kubectl, - name: str, - labels: Dict[str, str], - ): - service_accounts = kubectl.clients[CORE_CLIENT].list_namespaced_service_account( - ADMIN_NAMESPACE, field_selector="metadata.name={}".format(name) - ) - if len(service_accounts.items) > 0: - raise Exception( - "Service account with metadata.name={} already exists".format(name) - ) - - metadata = V1ObjectMeta(name=name, labels=labels, namespace=ADMIN_NAMESPACE) - service_account = V1ServiceAccount(metadata=metadata) - - kubectl.clients[CORE_CLIENT].create_namespaced_service_account( - ADMIN_NAMESPACE, service_account - ) - - def _delete_service_account(self, kubectl: Kubectl, name: str): - kubectl.clients[CORE_CLIENT].delete_namespaced_service_account( - name, ADMIN_NAMESPACE - ) - - def _create_cluster_role_binding( - self, - kubectl: Kubectl, - name: str, - labels: Dict[str, str], - ): - role_bindings = kubectl.clients[RBAC_CLIENT].list_cluster_role_binding( - field_selector="metadata.name={}".format(name) - ) - if len(role_bindings.items) > 0: - raise Exception("Generated rbac id already exists") - - role_binding = V1ClusterRoleBinding( - metadata=V1ObjectMeta(name=name, labels=labels), - role_ref=V1RoleRef(kind="ClusterRole", name=name, api_group=""), - subjects=[ - V1Subject(kind="ServiceAccount", name=name, namespace=ADMIN_NAMESPACE) - ], - ) - kubectl.clients[RBAC_CLIENT].create_cluster_role_binding(role_binding) - - def _delete_cluster_role_binding(self, kubectl: Kubectl, name: str): - kubectl.clients[RBAC_CLIENT].delete_cluster_role_binding(name) + @staticmethod + def generate_kdu_instance_name(**kwargs): + db_dict = kwargs.get("db_dict") + kdu_name = kwargs.get("kdu_name", None) + if kdu_name: + kdu_instance = "{}-{}".format(kdu_name, db_dict["filter"]["_id"]) + else: + kdu_instance = db_dict["filter"]["_id"] + return kdu_instance - async def _get_secret_data(self, kubectl: Kubectl, name: str) -> (str, str): - v1_core = kubectl.clients[CORE_CLIENT] + async def _get_libjuju(self, vca_id: str = None) -> Libjuju: + """ + Get libjuju object - retries_limit = 10 - secret_name = None - while True: - retries_limit -= 1 - service_accounts = v1_core.list_namespaced_service_account( - ADMIN_NAMESPACE, field_selector="metadata.name={}".format(name) - ) - if len(service_accounts.items) == 0: - raise Exception( - "Service account not found with metadata.name={}".format(name) - ) - service_account = service_accounts.items[0] - if service_account.secrets and len(service_account.secrets) > 0: - secret_name = service_account.secrets[0].name - if secret_name is not None or not retries_limit: - break - if not secret_name: - raise Exception( - "Failed getting the secret from service account {}".format(name) + :param: vca_id: VCA ID + If None, get a libjuju object with a Connection to the default VCA + Else, geta libjuju object with a Connection to the specified VCA + """ + if not vca_id: + while self.loading_libjuju.locked(): + await asyncio.sleep(0.1) + if not self.libjuju: + async with self.loading_libjuju: + vca_connection = await get_connection(self._store) + 
self.libjuju = Libjuju(vca_connection, loop=self.loop, log=self.log) + return self.libjuju + else: + vca_connection = await get_connection(self._store, vca_id) + return Libjuju( + vca_connection, + loop=self.loop, + log=self.log, + n2vc=self, ) - secret = v1_core.list_namespaced_secret( - ADMIN_NAMESPACE, - field_selector="metadata.name={}".format(secret_name), - ).items[0] - token = secret.data[SERVICE_ACCOUNT_TOKEN_KEY] - client_certificate_data = secret.data[SERVICE_ACCOUNT_ROOT_CA_KEY] + def _get_kubectl(self, credentials: str) -> Kubectl: + """ + Get Kubectl object - return ( - base64.b64decode(token).decode("utf-8"), - base64.b64decode(client_certificate_data).decode("utf-8"), - ) + :param: kubeconfig_credentials: Kubeconfig credentials + """ + kubecfg = tempfile.NamedTemporaryFile() + with open(kubecfg.name, "w") as kubecfg_file: + kubecfg_file.write(credentials) + return Kubectl(config_file=kubecfg.name)
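# A self-contained, simplified model of the lazy-initialization pattern that
# _get_libjuju() uses above for the default VCA: wait while another coroutine is
# building the shared object, then create it exactly once under the lock. The real
# Libjuju/connection objects are replaced by a plain string here.
import asyncio


class _LazyDefault:
    def __init__(self):
        self._lock = asyncio.Lock()
        self._obj = None

    async def get(self):
        while self._lock.locked():          # someone else is already building it
            await asyncio.sleep(0.1)
        if not self._obj:
            async with self._lock:
                # stands in for: vca_connection = await get_connection(store)
                #                Libjuju(vca_connection, loop=..., log=...)
                self._obj = "default-libjuju"
        return self._obj


async def _demo():
    lazy = _LazyDefault()
    return await asyncio.gather(lazy.get(), lazy.get())
# asyncio.get_event_loop().run_until_complete(_demo()) -> the same object for both callers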