diff --git a/n2vc/k8s_juju_conn.py b/n2vc/k8s_juju_conn.py
index db34994..3d58385 100644
--- a/n2vc/k8s_juju_conn.py
+++ b/n2vc/k8s_juju_conn.py
@@ -46,6 +46,11 @@ RBAC_LABEL_KEY_NAME = "rbac-id"
 ADMIN_NAMESPACE = "kube-system"
 RBAC_STACK_PREFIX = "juju-credential"
 
+# from juju.bundle import BundleHandler
+# import re
+# import ssl
+# from .vnf import N2VC
+
 
 def generate_rbac_id():
     return binascii.hexlify(os.urandom(4)).decode()
@@ -142,8 +147,97 @@ class K8sJujuConnector(K8sConnector):
             (on error, an exception will be raised)
         """
 
+        # """Bootstrapping
+
+        # Bootstrapping cannot be done, by design, through the API. We need to
+        # use the CLI tools.
+        # """
+
+        # """
+        # WIP: Workflow
+
+        # 1. Has the environment already been bootstrapped?
+        #    - Check the database to see if we have a record for this env
+
+        # 2. If this is a new env, create it
+        #    - Add the k8s cloud to Juju
+        #    - Bootstrap
+        #    - Record it in the database
+
+        # 3. Connect to the Juju controller for this cloud
+
+        # """
+        # cluster_uuid = reuse_cluster_uuid
+        # if not cluster_uuid:
+        #     cluster_uuid = str(uuid4())
+
+        ##################################################
+        # TODO: Pull info from db based on the namespace #
+        ##################################################
+
+        ###################################################
+        # TODO: Make it idempotent, calling add-k8s and  #
+        # bootstrap whenever reuse_cluster_uuid is passed #
+        # as a parameter                                  #
+        # `init_env` is called to initialize the K8s      #
+        # cluster for juju. If this initialization fails, #
+        # it can be called again by LCM with the param    #
+        # reuse_cluster_uuid, e.g. to try to fix it.      #
+        ###################################################
+
+        # This is a new cluster, so bootstrap it
+        cluster_uuid = reuse_cluster_uuid or str(uuid.uuid4())
+
+        # Is this a local k8s cluster?
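# --- Editor's note (not part of this commit) --------------------------------
# The commented-out line below calls an is_local_k8s() helper that appears,
# also commented out, near the end of this file. A minimal, self-contained
# sketch of that check is shown here; the OSMLCM_VCA_APIPROXY environment
# variable and the kubeconfig layout are assumptions carried over from that
# commented code.
import os

import yaml


def is_local_k8s(credentials: str) -> bool:
    """Return True if any kubeconfig server URL points at the local API proxy."""
    creds = yaml.safe_load(credentials)
    api_proxy = os.getenv("OSMLCM_VCA_APIPROXY")
    if not creds or not api_proxy:
        return False
    return any(
        api_proxy in cluster.get("cluster", {}).get("server", "")
        for cluster in creds.get("clusters", [])
    )
# -----------------------------------------------------------------------------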
+        # localk8s = self.is_local_k8s(k8s_creds)
+
+        # If the k8s cluster is external, the juju controller needs a loadbalancer
+        # loadbalancer = False if localk8s else True
+
+        # Name the new k8s cloud
+        # k8s_cloud = "k8s-{}".format(cluster_uuid)
+
+        # self.log.debug("Adding k8s cloud {}".format(k8s_cloud))
+        # await self.add_k8s(k8s_cloud, k8s_creds)
+
+        # Bootstrap Juju controller
+        # self.log.debug("Bootstrapping...")
+        # await self.bootstrap(k8s_cloud, cluster_uuid, loadbalancer)
+        # self.log.debug("Bootstrap done.")
+
+        # Get the controller information
+
+        # Parse ~/.local/share/juju/controllers.yaml
+        # controllers.testing.api-endpoints|ca-cert|uuid
+        # self.log.debug("Getting controller endpoints")
+        # with open(os.path.expanduser("~/.local/share/juju/controllers.yaml")) as f:
+        #     controllers = yaml.load(f, Loader=yaml.Loader)
+        #     controller = controllers["controllers"][cluster_uuid]
+        #     endpoints = controller["api-endpoints"]
+        #     juju_endpoint = endpoints[0]
+        #     juju_ca_cert = controller["ca-cert"]
+
+        # Parse ~/.local/share/juju/accounts.yaml
+        # controllers.testing.user|password
+        # self.log.debug("Getting accounts")
+        # with open(os.path.expanduser("~/.local/share/juju/accounts.yaml")) as f:
+        #     controllers = yaml.load(f, Loader=yaml.Loader)
+        #     controller = controllers["controllers"][cluster_uuid]
+
+        #     juju_user = controller["user"]
+        #     juju_secret = controller["password"]
+
+        # config = {
+        #     "endpoint": juju_endpoint,
+        #     "username": juju_user,
+        #     "secret": juju_secret,
+        #     "cacert": juju_ca_cert,
+        #     "loadbalancer": loadbalancer,
+        # }
+
+        # Store the cluster configuration so it
+        # can be used for subsequent calls
+        kubecfg = tempfile.NamedTemporaryFile()
+        with open(kubecfg.name, "w") as kubecfg_file:
+            kubecfg_file.write(k8s_creds)
@@ -208,6 +302,32 @@ class K8sJujuConnector(K8sConnector):
                 storage_class=default_storage_class,
                 credential_name=self._get_credential_name(cluster_uuid),
             )
+            # self.log.debug("Setting config")
+            # await self.set_config(cluster_uuid, config)
+
+            # Test connection
+            # controller = await self.get_controller(cluster_uuid)
+            # await controller.disconnect()
+
+            # TODO: Remove these commented lines
+            # raise Exception("EOL")
+            # self.juju_public_key = None
+            # Login to the k8s cluster
+            # if not self.authenticated:
+            #     await self.login(cluster_uuid)
+
+            # We're creating a new cluster
+            # print("Getting model {}".format(self.get_namespace(cluster_uuid),
+            #     cluster_uuid=cluster_uuid))
+            # model = await self.get_model(
+            #     self.get_namespace(cluster_uuid),
+            #     cluster_uuid=cluster_uuid
+            # )
+
+            # Disconnect from the model
+            # if model and model.is_connected():
+            #     await model.disconnect()
+
             return cluster_uuid, True
         except Exception as e:
             self.log.error("Error initializing k8scluster: {}".format(e))
@@ -259,7 +379,26 @@ class K8sJujuConnector(K8sConnector):
         """
 
         try:
+            # Remove k8scluster from database
+            # self.log.debug("[reset] Removing k8scluster from juju database")
+            # juju_db = self.db.get_one("admin", {"_id": "juju"})
+
+            # for k in juju_db["k8sclusters"]:
+            #     if k["_id"] == cluster_uuid:
+            #         juju_db["k8sclusters"].remove(k)
+            #         self.db.set_one(
+            #             table="admin",
+            #             q_filter={"_id": "juju"},
+            #             update_dict={"k8sclusters": juju_db["k8sclusters"]},
+            #         )
+            #         break
+
+            # Destroy the controller (via CLI)
+            # self.log.debug("[reset] Destroying controller")
+            # await self.destroy_controller(cluster_uuid)
             self.log.debug("[reset] Removing k8s cloud")
+            # k8s_cloud = "k8s-{}".format(cluster_uuid)
+            # await self.remove_cloud(k8s_cloud)
             cloud_creds = await self.libjuju.get_cloud_credentials(
                 cluster_uuid,
@@ -296,6 +435,20 @@ class K8sJujuConnector(K8sConnector):
             self.log.debug("Caught exception during reset: {}".format(e))
             raise e
         return True
+        # TODO: Remove these commented lines
+        # if not self.authenticated:
+        #     await self.login(cluster_uuid)
+
+        # if self.controller.is_connected():
+        #     # Destroy the model
+        #     namespace = self.get_namespace(cluster_uuid)
+        #     if await self.has_model(namespace):
+        #         self.log.debug("[reset] Destroying model")
+        #         await self.controller.destroy_model(namespace, destroy_storage=True)
+
+        #     # Disconnect from the controller
+        #     self.log.debug("[reset] Disconnecting controller")
+        #     await self.logout()
 
     """Deployment"""
 
@@ -326,6 +479,12 @@ class K8sJujuConnector(K8sConnector):
         """
         bundle = kdu_model
 
+        # controller = await self.get_controller(cluster_uuid)
+
+        ##
+        # Get or create the model, based on the NS uuid.
+
         if not db_dict:
             raise K8sException("db_dict must be set")
         if not bundle:
@@ -376,7 +535,43 @@ class K8sJujuConnector(K8sConnector):
             await self.libjuju.deploy(
                 bundle, model_name=kdu_instance, wait=atomic, timeout=timeout
             )
+
+            # Get the application
+            # if atomic:
+            #     # applications = model.applications
+            #     self.log.debug("[install] Applications: {}".format(model.applications))
+            #     for name in model.applications:
+            #         self.log.debug("[install] Waiting for {} to settle".format(name))
+            #         application = model.applications[name]
+            #         try:
+            #             # It's not enough to wait for all units to be active;
+            #             # the application status needs to be active as well.
+            #             self.log.debug("Waiting for all units to be active...")
+            #             await model.block_until(
+            #                 lambda: all(
+            #                     unit.agent_status == "idle"
+            #                     and application.status in ["active", "unknown"]
+            #                     and unit.workload_status in ["active", "unknown"]
+            #                     for unit in application.units
+            #                 ),
+            #                 timeout=timeout,
+            #             )
+            #             self.log.debug("All units active.")
+
+            #         # TODO use asyncio.TimeoutError
+            #         except concurrent.futures._base.TimeoutError:
+            #             os.chdir(previous_workdir)
+            #             self.log.debug("[install] Timeout exceeded; resetting cluster")
+            #             await self.reset(cluster_uuid)
+            #             return False
+
+            # Wait for the application to be active
+            # if model.is_connected():
+            #     self.log.debug("[install] Disconnecting model")
+            #     await model.disconnect()
+            # await controller.disconnect()
             os.chdir(previous_workdir)
+
             return kdu_instance
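# --- Editor's note (not part of this commit) --------------------------------
# The commented-out "atomic" wait loop above blocks until every unit settles.
# A condensed sketch of the same idea with python-libjuju is shown below; the
# `model` object and `timeout` value are assumptions taken from that commented
# code, and asyncio.TimeoutError replaces the concurrent.futures TimeoutError
# flagged in the TODO.
import asyncio


async def wait_for_units_active(model, timeout: float = 1800):
    """Block until each application and its units report an active status."""
    for name, application in model.applications.items():
        try:
            await model.block_until(
                # Bind the current application via a default argument so the
                # lambda does not close over the loop variable.
                lambda app=application: all(
                    unit.agent_status == "idle"
                    and app.status in ["active", "unknown"]
                    and unit.workload_status in ["active", "unknown"]
                    for unit in app.units
                ),
                timeout=timeout,
            )
        except asyncio.TimeoutError:
            raise Exception("Timeout waiting for {} to settle".format(name))
# -----------------------------------------------------------------------------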
""" raise MethodNotImplemented() + # TODO: Remove these commented lines + + # model = await self.get_model(namespace, cluster_uuid=cluster_uuid) + + # model = None + # namespace = self.get_namespace(cluster_uuid) + # controller = await self.get_controller(cluster_uuid) + + # try: + # if namespace not in await controller.list_models(): + # raise N2VCNotFound(message="Model {} does not exist".format(namespace)) + + # model = await controller.get_model(namespace) + # with open(kdu_model, "r") as f: + # bundle = yaml.safe_load(f) + + # """ + # { + # 'description': 'Test bundle', + # 'bundle': 'kubernetes', + # 'applications': { + # 'mariadb-k8s': { + # 'charm': 'cs:~charmed-osm/mariadb-k8s-20', + # 'scale': 1, + # 'options': { + # 'password': 'manopw', + # 'root_password': 'osm4u', + # 'user': 'mano' + # }, + # 'series': 'kubernetes' + # } + # } + # } + # """ + # # TODO: This should be returned in an agreed-upon format + # for name in bundle["applications"]: + # self.log.debug(model.applications) + # application = model.applications[name] + # self.log.debug(application) + + # path = bundle["applications"][name]["charm"] + + # try: + # await application.upgrade_charm(switch=path) + # except juju.errors.JujuError as ex: + # if "already running charm" in str(ex): + # # We're already running this version + # pass + # finally: + # if model: + # await model.disconnect() + # await controller.disconnect() + # return True """Rollback""" @@ -452,6 +700,8 @@ class K8sJujuConnector(K8sConnector): :return: Returns True if successful, or raises an exception """ + # controller = await self.get_controller(cluster_uuid) + self.log.debug("[uninstall] Destroying model") await self.libjuju.destroy_model(kdu_instance, total_timeout=3600) @@ -486,6 +736,8 @@ class K8sJujuConnector(K8sConnector): :return: Returns the output of the action """ + # controller = await self.get_controller(cluster_uuid) + if not params or "application-name" not in params: raise K8sException( "Missing application-name argument, \ @@ -503,6 +755,33 @@ class K8sJujuConnector(K8sConnector): output, status = await self.libjuju.execute_action( application_name, kdu_instance, primitive_name, **params ) + # model = await self.get_model(kdu_instance, controller=controller) + + # application_name = params["application-name"] + # application = model.applications[application_name] + + # actions = await application.get_actions() + # if primitive_name not in actions: + # raise K8sException("Primitive {} not found".format(primitive_name)) + + # unit = None + # for u in application.units: + # if await u.is_leader_from_status(): + # unit = u + # break + + # if unit is None: + # raise K8sException("No leader unit found to execute action") + + # self.log.debug("[exec_primitive] Running action: {}".format(primitive_name)) + # action = await unit.run_action(primitive_name, **params) + + # output = await model.get_action_output(action_uuid=action.entity_id) + # status = await model.get_action_status(uuid_or_prefix=action.entity_id) + + # status = ( + # status[action.entity_id] if action.entity_id in status else "failed" + # ) if status != "completed": raise K8sException( @@ -515,6 +794,12 @@ class K8sJujuConnector(K8sConnector): error_msg = "Error executing primitive {}: {}".format(primitive_name, e) self.log.error(error_msg) raise K8sException(message=error_msg) + # finally: + # await controller.disconnect() + # TODO: Remove these commented lines: + # if not self.authenticated: + # self.log.debug("[exec_primitive] Connecting to controller") + # await 
self.login(cluster_uuid) """Introspection""" @@ -603,11 +888,19 @@ class K8sJujuConnector(K8sConnector): and deployment_time. """ status = {} + # controller = await self.get_controller(cluster_uuid) + # model = await self.get_model(kdu_instance, controller=controller) + + # model_status = await model.get_status() + # status = model_status.applications model_status = await self.libjuju.get_model_status(kdu_instance) for name in model_status.applications: application = model_status.applications[name] status[name] = {"status": application["status"]["status"]} + # await model.disconnect() + # await controller.disconnect() + return status async def get_services( @@ -617,6 +910,14 @@ class K8sJujuConnector(K8sConnector): credentials = self.get_credentials(cluster_uuid=cluster_uuid) + # config_path = "/tmp/{}".format(cluster_uuid) + # config_file = "{}/config".format(config_path) + + # if not os.path.exists(config_path): + # os.makedirs(config_path) + # with open(config_file, "w") as f: + # f.write(credentials) + kubecfg = tempfile.NamedTemporaryFile() with open(kubecfg.name, "w") as kubecfg_file: kubecfg_file.write(credentials) @@ -633,6 +934,14 @@ class K8sJujuConnector(K8sConnector): credentials = self.get_credentials(cluster_uuid=cluster_uuid) + # config_path = "/tmp/{}".format(cluster_uuid) + # config_file = "{}/config".format(config_path) + + # if not os.path.exists(config_path): + # os.makedirs(config_path) + # with open(config_file, "w") as f: + # f.write(credentials) + kubecfg = tempfile.NamedTemporaryFile() with open(kubecfg.name, "w") as kubecfg_file: kubecfg_file.write(credentials) @@ -644,6 +953,156 @@ class K8sJujuConnector(K8sConnector): ) )[0] + # Private methods + # async def add_k8s(self, cloud_name: str, credentials: str,) -> bool: + # """Add a k8s cloud to Juju + + # Adds a Kubernetes cloud to Juju, so it can be bootstrapped with a + # Juju Controller. + + # :param cloud_name str: The name of the cloud to add. + # :param credentials dict: A dictionary representing the output of + # `kubectl config view --raw`. + + # :returns: True if successful, otherwise raises an exception. + # """ + + # cmd = [self.juju_command, "add-k8s", "--local", cloud_name] + # self.log.debug(cmd) + + # process = await asyncio.create_subprocess_exec( + # *cmd, + # stdout=asyncio.subprocess.PIPE, + # stderr=asyncio.subprocess.PIPE, + # stdin=asyncio.subprocess.PIPE, + # ) + + # # Feed the process the credentials + # process.stdin.write(credentials.encode("utf-8")) + # await process.stdin.drain() + # process.stdin.close() + + # _stdout, stderr = await process.communicate() + + # return_code = process.returncode + + # self.log.debug("add-k8s return code: {}".format(return_code)) + + # if return_code > 0: + # raise Exception(stderr) + + # return True + + # async def add_model( + # self, model_name: str, cluster_uuid: str, controller: Controller + # ) -> Model: + # """Adds a model to the controller + + # Adds a new model to the Juju controller + + # :param model_name str: The name of the model to add. + # :param cluster_uuid str: ID of the cluster. + # :param controller: Controller object in which the model will be added + # :returns: The juju.model.Model object of the new model upon success or + # raises an exception. 
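# --- Editor's note (not part of this commit) --------------------------------
# The commented-out add_k8s() above and bootstrap()/destroy_controller() below
# repeat the same asyncio subprocess pattern. A sketch of that pattern,
# factored into a single helper, is shown here; the juju binary path and the
# CLI arguments in the usage line are assumptions carried over from the
# commented code.
import asyncio


async def run_juju_cli(*cmd: str, stdin_data: str = None) -> bytes:
    """Run a juju CLI command, optionally feeding data on stdin."""
    process = await asyncio.create_subprocess_exec(
        *cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        stdin=asyncio.subprocess.PIPE if stdin_data is not None else None,
    )
    stdout, stderr = await process.communicate(
        input=stdin_data.encode("utf-8") if stdin_data is not None else None
    )
    if process.returncode > 0:
        raise Exception(stderr.decode("utf-8", errors="replace"))
    return stdout

# Example (hypothetical cloud name, mirroring the commented add_k8s()):
# await run_juju_cli("juju", "add-k8s", "--local", "k8s-cloud", stdin_data=creds)
# -----------------------------------------------------------------------------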
+ # """ + + # self.log.debug( + # "Adding model '{}' to cluster_uuid '{}'".format(model_name, cluster_uuid) + # ) + # model = None + # try: + # if self.juju_public_key is not None: + # model = await controller.add_model( + # model_name, config={"authorized-keys": self.juju_public_key} + # ) + # else: + # model = await controller.add_model(model_name) + # except Exception as ex: + # self.log.debug(ex) + # self.log.debug("Caught exception: {}".format(ex)) + # pass + + # return model + + # async def bootstrap( + # self, cloud_name: str, cluster_uuid: str, loadbalancer: bool + # ) -> bool: + # """Bootstrap a Kubernetes controller + + # Bootstrap a Juju controller inside the Kubernetes cluster + + # :param cloud_name str: The name of the cloud. + # :param cluster_uuid str: The UUID of the cluster to bootstrap. + # :param loadbalancer bool: If the controller should use loadbalancer or not. + # :returns: True upon success or raises an exception. + # """ + + # if not loadbalancer: + # cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid] + # else: + # """ + # For public clusters, specify that the controller service is using a + # LoadBalancer. + # """ + # cmd = [ + # self.juju_command, + # "bootstrap", + # cloud_name, + # cluster_uuid, + # "--config", + # "controller-service-type=loadbalancer", + # ] + + # self.log.debug( + # "Bootstrapping controller {} in cloud {}".format(cluster_uuid, cloud_name) + # ) + + # process = await asyncio.create_subprocess_exec( + # *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, + # ) + + # _stdout, stderr = await process.communicate() + + # return_code = process.returncode + + # if return_code > 0: + # # + # if b"already exists" not in stderr: + # raise Exception(stderr) + + # return True + + # async def destroy_controller(self, cluster_uuid: str) -> bool: + # """Destroy a Kubernetes controller + + # Destroy an existing Kubernetes controller. + + # :param cluster_uuid str: The UUID of the cluster to bootstrap. + # :returns: True upon success or raises an exception. + # """ + # cmd = [ + # self.juju_command, + # "destroy-controller", + # "--destroy-all-models", + # "--destroy-storage", + # "-y", + # cluster_uuid, + # ] + + # process = await asyncio.create_subprocess_exec( + # *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, + # ) + + # _stdout, stderr = await process.communicate() + + # return_code = process.returncode + + # if return_code > 0: + # # + # if "already exists" not in stderr: + # raise Exception(stderr) + def get_credentials(self, cluster_uuid: str) -> str: """ Get Cluster Kubeconfig @@ -677,6 +1136,51 @@ class K8sJujuConnector(K8sConnector): """ return "cred-{}".format(cluster_uuid) + # def get_config(self, cluster_uuid: str,) -> dict: + # """Get the cluster configuration + + # Gets the configuration of the cluster + + # :param cluster_uuid str: The UUID of the cluster. + # :return: A dict upon success, or raises an exception. 
+ # """ + + # juju_db = self.db.get_one("admin", {"_id": "juju"}) + # config = None + # for k in juju_db["k8sclusters"]: + # if k["_id"] == cluster_uuid: + # config = k["config"] + # self.db.encrypt_decrypt_fields( + # config, + # "decrypt", + # ["secret", "cacert"], + # schema_version="1.1", + # salt=k["_id"], + # ) + # break + # if not config: + # raise Exception( + # "Unable to locate configuration for cluster {}".format(cluster_uuid) + # ) + # return config + + # async def get_model(self, model_name: str, controller: Controller) -> Model: + # """Get a model from the Juju Controller. + + # Note: Model objects returned must call disconnected() before it goes + # out of scope. + + # :param model_name str: The name of the model to get + # :param controller Controller: Controller object + # :return The juju.model.Model object if found, or None. + # """ + + # models = await controller.list_models() + # if model_name not in models: + # raise N2VCNotFound("Model {} not found".format(model_name)) + # self.log.debug("Found model: {}".format(model_name)) + # return await controller.get_model(model_name) + def get_namespace( self, cluster_uuid: str, @@ -687,8 +1191,177 @@ class K8sJujuConnector(K8sConnector): :param cluster_uuid str: The UUID of the cluster :returns: The namespace UUID, or raises an exception """ + # config = self.get_config(cluster_uuid) + + # Make sure the name is in the config + # if "namespace" not in config: + # raise Exception("Namespace not found.") + + # TODO: We want to make sure this is unique to the cluster, in case + # the cluster is being reused. + # Consider pre/appending the cluster id to the namespace string pass + # TODO: Remove these lines of code + # async def has_model(self, model_name: str) -> bool: + # """Check if a model exists in the controller + + # Checks to see if a model exists in the connected Juju controller. + + # :param model_name str: The name of the model + # :return: A boolean indicating if the model exists + # """ + # models = await self.controller.list_models() + + # if model_name in models: + # return True + # return False + + # def is_local_k8s(self, credentials: str,) -> bool: + # """Check if a cluster is local + + # Checks if a cluster is running in the local host + + # :param credentials dict: A dictionary containing the k8s credentials + # :returns: A boolean if the cluster is running locally + # """ + + # creds = yaml.safe_load(credentials) + + # if creds and os.getenv("OSMLCM_VCA_APIPROXY"): + # for cluster in creds["clusters"]: + # if "server" in cluster["cluster"]: + # if os.getenv("OSMLCM_VCA_APIPROXY") in cluster["cluster"]["server"]: + # return True + + # return False + + # async def get_controller(self, cluster_uuid): + # """Login to the Juju controller.""" + + # config = self.get_config(cluster_uuid) + + # juju_endpoint = config["endpoint"] + # juju_user = config["username"] + # juju_secret = config["secret"] + # juju_ca_cert = config["cacert"] + + # controller = Controller() + + # if juju_secret: + # self.log.debug( + # "Connecting to controller... 
ws://{} as {}".format( + # juju_endpoint, juju_user, + # ) + # ) + # try: + # await controller.connect( + # endpoint=juju_endpoint, + # username=juju_user, + # password=juju_secret, + # cacert=juju_ca_cert, + # ) + # self.log.debug("JujuApi: Logged into controller") + # return controller + # except Exception as ex: + # self.log.debug(ex) + # self.log.debug("Caught exception: {}".format(ex)) + # else: + # self.log.fatal("VCA credentials not configured.") + + # TODO: Remove these commented lines + # self.authenticated = False + # if self.authenticated: + # return + + # self.connecting = True + # juju_public_key = None + # self.authenticated = True + # Test: Make sure we have the credentials loaded + # async def logout(self): + # """Logout of the Juju controller.""" + # self.log.debug("[logout]") + # if not self.authenticated: + # return False + + # for model in self.models: + # self.log.debug("Logging out of model {}".format(model)) + # await self.models[model].disconnect() + + # if self.controller: + # self.log.debug("Disconnecting controller {}".format(self.controller)) + # await self.controller.disconnect() + # self.controller = None + + # self.authenticated = False + + # async def remove_cloud(self, cloud_name: str,) -> bool: + # """Remove a k8s cloud from Juju + + # Removes a Kubernetes cloud from Juju. + + # :param cloud_name str: The name of the cloud to add. + + # :returns: True if successful, otherwise raises an exception. + # """ + + # # Remove the bootstrapped controller + # cmd = [self.juju_command, "remove-k8s", "--client", cloud_name] + # process = await asyncio.create_subprocess_exec( + # *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, + # ) + + # _stdout, stderr = await process.communicate() + + # return_code = process.returncode + + # if return_code > 0: + # raise Exception(stderr) + + # # Remove the cloud from the local config + # cmd = [self.juju_command, "remove-cloud", "--client", cloud_name] + # process = await asyncio.create_subprocess_exec( + # *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, + # ) + + # _stdout, stderr = await process.communicate() + + # return_code = process.returncode + + # if return_code > 0: + # raise Exception(stderr) + + # return True + + # async def set_config(self, cluster_uuid: str, config: dict,) -> bool: + # """Save the cluster configuration + + # Saves the cluster information to the Mongo database + + # :param cluster_uuid str: The UUID of the cluster + # :param config dict: A dictionary containing the cluster configuration + # """ + + # juju_db = self.db.get_one("admin", {"_id": "juju"}) + + # k8sclusters = juju_db["k8sclusters"] if "k8sclusters" in juju_db else [] + # self.db.encrypt_decrypt_fields( + # config, + # "encrypt", + # ["secret", "cacert"], + # schema_version="1.1", + # salt=cluster_uuid, + # ) + # k8sclusters.append({"_id": cluster_uuid, "config": config}) + # self.db.set_one( + # table="admin", + # q_filter={"_id": "juju"}, + # update_dict={"k8sclusters": k8sclusters}, + # ) + + # Private methods to create/delete needed resources in the + # Kubernetes cluster to create the K8s cloud in Juju + def _create_cluster_role( self, kubectl: Kubectl,