X-Git-Url: https://osm.etsi.org/gitweb/?p=osm%2FN2VC.git;a=blobdiff_plain;f=n2vc%2Fk8s_juju_conn.py;h=3316087d975889111ea4166a719babfee9472110;hp=8520687eb70d3827bdd11179b7147ca0016c8f4b;hb=620fa3c11297a0938f25cb867a9a341b50098409;hpb=8c92ecb50a974e1a5e1ae0b156b0e74a1f656b9d diff --git a/n2vc/k8s_juju_conn.py b/n2vc/k8s_juju_conn.py index 8520687..3316087 100644 --- a/n2vc/k8s_juju_conn.py +++ b/n2vc/k8s_juju_conn.py @@ -12,23 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +import asyncio import concurrent from .exceptions import NotImplemented +import io import juju # from juju.bundle import BundleHandler from juju.controller import Controller from juju.model import Model from juju.errors import JujuAPIError, JujuError -import logging - from n2vc.k8s_conn import K8sConnector import os # import re # import ssl -import subprocess # from .vnf import N2VC import uuid @@ -43,7 +42,7 @@ class K8sJujuConnector(K8sConnector): db: object, kubectl_command: str = '/usr/bin/kubectl', juju_command: str = '/usr/bin/juju', - log=None, + log: object = None, on_update_db=None, ): """ @@ -63,16 +62,15 @@ class K8sJujuConnector(K8sConnector): ) self.fs = fs - self.info('Initializing K8S Juju connector') + self.log.debug('Initializing K8S Juju connector') self.authenticated = False self.models = {} - self.log = logging.getLogger(__name__) self.juju_command = juju_command self.juju_secret = "" - self.info('K8S Juju connector initialized') + self.log.debug('K8S Juju connector initialized') """Initialization""" async def init_env( @@ -81,13 +79,14 @@ class K8sJujuConnector(K8sConnector): namespace: str = 'kube-system', reuse_cluster_uuid: str = None, ) -> (str, bool): - """Initialize a Kubernetes environment - - :param k8s_creds dict: A dictionary containing the Kubernetes cluster - configuration - :param namespace str: The Kubernetes namespace to initialize + """ + It prepares a given K8s cluster environment to run Juju bundles. - :return: UUID of the k8s context or raises an exception + :param k8s_creds: credentials to access a given K8s cluster, i.e. a valid '.kube/config' + :param namespace: optional namespace to be used for juju. By default, 'kube-system' will be used + :param reuse_cluster_uuid: existing cluster uuid for reuse + :return: uuid of the K8s cluster and True if connector has installed some software in the cluster + (on error, an exception will be raised) """ """Bootstrapping @@ -118,6 +117,16 @@ class K8sJujuConnector(K8sConnector): # TODO: Pull info from db based on the namespace # ################################################## + ################################################### + # TODO: Make it idempotent, calling add-k8s and # + # bootstrap whenever reuse_cluster_uuid is passed # + # as parameter # + # `init_env` is called to initialize the K8s # + # cluster for juju. If this initialization fails, # + # it can be called again by LCM with the param # + # reuse_cluster_uuid, e.g. to try to fix it. 
# + ################################################### + if not reuse_cluster_uuid: # This is a new cluster, so bootstrap it @@ -130,21 +139,21 @@ class K8sJujuConnector(K8sConnector): loadbalancer = False if localk8s else True # Name the new k8s cloud - k8s_cloud = "{}-k8s".format(namespace) + k8s_cloud = "k8s-{}".format(cluster_uuid) - print("Adding k8s cloud {}".format(k8s_cloud)) + self.log.debug("Adding k8s cloud {}".format(k8s_cloud)) await self.add_k8s(k8s_cloud, k8s_creds) # Bootstrap Juju controller - print("Bootstrapping...") + self.log.debug("Bootstrapping...") await self.bootstrap(k8s_cloud, cluster_uuid, loadbalancer) - print("Bootstrap done.") + self.log.debug("Bootstrap done.") # Get the controller information # Parse ~/.local/share/juju/controllers.yaml # controllers.testing.api-endpoints|ca-cert|uuid - print("Getting controller endpoints") + self.log.debug("Getting controller endpoints") with open(os.path.expanduser( "~/.local/share/juju/controllers.yaml" )) as f: @@ -156,7 +165,7 @@ class K8sJujuConnector(K8sConnector): # Parse ~/.local/share/juju/accounts # controllers.testing.user|password - print("Getting accounts") + self.log.debug("Getting accounts") with open(os.path.expanduser( "~/.local/share/juju/accounts.yaml" )) as f: @@ -166,11 +175,6 @@ class K8sJujuConnector(K8sConnector): self.juju_user = controller['user'] self.juju_secret = controller['password'] - print("user: {}".format(self.juju_user)) - print("secret: {}".format(self.juju_secret)) - print("endpoint: {}".format(self.juju_endpoint)) - print("ca-cert: {}".format(self.juju_ca_cert)) - # raise Exception("EOL") self.juju_public_key = None @@ -186,7 +190,7 @@ class K8sJujuConnector(K8sConnector): # Store the cluster configuration so it # can be used for subsequent calls - print("Setting config") + self.log.debug("Setting config") await self.set_config(cluster_uuid, config) else: @@ -206,15 +210,15 @@ class K8sJujuConnector(K8sConnector): await self.login(cluster_uuid) # We're creating a new cluster - print("Getting model {}".format(self.get_namespace(cluster_uuid), cluster_uuid=cluster_uuid)) - model = await self.get_model( - self.get_namespace(cluster_uuid), - cluster_uuid=cluster_uuid - ) + #print("Getting model {}".format(self.get_namespace(cluster_uuid), cluster_uuid=cluster_uuid)) + #model = await self.get_model( + # self.get_namespace(cluster_uuid), + # cluster_uuid=cluster_uuid + #) - # Disconnect from the model - if model and model.is_connected(): - await model.disconnect() + ## Disconnect from the model + #if model and model.is_connected(): + # await model.disconnect() return cluster_uuid, True @@ -236,6 +240,16 @@ class K8sJujuConnector(K8sConnector): ): raise NotImplemented() + async def synchronize_repos( + self, + cluster_uuid: str, + name: str + ): + """ + Returns None as currently add_repo is not implemented + """ + return None + """Reset""" async def reset( self, @@ -259,27 +273,28 @@ class K8sJujuConnector(K8sConnector): # Destroy the model namespace = self.get_namespace(cluster_uuid) if await self.has_model(namespace): - print("[reset] Destroying model") + self.log.debug("[reset] Destroying model") await self.controller.destroy_model( namespace, destroy_storage=True ) # Disconnect from the controller - print("[reset] Disconnecting controller") - await self.controller.disconnect() + self.log.debug("[reset] Disconnecting controller") + await self.logout() # Destroy the controller (via CLI) - print("[reset] Destroying controller") + self.log.debug("[reset] Destroying controller") await 
self.destroy_controller(cluster_uuid) - print("[reset] Removing k8s cloud") - namespace = self.get_namespace(cluster_uuid) - k8s_cloud = "{}-k8s".format(namespace) + self.log.debug("[reset] Removing k8s cloud") + k8s_cloud = "k8s-{}".format(cluster_uuid) await self.remove_cloud(k8s_cloud) except Exception as ex: - print("Caught exception during reset: {}".format(ex)) + self.log.debug("Caught exception during reset: {}".format(ex)) + + return True """Deployment""" @@ -290,7 +305,8 @@ class K8sJujuConnector(K8sConnector): atomic: bool = True, timeout: float = 300, params: dict = None, - db_dict: dict = None + db_dict: dict = None, + kdu_name: str = None ) -> bool: """Install a bundle @@ -301,25 +317,28 @@ class K8sJujuConnector(K8sConnector): :param timeout int: The time, in seconds, to wait for the install to finish :param params dict: Key-value pairs of instantiation parameters + :param kdu_name: Name of the KDU instance to be installed :return: If successful, returns ? """ if not self.authenticated: - print("[install] Logging in to the controller") + self.log.debug("[install] Logging in to the controller") await self.login(cluster_uuid) ## - # Get or create the model, based on the NS + # Get or create the model, based on the NS # uuid. - model_name = db_dict["filter"]["_id"] + if kdu_name: + kdu_instance = "{}-{}".format(kdu_name, db_dict["filter"]["_id"]) + else: + kdu_instance = db_dict["filter"]["_id"] + + self.log.debug("Checking for model named {}".format(kdu_instance)) - self.log.debug("Checking for model named {}".format(model_name)) - model = await self.get_model(model_name, cluster_uuid=cluster_uuid) - if not model: - # Create the new model - self.log.debug("Adding model: {}".format(model_name)) - model = await self.add_model(model_name, cluster_uuid=cluster_uuid) + # Create the new model + self.log.debug("Adding model: {}".format(kdu_instance)) + model = await self.add_model(kdu_instance, cluster_uuid=cluster_uuid) if model: # TODO: Instantiation parameters @@ -332,6 +351,8 @@ class K8sJujuConnector(K8sConnector): - """ + previous_workdir = os.getcwd() + bundle = kdu_model if kdu_model.startswith("cs:"): bundle = kdu_model @@ -339,31 +360,30 @@ class K8sJujuConnector(K8sConnector): # Download the file pass else: - # Local file + new_workdir = kdu_model.strip(kdu_model.split("/")[-1]) - # if kdu_model.endswith(".tar.gz") or kdu_model.endswith(".tgz") - # Uncompress temporarily - # bundle = - pass + os.chdir(new_workdir) + + bundle = "local:{}".format(kdu_model) if not bundle: # Raise named exception that the bundle could not be found raise Exception() - print("[install] deploying {}".format(bundle)) + self.log.debug("[install] deploying {}".format(bundle)) await model.deploy(bundle) # Get the application if atomic: # applications = model.applications - print("[install] Applications: {}".format(model.applications)) + self.log.debug("[install] Applications: {}".format(model.applications)) for name in model.applications: - print("[install] Waiting for {} to settle".format(name)) + self.log.debug("[install] Waiting for {} to settle".format(name)) application = model.applications[name] try: # It's not enough to wait for all units to be active; # the application status needs to be active as well. 
- print("Waiting for all units to be active...") + self.log.debug("Waiting for all units to be active...") await model.block_until( lambda: all( unit.agent_status == 'idle' @@ -374,19 +394,22 @@ class K8sJujuConnector(K8sConnector): ), timeout=timeout ) - print("All units active.") + self.log.debug("All units active.") except concurrent.futures._base.TimeoutError: - print("[install] Timeout exceeded; resetting cluster") + os.chdir(previous_workdir) + self.log.debug("[install] Timeout exceeded; resetting cluster") await self.reset(cluster_uuid) return False # Wait for the application to be active if model.is_connected(): - print("[install] Disconnecting model") + self.log.debug("[install] Disconnecting model") await model.disconnect() - return True + os.chdir(previous_workdir) + + return kdu_instance raise Exception("Unable to install") async def instances_list( @@ -458,9 +481,9 @@ class K8sJujuConnector(K8sConnector): """ # TODO: This should be returned in an agreed-upon format for name in bundle['applications']: - print(model.applications) + self.log.debug(model.applications) application = model.applications[name] - print(application) + self.log.debug(application) path = bundle['applications'][name]['charm'] @@ -499,35 +522,27 @@ class K8sJujuConnector(K8sConnector): async def uninstall( self, cluster_uuid: str, - kdu_instance: str, + kdu_instance: str ) -> bool: """Uninstall a KDU instance - :param cluster_uuid str: The UUID of the cluster to uninstall + :param cluster_uuid str: The UUID of the cluster :param kdu_instance str: The unique name of the KDU instance :return: Returns True if successful, or raises an exception """ - removed = False - - # Remove an application from the model - model = await self.get_model(self.get_namespace(cluster_uuid), cluster_uuid=cluster_uuid) - - if model: - # Get the application - if kdu_instance not in model.applications: - # TODO: Raise a named exception - raise Exception("Application not found.") + if not self.authenticated: + self.log.debug("[uninstall] Connecting to controller") + await self.login(cluster_uuid) - application = model.applications[kdu_instance] + self.log.debug("[uninstall] Destroying model") - # Destroy the application - await application.destroy() + await self.controller.destroy_models(kdu_instance) - # TODO: Verify removal + self.log.debug("[uninstall] Model destroyed and disconnecting") + await self.logout() - removed = True - return removed + return True """Introspection""" async def inspect_kdu( @@ -650,20 +665,29 @@ class K8sJujuConnector(K8sConnector): """ cmd = [self.juju_command, "add-k8s", "--local", cloud_name] - print(cmd) - p = subprocess.run( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - # input=yaml.dump(credentials, Dumper=yaml.Dumper).encode("utf-8"), - input=credentials.encode("utf-8"), - # encoding='ascii' + self.log.debug(cmd) + + process = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + stdin=asyncio.subprocess.PIPE, ) - retcode = p.returncode - print("add-k8s return code: {}".format(retcode)) - if retcode > 0: - raise Exception(p.stderr) + # Feed the process the credentials + process.stdin.write(credentials.encode("utf-8")) + await process.stdin.drain() + process.stdin.close() + + stdout, stderr = await process.communicate() + + return_code = process.returncode + + self.log.debug("add-k8s return code: {}".format(return_code)) + + if return_code > 0: + raise Exception(stderr) + return True async def add_model( @@ -683,10 +707,16 @@ 
class K8sJujuConnector(K8sConnector): await self.login(cluster_uuid) self.log.debug("Adding model '{}' to cluster_uuid '{}'".format(model_name, cluster_uuid)) - model = await self.controller.add_model( - model_name, - config={'authorized-keys': self.juju_public_key} - ) + try: + model = await self.controller.add_model( + model_name, + config={'authorized-keys': self.juju_public_key} + ) + except Exception as ex: + self.log.debug(ex) + self.log.debug("Caught exception: {}".format(ex)) + pass + return model async def bootstrap( @@ -713,22 +743,24 @@ class K8sJujuConnector(K8sConnector): """ cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid, "--config", "controller-service-type=loadbalancer"] - print("Bootstrapping controller {} in cloud {}".format( + self.log.debug("Bootstrapping controller {} in cloud {}".format( cluster_uuid, cloud_name )) - p = subprocess.run( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - # encoding='ascii' + process = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, ) - retcode = p.returncode - if retcode > 0: + stdout, stderr = await process.communicate() + + return_code = process.returncode + + if return_code > 0: # - if b'already exists' not in p.stderr: - raise Exception(p.stderr) + if b'already exists' not in stderr: + raise Exception(stderr) return True @@ -752,18 +784,20 @@ class K8sJujuConnector(K8sConnector): cluster_uuid ] - p = subprocess.run( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - # encoding='ascii' + process = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, ) - retcode = p.returncode - if retcode > 0: + stdout, stderr = await process.communicate() + + return_code = process.returncode + + if return_code > 0: # - if 'already exists' not in p.stderr: - raise Exception(p.stderr) + if 'already exists' not in stderr: + raise Exception(stderr) def get_config( self, @@ -912,7 +946,7 @@ class K8sJujuConnector(K8sConnector): self.authenticated = True self.log.debug("JujuApi: Logged into controller") except Exception as ex: - print(ex) + self.log.debug(ex) self.log.debug("Caught exception: {}".format(ex)) pass else: @@ -921,12 +955,12 @@ class K8sJujuConnector(K8sConnector): async def logout(self): """Logout of the Juju controller.""" - print("[logout]") + self.log.debug("[logout]") if not self.authenticated: return False for model in self.models: - print("Logging out of model {}".format(model)) + self.log.debug("Logging out of model {}".format(model)) await self.models[model].disconnect() if self.controller: @@ -953,29 +987,33 @@ class K8sJujuConnector(K8sConnector): # Remove the bootstrapped controller cmd = [self.juju_command, "remove-k8s", "--client", cloud_name] - p = subprocess.run( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - # encoding='ascii' + process = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, ) - retcode = p.returncode - if retcode > 0: - raise Exception(p.stderr) + stdout, stderr = await process.communicate() + + return_code = process.returncode + + if return_code > 0: + raise Exception(stderr) # Remove the cloud from the local config cmd = [self.juju_command, "remove-cloud", "--client", cloud_name] - p = subprocess.run( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - # encoding='ascii' + process = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + 
stderr=asyncio.subprocess.PIPE, ) - retcode = p.returncode - if retcode > 0: - raise Exception(p.stderr) + stdout, stderr = await process.communicate() + + return_code = process.returncode + + if return_code > 0: + raise Exception(stderr) return True @@ -995,7 +1033,7 @@ class K8sJujuConnector(K8sConnector): cluster_config = "{}/{}.yaml".format(self.fs.path, cluster_uuid) if not os.path.exists(cluster_config): - print("Writing config to {}".format(cluster_config)) + self.log.debug("Writing config to {}".format(cluster_config)) with open(cluster_config, 'w') as f: f.write(yaml.dump(config, Dumper=yaml.Dumper))
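
Note (reviewer sketch, not part of the patch): the recurring change in add_k8s(), bootstrap(), destroy_controller() and remove_cloud() is the move from blocking subprocess.run() calls to asyncio.create_subprocess_exec(), so the juju CLI is invoked without stalling the event loop. Below is a minimal, self-contained sketch of that pattern under stated assumptions: the helper name run_juju_cli, the example cloud name "k8s-example" and the placeholder kubeconfig bytes are illustrative only and do not appear in the commit (the patch itself writes to process.stdin explicitly before calling communicate(); the sketch uses communicate(input=...) as a simpler equivalent).

import asyncio


async def run_juju_cli(cmd, stdin_data=None):
    """Run a juju CLI command asynchronously; return (returncode, stdout, stderr)."""
    process = await asyncio.create_subprocess_exec(
        *cmd,
        stdin=asyncio.subprocess.PIPE if stdin_data else None,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    # communicate() feeds stdin (if any), waits for the process to exit
    # and collects the captured output without blocking the event loop.
    stdout, stderr = await process.communicate(input=stdin_data)
    return process.returncode, stdout, stderr


async def main():
    # Roughly the shape of the patched add_k8s(): run
    # `juju add-k8s --local <cloud>` and feed it a kubeconfig on stdin.
    rc, out, err = await run_juju_cli(
        ["/usr/bin/juju", "add-k8s", "--local", "k8s-example"],
        stdin_data=b"<kubeconfig contents>",
    )
    if rc > 0:
        # Mirrors the patch: surface the CLI's stderr on failure.
        raise Exception(err)


if __name__ == "__main__":
    asyncio.run(main())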