From 1cfed49811309ce587c2b034be690bc352065e81 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Wed, 8 Jun 2022 11:16:54 +0200 Subject: [PATCH 01/16] Fix bug 2060: Ignore "model not found" exception when deleting models Change-Id: Ife06b41bf2bcf32b080b405e607450d9303d19e0 Signed-off-by: David Garcia --- n2vc/libjuju.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/n2vc/libjuju.py b/n2vc/libjuju.py index 4ee0abf..7492acc 100644 --- a/n2vc/libjuju.py +++ b/n2vc/libjuju.py @@ -1422,14 +1422,17 @@ class Libjuju: self.log.info(f"Model {model_name} deleted forcefully") try: - await asyncio.wait_for( - _destroy_model_gracefully(model_name, controller), timeout=120 - ) - except asyncio.TimeoutError: - await _destroy_model_forcefully(model_name, controller) + try: + await asyncio.wait_for( + _destroy_model_gracefully(model_name, controller), timeout=120 + ) + except asyncio.TimeoutError: + await _destroy_model_forcefully(model_name, controller) except juju.errors.JujuError as e: if any("has been removed" in error for error in e.errors): return + if any("model not found" in error for error in e.errors): + return raise e async def destroy_application( -- 2.25.1 From 0439319e76e6a5e71ecf4b3efd1ece2c82d52b53 Mon Sep 17 00:00:00 2001 From: garciadeblas Date: Wed, 8 Jun 2022 15:39:24 +0200 Subject: [PATCH 02/16] Update _split_version to check if the kdu_model is a reference or a file Change-Id: I80dac9f24f652d2a71d9dcbb88cbc2c6850440db Signed-off-by: garciadeblas --- n2vc/k8s_helm3_conn.py | 7 ++++++- n2vc/k8s_helm_base_conn.py | 5 ++++- n2vc/k8s_helm_conn.py | 7 ++++--- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/n2vc/k8s_helm3_conn.py b/n2vc/k8s_helm3_conn.py index bb08f07..107f954 100644 --- a/n2vc/k8s_helm3_conn.py +++ b/n2vc/k8s_helm3_conn.py @@ -84,7 +84,12 @@ class K8sHelm3Connector(K8sHelmBaseConnector): """Install a helm chart :param cluster_uuid str: The UUID of the cluster to install to - :param kdu_model str: The name or path of a bundle to install + :param kdu_model str: chart/reference (string), which can be either + of these options: + - a name of chart available via the repos known by OSM + (e.g. stable/openldap, stable/openldap:1.2.4) + - a path to a packaged chart (e.g. mychart.tgz) + - a path to an unpacked chart directory or a URL (e.g. mychart) :param kdu_instance: Kdu instance name :param atomic bool: If set, waits until the model is active and resets the cluster on failure. diff --git a/n2vc/k8s_helm_base_conn.py b/n2vc/k8s_helm_base_conn.py index d446b9b..a4eab4b 100644 --- a/n2vc/k8s_helm_base_conn.py +++ b/n2vc/k8s_helm_base_conn.py @@ -371,6 +371,9 @@ class K8sHelmBaseConnector(K8sConnector): return True + def _is_helm_chart_a_file(self, chart_name: str): + return chart_name.count("/") > 1 + async def _install_impl( self, cluster_id: str, @@ -1954,7 +1957,7 @@ class K8sHelmBaseConnector(K8sConnector): def _split_version(self, kdu_model: str) -> (str, str): version = None - if ":" in kdu_model: + if not self._is_helm_chart_a_file(kdu_model) and ":" in kdu_model: parts = kdu_model.split(sep=":") if len(parts) == 2: version = str(parts[1]) diff --git a/n2vc/k8s_helm_conn.py b/n2vc/k8s_helm_conn.py index 8c526f5..5fe624b 100644 --- a/n2vc/k8s_helm_conn.py +++ b/n2vc/k8s_helm_conn.py @@ -110,11 +110,12 @@ class K8sHelmConnector(K8sHelmBaseConnector): happen before any _initial-config-primitive_of the VNF is called). 
:param cluster_uuid: UUID of a K8s cluster known by OSM - :param kdu_model: chart/ reference (string), which can be either + :param kdu_model: chart/reference (string), which can be either of these options: - a name of chart available via the repos known by OSM - - a path to a packaged chart - - a path to an unpacked chart directory or a URL + (e.g. stable/openldap, stable/openldap:1.2.4) + - a path to a packaged chart (e.g. mychart.tgz) + - a path to an unpacked chart directory or a URL (e.g. mychart) :param kdu_instance: Kdu instance name :param atomic: If set, installation process purges chart/bundle on fail, also will wait until all the K8s objects are active -- 2.25.1 From 547f823676bf59d2c972bc6da0cfd231170b27d0 Mon Sep 17 00:00:00 2001 From: Pedro Escaleira Date: Fri, 3 Jun 2022 19:48:46 +0100 Subject: [PATCH 03/16] Bug 2064 fixed Removed the unnecessary verification of whether the repo_url exists in the get_scale_count method of the Helm Base Connector class; the same problem also applied to the scale method, so this verification was removed there as well. Also made some fixes/additions to the docstrings of the methods being called in this verification block.
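The contract behind this removal can be sketched in isolation (a minimal sketch: known_repos stands in for the cluster's configured repo list, which the real _find_repo resolves itself):

    def find_repo(kdu_model: str, known_repos: dict):
        # Sketch of K8sHelmBaseConnector._find_repo's contract: a
        # "<repo>/<chart>" reference resolves to a repo URL; a local chart
        # path or a direct URL legitimately resolves to None.
        idx = kdu_model.find("/")
        if idx >= 0:
            return known_repos.get(kdu_model[:idx])
        return None

    repos = {"stable": "https://charts.helm.sh/stable"}
    assert find_repo("stable/openldap", repos) == "https://charts.helm.sh/stable"
    assert find_repo("mychart.tgz", repos) is None  # valid local chart, not an error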
Change-Id: I710b8678b4b70c6f24c2150d44bb714e9e4c5b66 Signed-off-by: Pedro Escaleira --- n2vc/k8s_helm3_conn.py | 13 ++++++++ n2vc/k8s_helm_base_conn.py | 67 ++++++++++++++++++++++++++++---------- 2 files changed, 62 insertions(+), 18 deletions(-) diff --git a/n2vc/k8s_helm3_conn.py b/n2vc/k8s_helm3_conn.py index 107f954..74a8947 100644 --- a/n2vc/k8s_helm3_conn.py +++ b/n2vc/k8s_helm3_conn.py @@ -353,6 +353,19 @@ class K8sHelm3Connector(K8sHelmBaseConnector): def _get_inspect_command( self, inspect_command: str, kdu_model: str, repo_str: str, version: str ): + """Generates the command to obtain the information about a Helm Chart package + (`helm show ...` command) + + Args: + show_command: the second part of the command (`helm show <show_command>`) + kdu_model: The name or path of a Helm Chart + repo_url: Helm Chart repository url + version: constraint with specific version of the Chart to use + + Returns: + str: the generated Helm Chart command + """ + inspect_command = "{} show {} {}{} {}".format( self._helm_command, inspect_command, kdu_model, repo_str, version )
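A standalone sketch of how such a command is assembled (only the "{} show {} {}{} {}" template above is taken from the patch; the exact --repo/--version flag spelling here is an assumption):

    def get_inspect_command(helm_command: str, inspect_command: str,
                            kdu_model: str, repo_url: str = None,
                            version: str = None) -> str:
        # repo/version parts are optional, mirroring the template above;
        # flag names are assumed for illustration.
        repo_str = " --repo {}".format(repo_url) if repo_url else ""
        version_str = "--version {}".format(version) if version else ""
        return "{} show {} {}{} {}".format(
            helm_command, inspect_command, kdu_model, repo_str, version_str
        )

    print(get_inspect_command("/usr/bin/helm3", "values", "stable/openldap",
                              "https://charts.example.com", "1.2.4"))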
diff --git a/n2vc/k8s_helm_base_conn.py b/n2vc/k8s_helm_base_conn.py index a4eab4b..ed3ea1c 100644 --- a/n2vc/k8s_helm_base_conn.py +++ b/n2vc/k8s_helm_base_conn.py @@ -645,10 +645,6 @@ class K8sHelmBaseConnector(K8sConnector): kdu_model, version = self._split_version(kdu_model) repo_url = await self._find_repo(kdu_model, cluster_uuid) - if not repo_url: - raise K8sException( - "Repository not found for kdu_model {}".format(kdu_model) - ) _, replica_str = await self._get_replica_count_url( kdu_model, repo_url, resource_name ) @@ -735,7 +731,7 @@ class K8sHelmBaseConnector(K8sConnector): cluster_uuid: The UUID of the cluster resource_name: Resource name kdu_instance: KDU instance name - kdu_model: The name or path of a bundle + kdu_model: The name or path of a Helm Chart kwargs: Additional parameters Returns: @@ -762,14 +758,11 @@ class K8sHelmBaseConnector(K8sConnector): # Get default value if scale count is not found from provided values if not replicas: - repo_url = await self._find_repo(kdu_model, cluster_uuid) - if not repo_url: - raise K8sException( - "Repository not found for kdu_model {}".format(kdu_model) - ) - + repo_url = await self._find_repo( + kdu_model=kdu_model, cluster_uuid=cluster_uuid + ) replicas, _ = await self._get_replica_count_url( - kdu_model, repo_url, resource_name + kdu_model=kdu_model, repo_url=repo_url, resource_name=resource_name ) if not replicas: @@ -1135,6 +1128,15 @@ class K8sHelmBaseConnector(K8sConnector): ) async def values_kdu(self, kdu_model: str, repo_url: str = None) -> str: + """Method to obtain the Helm Chart package's values + + Args: + kdu_model: The name or path of a Helm Chart + repo_url: Helm Chart repository url + + Returns: + str: the values of the Helm Chart package + """ self.log.debug( "inspect kdu_model values {} from (optional) repo: {}".format( @@ -1355,8 +1357,17 @@ class K8sHelmBaseConnector(K8sConnector): def _get_inspect_command( self, show_command: str, kdu_model: str, repo_str: str, version: str ): - """ - Obtain command to be executed to obtain information about the kdu + """Generates the command to obtain the information about a Helm Chart package + (`helm show ...` command) + + Args: + show_command: the second part of the command (`helm show <show_command>`) + kdu_model: The name or path of a Helm Chart + repo_url: Helm Chart repository url + version: constraint with specific version of the Chart to use + + Returns: + str: the generated Helm Chart command """ @abc.abstractmethod @@ -1684,7 +1695,16 @@ class K8sHelmBaseConnector(K8sConnector): async def _exec_inspect_command( self, inspect_command: str, kdu_model: str, repo_url: str = None ): - """Obtains information about a kdu, no cluster (no env).""" + """Obtains information about a Helm Chart package (`helm show` command) + + Args: + inspect_command: the Helm sub command (`helm show ...`) + kdu_model: The name or path of a Helm Chart + repo_url: Helm Chart repository url + + Returns: + str: the requested info about the Helm Chart package + """ repo_str = "" if repo_url: @@ -1712,13 +1732,13 @@ class K8sHelmBaseConnector(K8sConnector): async def _get_replica_count_url( self, kdu_model: str, - repo_url: str, + repo_url: str = None, resource_name: str = None, ): """Get the replica count value in the Helm Chart Values. Args: - kdu_model: The name or path of a bundle + kdu_model: The name or path of a Helm Chart repo_url: Helm Chart repository url resource_name: Resource name @@ -1727,7 +1747,8 @@ class K8sHelmBaseConnector(K8sConnector): """ kdu_values = yaml.load( - await self.values_kdu(kdu_model, repo_url), Loader=yaml.SafeLoader + await self.values_kdu(kdu_model=kdu_model, repo_url=repo_url), + Loader=yaml.SafeLoader, ) if not kdu_values: @@ -1972,6 +1993,16 @@ class K8sHelmBaseConnector(K8sConnector): return repo_name async def _find_repo(self, kdu_model: str, cluster_uuid: str) -> str: + """Obtain the Helm repository for a Helm Chart + + Args: + kdu_model (str): the KDU model associated with the Helm Chart instantiation + cluster_uuid (str): The cluster UUID associated with the Helm Chart instantiation + + Returns: + str: the repository URL; if the Helm Chart is a local one, the function returns None + """ + repo_url = None idx = kdu_model.find("/") if idx >= 0: -- 2.25.1 From aa5deb75e23991d357e30d90fbb68c03f6ba5bd4 Mon Sep 17 00:00:00 2001 From: Pedro Escaleira Date: Sun, 5 Jun 2022 01:29:57 +0100 Subject: [PATCH 04/16] Bug 2067 fixed Added the missing resource_name param to the _get_replica_count_instance method call under the method get_scale_count Change-Id: Iae2f437b82993dbbf71b80feb5b208f29c8d7083 Signed-off-by: Pedro Escaleira --- n2vc/k8s_helm_base_conn.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/n2vc/k8s_helm_base_conn.py b/n2vc/k8s_helm_base_conn.py index ed3ea1c..dd2116c 100644 --- a/n2vc/k8s_helm_base_conn.py +++ b/n2vc/k8s_helm_base_conn.py @@ -753,7 +753,10 @@ class K8sHelmBaseConnector(K8sConnector): ) replicas = await self._get_replica_count_instance( - kdu_instance, instance_info["namespace"], paths["kube_config"] + kdu_instance=kdu_instance, + namespace=instance_info["namespace"], + kubeconfig=paths["kube_config"], + resource_name=resource_name, ) # Get default value if scale count is not found from provided values -- 2.25.1 From 764d8664333e7a6f16353bc8f578c5681f66433f Mon Sep 17 00:00:00 2001 From: Pedro Escaleira Date: Tue, 19 Apr 2022 20:40:09 +0100 Subject: [PATCH 05/16] Bug 1995 fixed: possibility of defining the K8s namespace for Juju Bundles Now, N2VC will use the namespace passed by argument to the methods install and get_services. Also added this argument to other functions where it should be passed. When it is not passed, for now it is obtained from the nsrs, but it should always be passed to avoid queries to the database, while maintaining backward compatibility. Updated the N2VC tests accordingly.
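The resolution order introduced by this change amounts to the following sketch (simplified; the database fallback described above is elided):

    def obtain_namespace(kdu_instance: str, namespace: str = None) -> str:
        # Prefer the namespace chosen by the caller; otherwise keep the
        # previous behaviour of using the KDU instance name as the model.
        return namespace if namespace else kdu_instance

    assert obtain_namespace("openldap-abc123") == "openldap-abc123"
    assert obtain_namespace("openldap-abc123", "tenant-ns") == "tenant-ns"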
Change-Id: Iace944506ba212034efdbb87c6f2d74f8265ea4e Signed-off-by: Pedro Escaleira --- n2vc/k8s_juju_conn.py | 168 ++++++++++++++++++++------ n2vc/tests/unit/test_k8s_juju_conn.py | 71 +++++++---- 2 files changed, 177 insertions(+), 62 deletions(-) diff --git a/n2vc/k8s_juju_conn.py b/n2vc/k8s_juju_conn.py index 99e868b..396d79b 100644 --- a/n2vc/k8s_juju_conn.py +++ b/n2vc/k8s_juju_conn.py @@ -327,12 +327,16 @@ class K8sJujuConnector(K8sConnector): os.chdir(new_workdir) bundle = "local:{}".format(kdu_model) - self.log.debug("Checking for model named {}".format(kdu_instance)) + # default namespace to kdu_instance + if not namespace: + namespace = kdu_instance + + self.log.debug("Checking for model named {}".format(namespace)) # Create the new model - self.log.debug("Adding model: {}".format(kdu_instance)) + self.log.debug("Adding model: {}".format(namespace)) cloud = Cloud(cluster_uuid, self._get_credential_name(cluster_uuid)) - await libjuju.add_model(kdu_instance, cloud) + await libjuju.add_model(namespace, cloud) # if model: # TODO: Instantiation parameters @@ -351,10 +355,10 @@ class K8sJujuConnector(K8sConnector): previous_workdir = "/app/storage" self.log.debug("[install] deploying {}".format(bundle)) - await libjuju.deploy( - bundle, model_name=kdu_instance, wait=atomic, timeout=timeout - ) + await libjuju.deploy(bundle, model_name=namespace, wait=atomic, timeout=timeout) os.chdir(previous_workdir) + + # update information in the database (first, the VCA status, and then, the namespace) if self.on_update_db: await self.on_update_db( cluster_uuid, @@ -362,6 +366,13 @@ class K8sJujuConnector(K8sConnector): filter=db_dict["filter"], vca_id=kwargs.get("vca_id"), ) + + self.db.set_one( + table="nsrs", + q_filter={"_admin.deployed.K8s.kdu-instance": kdu_instance}, + update_dict={"_admin.deployed.K8s.$.namespace": namespace}, + ) + return True async def scale( @@ -370,6 +381,7 @@ class K8sJujuConnector(K8sConnector): scale: int, resource_name: str, total_timeout: float = 1800, + namespace: str = None, **kwargs, ) -> bool: """Scale an application in a model @@ -379,23 +391,27 @@ class K8sJujuConnector(K8sConnector): :param: resource_name str: The application name in the Juju Bundle :param: timeout float: The time, in seconds, to wait for the install to finish + :param namespace str: The namespace (model) where the Bundle was deployed :param kwargs: Additional parameters vca_id (str): VCA ID :return: If successful, returns True """ + model_name = self._obtain_namespace( + kdu_instance=kdu_instance, namespace=namespace + ) try: libjuju = await self._get_libjuju(kwargs.get("vca_id")) await libjuju.scale_application( - model_name=kdu_instance, + model_name=model_name, application_name=resource_name, scale=scale, total_timeout=total_timeout, ) except Exception as e: - error_msg = "Error scaling application {} in kdu instance {}: {}".format( - resource_name, kdu_instance, e + error_msg = "Error scaling application {} of the model {} of the kdu instance {}: {}".format( + resource_name, model_name, kdu_instance, e ) self.log.error(error_msg) raise K8sException(message=error_msg) @@ -405,24 +421,30 @@ class K8sJujuConnector(K8sConnector): self, resource_name: str, kdu_instance: str, + namespace: str = None, **kwargs, ) -> int: """Get an application scale count :param: resource_name str: The application name in the Juju Bundle :param: kdu_instance str: KDU instance name + :param namespace str: The namespace (model) where the Bundle was deployed :param kwargs: Additional parameters vca_id (str): VCA 
ID :return: Return application instance count """ + model_name = self._obtain_namespace( + kdu_instance=kdu_instance, namespace=namespace + ) try: libjuju = await self._get_libjuju(kwargs.get("vca_id")) - status = await libjuju.get_model_status(kdu_instance) + status = await libjuju.get_model_status(model_name=model_name) return len(status.applications[resource_name].units) except Exception as e: - error_msg = "Error getting scale count from application {} in kdu instance {}: {}".format( - resource_name, kdu_instance, e + error_msg = ( + f"Error getting scale count from application {resource_name} of the model {model_name} of " + f"the kdu instance {kdu_instance}: {e}" ) self.log.error(error_msg) raise K8sException(message=error_msg) @@ -495,42 +517,47 @@ class K8sJujuConnector(K8sConnector): self, cluster_uuid: str, kdu_instance: str, + namespace: str = None, **kwargs, ) -> bool: """Uninstall a KDU instance :param cluster_uuid str: The UUID of the cluster :param kdu_instance str: The unique name of the KDU instance + :param namespace str: The namespace (model) where the Bundle was deployed :param kwargs: Additional parameters vca_id (str): VCA ID :return: Returns True if successful, or raises an exception """ + model_name = self._obtain_namespace( + kdu_instance=kdu_instance, namespace=namespace + ) - self.log.debug("[uninstall] Destroying model") + self.log.debug(f"[uninstall] Destroying model: {model_name}") will_not_delete = False - if kdu_instance not in self.uninstall_locks: - self.uninstall_locks[kdu_instance] = asyncio.Lock(loop=self.loop) - delete_lock = self.uninstall_locks[kdu_instance] + if model_name not in self.uninstall_locks: + self.uninstall_locks[model_name] = asyncio.Lock(loop=self.loop) + delete_lock = self.uninstall_locks[model_name] while delete_lock.locked(): will_not_delete = True await asyncio.sleep(0.1) if will_not_delete: - self.log.info("Model {} deleted by another worker.".format(kdu_instance)) + self.log.info("Model {} deleted by another worker.".format(model_name)) return True try: async with delete_lock: libjuju = await self._get_libjuju(kwargs.get("vca_id")) - await libjuju.destroy_model(kdu_instance, total_timeout=3600) + await libjuju.destroy_model(model_name, total_timeout=3600) finally: - self.uninstall_locks.pop(kdu_instance) + self.uninstall_locks.pop(model_name) - self.log.debug(f"[uninstall] Model {kdu_instance} destroyed") + self.log.debug(f"[uninstall] Model {model_name} destroyed") return True async def upgrade_charm( @@ -565,6 +592,7 @@ class K8sJujuConnector(K8sConnector): timeout: float = 300, params: dict = None, db_dict: dict = None, + namespace: str = None, **kwargs, ) -> str: """Exec primitive (Juju action) @@ -575,6 +603,7 @@ class K8sJujuConnector(K8sConnector): :param timeout: Timeout for action execution :param params: Dictionary of all the parameters needed for the action :param db_dict: Dictionary for any additional data + :param namespace str: The namespace (model) where the Bundle was deployed :param kwargs: Additional parameters vca_id (str): VCA ID @@ -582,6 +611,10 @@ class K8sJujuConnector(K8sConnector): """ libjuju = await self._get_libjuju(kwargs.get("vca_id")) + namespace = self._obtain_namespace( + kdu_instance=kdu_instance, namespace=namespace + ) + if not params or "application-name" not in params: raise K8sException( "Missing application-name argument, \ @@ -590,14 +623,19 @@ class K8sJujuConnector(K8sConnector): try: self.log.debug( "[exec_primitive] Getting model " - "kdu_instance: {}".format(kdu_instance) + "{} for 
the kdu_instance: {}".format(namespace, kdu_instance) ) application_name = params["application-name"] - actions = await libjuju.get_actions(application_name, kdu_instance) + actions = await libjuju.get_actions( + application_name=application_name, model_name=namespace + ) if primitive_name not in actions: raise K8sException("Primitive {} not found".format(primitive_name)) output, status = await libjuju.execute_action( - application_name, kdu_instance, primitive_name, **params + application_name=application_name, + model_name=namespace, + action_name=primitive_name, + **params, ) if status != "completed": @@ -606,7 +644,9 @@ class K8sJujuConnector(K8sConnector): ) if self.on_update_db: await self.on_update_db( - cluster_uuid, kdu_instance, filter=db_dict["filter"] + cluster_uuid=cluster_uuid, + kdu_instance=kdu_instance, + filter=db_dict["filter"], ) return output @@ -669,11 +709,11 @@ class K8sJujuConnector(K8sConnector): ) -> str: """View the README - If available, returns the README of the bundle. + If available, returns the README of the bundle. - :param kdu_model str: The name or path of a bundle - - :return: If found, returns the contents of the README. + :param kdu_model str: The name or path of a bundle + + :return: If found, returns the contents of the README. """ readme = None @@ -693,6 +733,7 @@ class K8sJujuConnector(K8sConnector): kdu_instance: str, complete_status: bool = False, yaml_format: bool = False, + namespace: str = None, **kwargs, ) -> Union[str, dict]: """Get the status of the KDU :param cluster_uuid str: The UUID of the cluster :param kdu_instance str: The unique id of the KDU instance :param complete_status: To get the complete_status of the KDU :param yaml_format: To get the status in proper format for NSR record + :param namespace str: The namespace (model) where the Bundle was deployed :param: kwargs: Additional parameters vca_id (str): VCA ID :return: Returns the status of the KDU instance """ libjuju = await self._get_libjuju(kwargs.get("vca_id")) status = {} - model_status = await libjuju.get_model_status(kdu_instance) + model_name = self._obtain_namespace( + kdu_instance=kdu_instance, namespace=namespace + ) + model_status = await libjuju.get_model_status(model_name=model_name) if not complete_status: for name in model_status.applications: @@ -773,32 +818,44 @@ class K8sJujuConnector(K8sConnector): self.log.error(message) raise Exception(message=message) - async def update_vca_status(self, vcastatus: dict, kdu_instance: str, **kwargs): + async def update_vca_status( + self, vcastatus: dict, kdu_instance: str, namespace: str = None, **kwargs + ): """ Add all configs, actions, executed actions of all applications in a model to vcastatus dict :param vcastatus dict: dict containing vcastatus :param kdu_instance str: The unique id of the KDU instance + :param namespace str: The namespace (model) where the Bundle was deployed :param: kwargs: Additional parameters vca_id (str): VCA ID :return: None """ + + model_name = self._obtain_namespace( + kdu_instance=kdu_instance, namespace=namespace + ) + libjuju = await self._get_libjuju(kwargs.get("vca_id")) try: - for model_name in vcastatus: + for vca_model_name in vcastatus: # Adding executed actions - vcastatus[model_name][ + vcastatus[vca_model_name][ "executedActions" - ] = await libjuju.get_executed_actions(kdu_instance) + ] = await libjuju.get_executed_actions(model_name=model_name) - for application in vcastatus[model_name]["applications"]: + for application in vcastatus[vca_model_name]["applications"]: # Adding application actions - vcastatus[model_name]["applications"][application]["actions"] = {} + vcastatus[vca_model_name]["applications"][application][ + "actions" + ] = {} # Adding application configs - vcastatus[model_name]["applications"][application][ + vcastatus[vca_model_name]["applications"][application][ "configs" - ] = await libjuju.get_application_configs(kdu_instance, application) + ] = await libjuju.get_application_configs( + model_name=model_name, application_name=application + ) except Exception as e: self.log.debug("Error in updating vca status: {}".format(str(e))) @@ -808,10 +865,14 @@ class K8sJujuConnector(K8sConnector): ) -> list: """Return a list of services of a kdu_instance""" + namespace = self._obtain_namespace( + kdu_instance=kdu_instance, namespace=namespace + ) + credentials = self.get_credentials(cluster_uuid=cluster_uuid) kubectl = self._get_kubectl(credentials) return kubectl.get_services( - field_selector="metadata.namespace={}".format(kdu_instance) + field_selector="metadata.namespace={}".format(namespace) ) async def get_service(
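The namespace-scoped lookup above can be reproduced standalone with the kubernetes Python client (an illustrative sketch; the kubeconfig path and namespace are placeholders):

    from kubernetes import client, config

    config.load_kube_config(config_file="./kubeconfig")  # placeholder path
    v1 = client.CoreV1Api()
    # Same filter as kubectl.get_services(field_selector="metadata.namespace=..."):
    services = v1.list_service_for_all_namespaces(
        field_selector="metadata.namespace={}".format("tenant-ns")
    )
    for svc in services.items:
        print(svc.metadata.name)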
@@ -917,3 +978,34 @@ class K8sJujuConnector(K8sConnector): with open(kubecfg.name, "w") as kubecfg_file: kubecfg_file.write(credentials) return Kubectl(config_file=kubecfg.name) + + def _obtain_namespace(self, kdu_instance: str, namespace: str = None) -> str: + """ + Obtain the namespace/model name to use in the instantiation of a Juju Bundle in K8s. The default namespace is + the kdu_instance name. However, if the user passes the namespace where they want to deploy the bundle, + that namespace will be used. + + :param kdu_instance: the default KDU instance name + :param namespace: the namespace passed by the User + """ + + # default the namespace/model name to the kdu_instance name TODO -> this should be the real return... But + # since the namespace is not passed in most methods, I had to do this in another way. But I think this should + # be the procedure in the future return namespace if namespace else kdu_instance + + # TODO -> as referred above, this should be avoided in the future, this is temporary, in order to avoid + # compatibility issues + return ( + namespace + if namespace + else self._obtain_namespace_from_db(kdu_instance=kdu_instance) + ) + + def _obtain_namespace_from_db(self, kdu_instance: str) -> str: + db_nsrs = self.db.get_one( + table="nsrs", q_filter={"_admin.deployed.K8s.kdu-instance": kdu_instance} + ) + for k8s in db_nsrs["_admin"]["deployed"]["K8s"]: + if k8s.get("kdu-instance") == kdu_instance: + return k8s.get("namespace") + return ""
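The database fallback above assumes an nsrs record shaped roughly as below (field names taken from the query in the patch; values are hypothetical):

    db_nsr = {
        "_admin": {
            "deployed": {
                "K8s": [
                    {"kdu-instance": "openldap-abc123", "namespace": "tenant-ns"},
                ]
            }
        }
    }

    def obtain_namespace_from_db(db_nsr: dict, kdu_instance: str) -> str:
        # Same iteration as _obtain_namespace_from_db, minus the Mongo query.
        for k8s in db_nsr["_admin"]["deployed"]["K8s"]:
            if k8s.get("kdu-instance") == kdu_instance:
                return k8s.get("namespace")
        return ""

    assert obtain_namespace_from_db(db_nsr, "openldap-abc123") == "tenant-ns"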
diff --git a/n2vc/tests/unit/test_k8s_juju_conn.py b/n2vc/tests/unit/test_k8s_juju_conn.py index 915738e..3e35494 100644 --- a/n2vc/tests/unit/test_k8s_juju_conn.py +++ b/n2vc/tests/unit/test_k8s_juju_conn.py @@ -67,6 +67,10 @@ class K8sJujuConnTestCase(asynctest.TestCase): ) logging.disable(logging.CRITICAL) + self.kdu_name = "kdu_name" + self.kdu_instance = "{}-{}".format(self.kdu_name, "id") + self.default_namespace = self.kdu_instance + self.k8s_juju_conn = K8sJujuConnector( fs=fslocal.FsLocal(), db=self.db, @@ -83,6 +87,9 @@ class K8sJujuConnTestCase(asynctest.TestCase): self.kubectl.get_services.return_value = [{}] self.k8s_juju_conn._get_kubectl = Mock() self.k8s_juju_conn._get_kubectl.return_value = self.kubectl + self.k8s_juju_conn._obtain_namespace_from_db = Mock( + return_value=self.default_namespace + ) class InitEnvTest(K8sJujuConnTestCase): @@ -203,9 +210,7 @@ class InstallTest(K8sJujuConnTestCase): self.local_bundle = "bundle" self.cs_bundle = "cs:bundle" self.http_bundle = "https://example.com/bundle.yaml" - self.kdu_name = "kdu_name" self.cluster_uuid = "cluster" - self.kdu_instance = "{}-{}".format(self.kdu_name, "id") self.k8s_juju_conn.libjuju.add_model = AsyncMock() self.k8s_juju_conn.libjuju.deploy = AsyncMock() @@ -225,7 +230,7 @@ class InstallTest(K8sJujuConnTestCase): self.k8s_juju_conn.libjuju.add_model.assert_called_once() self.k8s_juju_conn.libjuju.deploy.assert_called_once_with( "local:{}".format(self.local_bundle), - model_name=self.kdu_instance, + model_name=self.default_namespace, wait=True, timeout=1800, ) @@ -245,7 +250,7 @@ class InstallTest(K8sJujuConnTestCase): self.k8s_juju_conn.libjuju.add_model.assert_called_once() self.k8s_juju_conn.libjuju.deploy.assert_called_once_with( self.cs_bundle, - model_name=self.kdu_instance, + model_name=self.default_namespace, wait=True, timeout=1800, ) @@ -265,7 +270,7 @@ class InstallTest(K8sJujuConnTestCase): self.k8s_juju_conn.libjuju.add_model.assert_called_once() self.k8s_juju_conn.libjuju.deploy.assert_called_once_with( self.http_bundle, - model_name=self.kdu_instance, + model_name=self.default_namespace, wait=True, timeout=1800, ) @@ -284,7 +289,7 @@ class InstallTest(K8sJujuConnTestCase): self.k8s_juju_conn.libjuju.add_model.assert_called_once() self.k8s_juju_conn.libjuju.deploy.assert_called_once_with( self.cs_bundle, - model_name=self.kdu_instance, + model_name=self.default_namespace, wait=True, timeout=1800, ) @@ -323,7 +328,7 @@ class InstallTest(K8sJujuConnTestCase): self.k8s_juju_conn.libjuju.add_model.assert_called_once() self.k8s_juju_conn.libjuju.deploy.assert_called_once_with( self.cs_bundle, - model_name=self.kdu_instance, + model_name=self.default_namespace, wait=True, timeout=1800, ) @@ -361,7 +366,7 @@ class InstallTest(K8sJujuConnTestCase): self.k8s_juju_conn.libjuju.add_model.assert_called_once()
self.k8s_juju_conn.libjuju.deploy.assert_called_once_with( "local:{}".format(self.local_bundle), - model_name=self.kdu_instance, + model_name=self.default_namespace, wait=True, timeout=1800, ) @@ -395,7 +400,6 @@ class ExecPrimitivesTest(K8sJujuConnTestCase): super(ExecPrimitivesTest, self).setUp() self.action_name = "touch" self.application_name = "myapp" - self.model_name = "model" self.k8s_juju_conn.libjuju.get_actions = AsyncMock() self.k8s_juju_conn.libjuju.execute_action = AsyncMock() @@ -409,16 +413,22 @@ class ExecPrimitivesTest(K8sJujuConnTestCase): output = self.loop.run_until_complete( self.k8s_juju_conn.exec_primitive( - "cluster", self.model_name, self.action_name, params=params + "cluster", self.kdu_instance, self.action_name, params=params ) ) self.assertEqual(output, "success") + self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with( + kdu_instance=self.kdu_instance + ) self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with( - self.application_name, self.model_name + application_name=self.application_name, model_name=self.default_namespace ) self.k8s_juju_conn.libjuju.execute_action.assert_called_once_with( - self.application_name, self.model_name, self.action_name, **params + application_name=self.application_name, + model_name=self.default_namespace, + action_name=self.action_name, + **params ) def test_exception(self): @@ -430,16 +440,22 @@ class ExecPrimitivesTest(K8sJujuConnTestCase): with self.assertRaises(Exception): output = self.loop.run_until_complete( self.k8s_juju_conn.exec_primitive( - "cluster", self.model_name, self.action_name, params=params + "cluster", self.kdu_instance, self.action_name, params=params ) ) self.assertIsNone(output) + self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with( + kdu_instance=self.kdu_instance + ) self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with( - self.application_name, self.model_name + application_name=self.application_name, model_name=self.default_namespace ) self.k8s_juju_conn.libjuju.execute_action.assert_called_once_with( - self.application_name, self.model_name, self.action_name, **params + application_name=self.application_name, + model_name=self.default_namespace, + action_name=self.action_name, + **params ) def test_missing_application_name_in_params(self): @@ -449,7 +465,7 @@ class ExecPrimitivesTest(K8sJujuConnTestCase): with self.assertRaises(K8sException): output = self.loop.run_until_complete( self.k8s_juju_conn.exec_primitive( - "cluster", self.model_name, self.action_name, params=params + "cluster", self.kdu_instance, self.action_name, params=params ) ) @@ -462,7 +478,7 @@ class ExecPrimitivesTest(K8sJujuConnTestCase): with self.assertRaises(K8sException): output = self.loop.run_until_complete( self.k8s_juju_conn.exec_primitive( - "cluster", self.model_name, self.action_name + "cluster", self.kdu_instance, self.action_name ) ) @@ -481,13 +497,16 @@ class ExecPrimitivesTest(K8sJujuConnTestCase): with self.assertRaises(K8sException): output = self.loop.run_until_complete( self.k8s_juju_conn.exec_primitive( - "cluster", self.model_name, "non-existing-action", params=params + "cluster", self.kdu_instance, "non-existing-action", params=params ) ) self.assertIsNone(output) + self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with( + kdu_instance=self.kdu_instance + ) self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with( - self.application_name, self.model_name + application_name=self.application_name, model_name=self.default_namespace ) 
self.k8s_juju_conn.libjuju.execute_action.assert_not_called() @@ -499,16 +518,22 @@ class ExecPrimitivesTest(K8sJujuConnTestCase): with self.assertRaises(K8sException): output = self.loop.run_until_complete( self.k8s_juju_conn.exec_primitive( - "cluster", self.model_name, self.action_name, params=params + "cluster", self.kdu_instance, self.action_name, params=params ) ) self.assertIsNone(output) + self.k8s_juju_conn._obtain_namespace_from_db.assert_called_once_with( + kdu_instance=self.kdu_instance + ) self.k8s_juju_conn.libjuju.get_actions.assert_called_once_with( - self.application_name, self.model_name + application_name=self.application_name, model_name=self.default_namespace ) self.k8s_juju_conn.libjuju.execute_action.assert_called_once_with( - self.application_name, self.model_name, self.action_name, **params + application_name=self.application_name, + model_name=self.default_namespace, + action_name=self.action_name, + **params ) @@ -647,8 +672,6 @@ class UpdateVcaStatusTest(K8sJujuConnTestCase): def setUp(self): super(UpdateVcaStatusTest, self).setUp() self.vcaStatus = {"model": {"applications": {"app": {"actions": {}}}}} - self.kdu_name = "kdu_name" - self.kdu_instance = "{}-{}".format(self.kdu_name, "id") self.k8s_juju_conn.libjuju.get_executed_actions = AsyncMock() self.k8s_juju_conn.libjuju.get_actions = AsyncMock() self.k8s_juju_conn.libjuju.get_application_configs = AsyncMock() -- 2.25.1 From b46f88d2ce319c7661dc6064c8c76d020e314fb6 Mon Sep 17 00:00:00 2001 From: Pedro Escaleira Date: Sat, 23 Apr 2022 19:55:45 +0100 Subject: [PATCH 06/16] Bug 2005 fixed: removed the while True loop from K8sHelmBaseConnector._store_status Made the necessary refactor and fixed the tests accordingly. Now, the `_store_status` method is not executed forever; it is only executed once each time it is called.
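The new calling pattern boils down to the following sketch (plain asyncio, names illustrative; the real coroutine queries the Helm status and writes it to MongoDB):

    import asyncio

    async def store_status_once(operation: str) -> None:
        # One-shot now: obtain the status once, write it once, return.
        print("status stored for", operation)

    async def install():
        # A background write can still be scheduled while the command runs...
        task = asyncio.ensure_future(store_status_once("install"))
        await asyncio.sleep(0.1)  # stands in for the helm subprocess
        task.cancel()  # harmless if the one-shot already finished
        # ...followed by one final awaited write when the command completes.
        await store_status_once("install")

    asyncio.run(install())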
Change-Id: Ia96ab3152fe7f838d1b81dd02c2d22373b805f4a Signed-off-by: Pedro Escaleira --- n2vc/k8s_conn.py | 12 ++++ n2vc/k8s_helm_base_conn.py | 91 +++++++++++++------------- n2vc/tests/unit/test_k8s_helm3_conn.py | 10 --- n2vc/tests/unit/test_k8s_helm_conn.py | 10 --- 4 files changed, 56 insertions(+), 67 deletions(-) diff --git a/n2vc/k8s_conn.py b/n2vc/k8s_conn.py index f692abc..ada7075 100644 --- a/n2vc/k8s_conn.py +++ b/n2vc/k8s_conn.py @@ -458,6 +458,18 @@ class K8sConnector(abc.ABC, Loggable): async def write_app_status_to_db( self, db_dict: dict, status: str, detailed_status: str, operation: str ) -> bool: + """ + This method will write the status of the application to the database. + + :param db_dict: A dictionary with the necessary database information. It shall contain the values for the keys: + - "collection": The Mongo DB collection to write to + - "filter": The query filter to use in the update process + - "path": The dot separated keys which targets the object to be updated + :param status: Status of the application + :param detailed_status: Detailed status of the application + :param operation: Operation that is being performed on the application + :return: True if successful + """ if not self.db: self.warning("No db => No database write")
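An illustrative db_dict following the keys documented above (collection, filter and path values are hypothetical):

    db_dict = {
        "collection": "nsrs",             # Mongo DB collection to write to
        "filter": {"_id": "0000-1111"},   # hypothetical query filter
        "path": "_admin.deployed.K8s.0",  # dot-separated target inside the record
    }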
diff --git a/n2vc/k8s_helm_base_conn.py b/n2vc/k8s_helm_base_conn.py index dd2116c..f0a049b 100644 --- a/n2vc/k8s_helm_base_conn.py +++ b/n2vc/k8s_helm_base_conn.py @@ -434,7 +434,6 @@ class K8sHelmBaseConnector(K8sConnector): namespace=namespace, db_dict=db_dict, operation="install", - run_once=False, ) ) @@ -463,8 +462,6 @@ class K8sHelmBaseConnector(K8sConnector): namespace=namespace, db_dict=db_dict, operation="install", - run_once=True, - check_every=0, ) if rc != 0: @@ -541,7 +538,6 @@ class K8sHelmBaseConnector(K8sConnector): namespace=instance_info["namespace"], db_dict=db_dict, operation="upgrade", - run_once=False, ) ) @@ -569,8 +565,6 @@ class K8sHelmBaseConnector(K8sConnector): namespace=instance_info["namespace"], db_dict=db_dict, operation="upgrade", - run_once=True, - check_every=0, ) if rc != 0: @@ -680,7 +674,6 @@ class K8sHelmBaseConnector(K8sConnector): namespace=instance_info["namespace"], db_dict=db_dict, operation="scale", - run_once=False, ) ) @@ -703,8 +696,6 @@ class K8sHelmBaseConnector(K8sConnector): namespace=instance_info["namespace"], db_dict=db_dict, operation="scale", - run_once=True, - check_every=0, ) if rc != 0: @@ -820,7 +811,6 @@ class K8sHelmBaseConnector(K8sConnector): namespace=instance_info["namespace"], db_dict=db_dict, operation="rollback", - run_once=False, ) ) @@ -839,8 +829,6 @@ class K8sHelmBaseConnector(K8sConnector): namespace=instance_info["namespace"], db_dict=db_dict, operation="rollback", - run_once=True, - check_every=0, ) if rc != 0: @@ -1860,42 +1848,51 @@ class K8sHelmBaseConnector(K8sConnector): operation: str, kdu_instance: str, namespace: str = None, - check_every: float = 10, db_dict: dict = None, - run_once: bool = False, - ): - while True: - try: - await asyncio.sleep(check_every) - detailed_status = await self._status_kdu( - cluster_id=cluster_id, - kdu_instance=kdu_instance, - yaml_format=False, - namespace=namespace, - ) - status = detailed_status.get("info").get("description") - self.log.debug("KDU {} STATUS: {}.".format(kdu_instance, status)) - # write status to db - result = await self.write_app_status_to_db( - db_dict=db_dict, - status=str(status), - detailed_status=str(detailed_status), - operation=operation, - ) - if not result: - self.log.info("Error writing in database. Task exiting...") - return - except asyncio.CancelledError: - self.log.debug("Task cancelled") - return - except Exception as e: - self.log.debug( - "_store_status exception: {}".format(str(e)), exc_info=True - ) - pass - finally: - if run_once: - return + ) -> None: + """ + Obtains the status of the KDU instance based on Helm Charts, and stores it in the database. + + :param cluster_id (str): the cluster where the KDU instance is deployed + :param operation (str): The operation related to the status to be updated (for instance, "install" or "upgrade") + :param kdu_instance (str): The KDU instance in relation to which the status is obtained + :param namespace (str): The Kubernetes namespace where the KDU instance was deployed. Defaults to None + :param db_dict (dict): A dictionary with the necessary database information. It shall contain the + values for the keys: + - "collection": The Mongo DB collection to write to + - "filter": The query filter to use in the update process + - "path": The dot separated keys which targets the object to be updated + Defaults to None. + """ + + try: + detailed_status = await self._status_kdu( + cluster_id=cluster_id, + kdu_instance=kdu_instance, + yaml_format=False, + namespace=namespace, + ) + + status = detailed_status.get("info").get("description") + self.log.debug(f"Status for KDU {kdu_instance} obtained: {status}.") + + # write status to db + result = await self.write_app_status_to_db( + db_dict=db_dict, + status=str(status), + detailed_status=str(detailed_status), + operation=operation, + ) + + if not result: + self.log.info("Error writing in database. Task exiting...") + + except asyncio.CancelledError as e: + self.log.warning( + f"Exception in method {self._store_status.__name__} (task cancelled): {e}" + ) + except Exception as e: + self.log.warning(f"Exception in method {self._store_status.__name__}: {e}") # params for use in -f file # returns values file option and filename (in order to delete it at the end) diff --git a/n2vc/tests/unit/test_k8s_helm3_conn.py b/n2vc/tests/unit/test_k8s_helm3_conn.py index d1b7903..201306f 100644 --- a/n2vc/tests/unit/test_k8s_helm3_conn.py +++ b/n2vc/tests/unit/test_k8s_helm3_conn.py @@ -206,8 +206,6 @@ class TestK8sHelm3Conn(asynctest.TestCase): namespace=self.namespace, db_dict=db_dict, operation="install", - run_once=True, - check_every=0, ) command = ( "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 " @@ -279,8 +277,6 @@ class TestK8sHelm3Conn(asynctest.TestCase): namespace=self.namespace, db_dict=db_dict, operation="upgrade", - run_once=True, - check_every=0, ) command = ( "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config " @@ -373,8 +369,6 @@ class TestK8sHelm3Conn(asynctest.TestCase): namespace=self.namespace, db_dict=db_dict, operation="scale", - run_once=True, - check_every=0, ) @asynctest.fail_on(active_handles=True) @@ -407,8 +401,6 @@ class TestK8sHelm3Conn(asynctest.TestCase): namespace=self.namespace, db_dict=db_dict, operation="rollback", - run_once=True, - check_every=0, ) command = ( "env KUBECONFIG=./tmp/helm3_cluster_id/.kube/config /usr/bin/helm3 " @@ -612,8 +604,6 @@ class TestK8sHelm3Conn(asynctest.TestCase): namespace=self.namespace, db_dict=db_dict, operation="install", - run_once=True, - check_every=0, ) self.helm_conn._status_kdu.assert_called_once_with( cluster_id=self.cluster_id, diff --git a/n2vc/tests/unit/test_k8s_helm_conn.py b/n2vc/tests/unit/test_k8s_helm_conn.py index fb586a3..907ff40 100644 --- a/n2vc/tests/unit/test_k8s_helm_conn.py +++ b/n2vc/tests/unit/test_k8s_helm_conn.py @@ -170,8 +170,6 @@ class TestK8sHelmConn(asynctest.TestCase): namespace=self.namespace, db_dict=db_dict, operation="install", - run_once=True, - check_every=0, ) command = ( "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm install " @@ -214,8 +212,6 @@ class TestK8sHelmConn(asynctest.TestCase): namespace=self.namespace, db_dict=db_dict, operation="upgrade", - run_once=True, - check_every=0, ) command = ( "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm upgrade " @@ -307,8 +303,6 @@ class TestK8sHelmConn(asynctest.TestCase): namespace=self.namespace, db_dict=db_dict, operation="scale", - run_once=True, - check_every=0, ) @asynctest.fail_on(active_handles=True) @@ -341,8 +335,6 @@ class TestK8sHelmConn(asynctest.TestCase): namespace=self.namespace, db_dict=db_dict, operation="rollback", - run_once=True, - check_every=0, ) command = ( "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config " @@ -546,8 +538,6 @@ class TestK8sHelmConn(asynctest.TestCase): namespace=self.namespace, db_dict=db_dict, operation="install", - run_once=True, - check_every=0, ) self.helm_conn._status_kdu.assert_called_once_with( cluster_id=self.cluster_id, -- 2.25.1 From 1f222a91e4d79c3799bb921e3fee3cbc2b2a11a1 Mon Sep 17 00:00:00 2001 From: Pedro Escaleira Date: Mon, 20 Jun 2022 15:40:43 +0100 Subject: [PATCH 07/16] Bug 2085 fixed: added an async lock every time a cmd is executed in the Helm conn
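The serialization added by this patch reduces to the following standalone sketch (POSIX echo assumed for the subprocess):

    import asyncio

    cmd_lock = asyncio.Lock()

    async def local_async_exec(command):
        # Only one command may run at a time; concurrent helm invocations
        # would otherwise race on shared repo/cache state.
        async with cmd_lock:
            process = await asyncio.create_subprocess_exec(
                *command, stdout=asyncio.subprocess.PIPE
            )
            stdout, _ = await process.communicate()
        return stdout.decode().strip()

    async def main():
        print(await asyncio.gather(
            local_async_exec(["echo", "first"]),
            local_async_exec(["echo", "second"]),
        ))

    asyncio.run(main())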
Change-Id: I118ab7264cff8e51f098e9249fbb4d0872290058 Signed-off-by: Pedro Escaleira --- n2vc/k8s_helm_base_conn.py | 43 ++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/n2vc/k8s_helm_base_conn.py b/n2vc/k8s_helm_base_conn.py index f0a049b..e494a76 100644 --- a/n2vc/k8s_helm_base_conn.py +++ b/n2vc/k8s_helm_base_conn.py @@ -90,6 +90,9 @@ class K8sHelmBaseConnector(K8sConnector): if self._stable_repo_url == "None": self._stable_repo_url = None + # Lock to avoid concurrent execution of helm commands + self.cmd_lock = asyncio.Lock() + def _get_namespace(self, cluster_uuid: str) -> str: """ Obtains the namespace used by the cluster with the uuid passed by argument @@ -1506,17 +1509,18 @@ class K8sHelmBaseConnector(K8sConnector): environ.update(env) try: - process = await asyncio.create_subprocess_exec( - *command, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - env=environ, - ) + async with self.cmd_lock: + process = await asyncio.create_subprocess_exec( + *command, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + env=environ, + ) - # wait for command terminate - stdout, stderr = await process.communicate() + # wait for command terminate + stdout, stderr = await process.communicate() - return_code = process.returncode + return_code = process.returncode output = "" if stdout: @@ -1580,16 +1584,19 @@ class K8sHelmBaseConnector(K8sConnector): environ.update(env) try: - read, write = os.pipe() - await asyncio.create_subprocess_exec(*command1, stdout=write, env=environ) - os.close(write) - process_2 = await asyncio.create_subprocess_exec( - *command2, stdin=read, stdout=asyncio.subprocess.PIPE, env=environ - ) - os.close(read) - stdout, stderr = await process_2.communicate() + async with self.cmd_lock: + read, write = os.pipe() + await asyncio.create_subprocess_exec( + *command1, stdout=write, env=environ + ) + os.close(write) + process_2 = await asyncio.create_subprocess_exec( + *command2, stdin=read, stdout=asyncio.subprocess.PIPE, env=environ + ) + os.close(read) + stdout, stderr = await process_2.communicate() - return_code = process_2.returncode + return_code = process_2.returncode output = "" if stdout: -- 2.25.1 From 553d1b7cf256eb95f8fdb9822a23ac755e1764f3 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Tue, 21 Jun 2022 11:26:57 +0200 Subject: [PATCH 08/16] Fix bug 2036 The unit tests were failing because of a wrong mock Change-Id: Ie8fd9f6cf926e0aee72c1cd5b1601c073c9928ba Signed-off-by: David Garcia --- n2vc/tests/unit/test_libjuju.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/n2vc/tests/unit/test_libjuju.py b/n2vc/tests/unit/test_libjuju.py index 67cd19f..e7f43ab 100644 --- a/n2vc/tests/unit/test_libjuju.py +++ b/n2vc/tests/unit/test_libjuju.py @@ -60,7 +60,7 @@ class
LibjujuTestCase(asynctest.TestCase): self.loop = asyncio.get_event_loop() self.db = Mock() mock_base64_to_cacert.return_value = cacert - Connection._load_vca_connection_data = Mock() + # Connection._load_vca_connection_data = Mock() vca_connection = Connection(AsyncMock()) vca_connection._data = ConnectionData( **{ -- 2.25.1 From 287a4fcaa7ad46d7c3f42e0f9538073d7e889c32 Mon Sep 17 00:00:00 2001 From: aticig Date: Wed, 22 Jun 2022 11:40:00 +0300 Subject: [PATCH 09/16] Updating Python-libjuju version to 2.9.10 Change-Id: I1286443df6c76503a95cbaeb1cd9543d3a5841c3 Signed-off-by: aticig --- requirements.in | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.in b/requirements.in index 576d1ff..6c89871 100644 --- a/requirements.in +++ b/requirements.in @@ -13,7 +13,7 @@ # limitations under the License. async-timeout<4 -juju==2.9.8 +juju==2.9.10 kubernetes motor==1.3.1 pyasn1 diff --git a/requirements.txt b/requirements.txt index ef0d3a9..d566bbb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -39,7 +39,7 @@ google-auth==2.3.3 # via kubernetes idna==3.3 # via requests -juju==2.9.8 +juju==2.9.10 # via -r requirements.in jujubundlelib==0.5.7 # via theblues -- 2.25.1 From 23c4455e7095b71ecd349f7457dea8f74b441a4b Mon Sep 17 00:00:00 2001 From: beierlm Date: Fri, 24 Jun 2022 13:45:23 -0400 Subject: [PATCH 10/16] Manual update of pip requirements Change-Id: If32e6a3789884c9d7a9521f4955a608ae59c9381 Signed-off-by: beierlm --- requirements-test.txt | 12 ++++++------ requirements.txt | 36 ++++++++++++++++++------------------ tox.ini | 2 +- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/requirements-test.txt b/requirements-test.txt index c6718ee..87df1c9 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -16,11 +16,11 @@ ####################################################################################### asynctest==0.13.0 # via -r requirements-test.in -certifi==2021.10.8 +certifi==2022.6.15 # via requests -charset-normalizer==2.0.10 +charset-normalizer==2.0.12 # via requests -coverage==6.2 +coverage==6.4.1 # via # -r requirements-test.in # nose2 @@ -32,13 +32,13 @@ mccabe==0.6.1 # via flake8 mock==4.0.3 # via -r requirements-test.in -nose2==0.10.0 +nose2==0.11.0 # via -r requirements-test.in pycodestyle==2.8.0 # via flake8 pyflakes==2.4.0 # via flake8 -requests==2.27.1 +requests==2.28.0 # via requests-mock requests-mock==1.9.3 # via -r requirements-test.in @@ -46,5 +46,5 @@ six==1.16.0 # via # nose2 # requests-mock -urllib3==1.26.8 +urllib3==1.26.9 # via requests diff --git a/requirements.txt b/requirements.txt index d566bbb..32769ae 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,11 +18,11 @@ async-timeout==3.0.1 # via # -r requirements.in # retrying-async -bcrypt==3.2.0 +bcrypt==3.2.2 # via paramiko -cachetools==4.2.4 +cachetools==5.2.0 # via google-auth -certifi==2021.10.8 +certifi==2022.6.15 # via # kubernetes # requests @@ -31,11 +31,11 @@ cffi==1.15.0 # bcrypt # cryptography # pynacl -charset-normalizer==2.0.10 +charset-normalizer==2.0.12 # via requests -cryptography==36.0.1 +cryptography==37.0.2 # via paramiko -google-auth==2.3.3 +google-auth==2.8.0 # via kubernetes idna==3.3 # via requests @@ -43,7 +43,7 @@ juju==2.9.10 # via -r requirements.in jujubundlelib==0.5.7 # via theblues -kubernetes==21.7.0 +kubernetes==24.2.0 # via # -r requirements.in # juju @@ -55,11 +55,11 @@ motor==1.3.1 # via -r requirements.in mypy-extensions==0.4.3 # via typing-inspect -oauthlib==3.1.1 +oauthlib==3.2.0 # via 
requests-oauthlib -paramiko==2.9.2 +paramiko==2.11.0 # via juju -protobuf==3.19.3 +protobuf==3.20.1 # via macaroonbakery pyasn1==0.4.8 # via @@ -86,7 +86,7 @@ pyrfc3339==1.1 # macaroonbakery python-dateutil==2.8.2 # via kubernetes -pytz==2021.3 +pytz==2022.1 # via pyrfc3339 pyyaml==5.4.1 # via @@ -94,39 +94,39 @@ pyyaml==5.4.1 # juju # jujubundlelib # kubernetes -requests==2.27.1 +requests==2.28.0 # via # kubernetes # macaroonbakery # requests-oauthlib # theblues -requests-oauthlib==1.3.0 +requests-oauthlib==1.3.1 # via kubernetes -retrying-async==1.2.0 +retrying-async==2.0.0 # via -r requirements.in rsa==4.8 # via google-auth six==1.16.0 # via - # bcrypt # google-auth # kubernetes # macaroonbakery + # paramiko # pymacaroons # python-dateutil theblues==0.5.2 # via juju toposort==1.7 # via juju -typing-extensions==4.0.1 +typing-extensions==4.2.0 # via typing-inspect typing-inspect==0.7.1 # via juju -urllib3==1.26.8 +urllib3==1.26.9 # via # kubernetes # requests -websocket-client==1.2.3 +websocket-client==1.3.3 # via kubernetes websockets==7.0 # via juju diff --git a/tox.ini b/tox.ini index 5aaf2a4..2e1199f 100644 --- a/tox.ini +++ b/tox.ini @@ -83,7 +83,7 @@ commands = ####################################################################################### [testenv:pip-compile] -deps = pip-tools==6.4.0 +deps = pip-tools==6.6.2 skip_install = true whitelist_externals = bash [ -- 2.25.1 From 9e297a4a8ddae703467a57d87538d01b76b12d17 Mon Sep 17 00:00:00 2001 From: Mark Beierl Date: Thu, 14 Jul 2022 09:39:01 -0400 Subject: [PATCH 11/16] Jenkins refresh Change the jenkins node label from 'docker' to 'stage_2' as part of the Jenkins cleanup Change-Id: Ifc15cf371e588cd482b296ac1d6e85f8f51a2c22 Signed-off-by: Mark Beierl --- Jenkinsfile | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index e384cbd..d8e7474 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,17 +1,19 @@ -/* - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +/* Copyright ETSI OSM and others + * + * All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. You may obtain + * a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ properties([ parameters([ @@ -31,7 +33,7 @@ def devops_checkout() { } } -node('docker') { +node('stage_2') { checkout scm devops_checkout() -- 2.25.1 From 3c443f5899f2cc953b27ed4ac4c5d1a247248c5e Mon Sep 17 00:00:00 2001 From: limon Date: Thu, 21 Jul 2022 13:55:55 +0200 Subject: [PATCH 12/16] Ensure async calls are performed Change-Id: I3f8afe4c7aa4e5ab1e5f2b19edf4c07b5d6a5d64 Signed-off-by: limon --- n2vc/k8s_helm_base_conn.py | 6 +++--- n2vc/tests/unit/test_k8s_helm3_conn.py | 25 ++++++++++++++++++------- n2vc/tests/unit/test_k8s_helm_conn.py | 25 ++++++++++++++++++------- 3 files changed, 39 insertions(+), 17 deletions(-) diff --git a/n2vc/k8s_helm_base_conn.py b/n2vc/k8s_helm_base_conn.py index e494a76..c20b55d 100644 --- a/n2vc/k8s_helm_base_conn.py +++ b/n2vc/k8s_helm_base_conn.py @@ -406,7 +406,7 @@ class K8sHelmBaseConnector(K8sConnector): repo = self._split_repo(kdu_model) if repo: - self.repo_update(cluster_id, repo) + await self.repo_update(cluster_id, repo) command = self._get_install_command( kdu_model, @@ -510,7 +510,7 @@ class K8sHelmBaseConnector(K8sConnector): repo = self._split_repo(kdu_model) if repo: - self.repo_update(cluster_uuid, repo) + await self.repo_update(cluster_uuid, repo) command = self._get_upgrade_command( kdu_model, @@ -1992,7 +1992,7 @@ class K8sHelmBaseConnector(K8sConnector): kdu_model = parts[0] return kdu_model, version - async def _split_repo(self, kdu_model: str) -> str: + def _split_repo(self, kdu_model: str) -> str: repo_name = None idx = kdu_model.find("/") if idx >= 0: diff --git a/n2vc/tests/unit/test_k8s_helm3_conn.py b/n2vc/tests/unit/test_k8s_helm3_conn.py index 201306f..131b5c4 100644 --- a/n2vc/tests/unit/test_k8s_helm3_conn.py +++ b/n2vc/tests/unit/test_k8s_helm3_conn.py @@ -196,9 +196,17 @@ class TestK8sHelm3Conn(asynctest.TestCase): self.helm_conn._create_namespace.assert_called_once_with( self.cluster_id, self.namespace ) - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id + self.helm_conn.fs.sync.assert_has_calls( + [ + asynctest.call(from_path=self.cluster_id), + asynctest.call(from_path=self.cluster_id), + ] + ) + self.helm_conn.fs.reverse_sync.assert_has_calls( + [ + asynctest.call(from_path=self.cluster_id), + asynctest.call(from_path=self.cluster_id), + ] ) self.helm_conn._store_status.assert_called_with( cluster_id=self.cluster_id, @@ -212,7 +220,7 @@ class TestK8sHelm3Conn(asynctest.TestCase): "install stable-openldap-0005399828 --atomic --output yaml " "--timeout 300s --namespace testk8s stable/openldap --version 1.2.2" ) - self.helm_conn._local_async_exec.assert_called_once_with( + self.helm_conn._local_async_exec.assert_called_with( command=command, env=self.env, raise_exception_on_error=False ) @@ -268,8 +276,11 @@ class TestK8sHelm3Conn(asynctest.TestCase): self.cluster_uuid, kdu_instance, kdu_model, atomic=True, db_dict=db_dict ) self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id + self.helm_conn.fs.reverse_sync.assert_has_calls( + [ + asynctest.call(from_path=self.cluster_id), + asynctest.call(from_path=self.cluster_id), + ] ) self.helm_conn._store_status.assert_called_with( cluster_id=self.cluster_id, @@ -284,7 +295,7 @@ class TestK8sHelm3Conn(asynctest.TestCase): "--namespace testk8s --atomic --output yaml --timeout 300s " "--version 1.2.3" ) - self.helm_conn._local_async_exec.assert_called_once_with( + 
self.helm_conn._local_async_exec.assert_called_with( command=command, env=self.env, raise_exception_on_error=False ) -- 2.25.1
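The bug class fixed by the patch above is easy to reproduce in isolation: calling a coroutine function without await only creates a coroutine object and never runs it (pure asyncio, names illustrative):

    import asyncio

    async def repo_update(cluster_id: str, repo: str) -> None:
        print("repo {} updated for cluster {}".format(repo, cluster_id))

    async def install_buggy():
        # Coroutine created but never awaited: nothing runs, and Python
        # emits "RuntimeWarning: coroutine ... was never awaited".
        repo_update("cluster-1", "stable")

    async def install_fixed():
        await repo_update("cluster-1", "stable")  # actually executes

    asyncio.run(install_buggy())
    asyncio.run(install_fixed())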
app-vnf-<vnf_id>-vdu-<vdu_id>-cnt-<vdu_count>-<random_string>
+    @staticmethod
+    def _find_charm_level(vnf_id: str, vdu_id: str) -> str:
+        """Decides the charm level.
+        Args:
+            vnf_id (str): VNF id
+            vdu_id (str): VDU id
+
+        Returns:
+            charm_level (str): ns-level or vnf-level or vdu-level
         """
+        if vdu_id and not vnf_id:
+            raise N2VCException(message="If vdu-id exists, vnf-id should be provided.")
+        if vnf_id and vdu_id:
+            return "vdu-level"
+        if vnf_id and not vdu_id:
+            return "vnf-level"
+        if not vnf_id and not vdu_id:
+            return "ns-level"
+
+    @staticmethod
+    def _generate_backward_compatible_application_name(
+        vnf_id: str, vdu_id: str, vdu_count: str
+    ) -> str:
+        """Generate a backward compatible application name
+        by limiting the app name to 50 characters.
 
-        # TODO: Enforce the Juju 50-character application limit
+        Args:
+            vnf_id (str): VNF ID
+            vdu_id (str): VDU ID
+            vdu_count (str): vdu-count-index
 
-        # split namespace components
-        _, _, vnf_id, vdu_id, vdu_count = self._get_namespace_components(
-            namespace=namespace
-        )
+        Returns:
+            application_name (str): generated application name
+        """
         if vnf_id is None or len(vnf_id) == 0:
             vnf_id = ""
         else:
@@ -1262,6 +1284,189 @@ class N2VCJujuConnector(N2VCConnector):
         application_name = "app-{}{}{}-{}".format(
             vnf_id, vdu_id, vdu_count, random_suffix
         )
+        return application_name
+
+    @staticmethod
+    def _generate_application_name(
+        charm_level: str,
+        vnfrs: dict,
+        vca_records: list,
+        vnf_count: str = None,
+        vdu_count: str = None,
+    ) -> str:
+        """Generate the application name so that the relevant charm of the
+        VDU/KDU in the VNFD descriptor becomes clearly visible.
+        Limiting the app name to 50 characters.
+
+        Args:
+            charm_level (str): level of the charm (ns-level, vnf-level or vdu-level)
+            vnfrs (dict): VNF record from the database
+            vca_records (list): db_nsr["_admin"]["deployed"]["VCA"] as list
+            vnf_count (str): vnf count index
+            vdu_count (str): vdu count index
+
+        Returns:
+            application_name (str): generated application name
+
+        """
+        application_name = ""
+        if charm_level == "ns-level":
+            if len(vca_records) != 1:
+                raise N2VCException(message="One VCA record is expected.")
+            # Only one VCA record is expected for an ns-level charm.
+            # Shorten the charm name to its first 40 characters.
+            charm_name = vca_records[0]["charm_name"][:40]
+            if not charm_name:
+                raise N2VCException(message="Charm name should be provided.")
+            application_name = charm_name + "-ns"
+
+        elif charm_level == "vnf-level":
+            if len(vca_records) < 1:
+                raise N2VCException(message="One or more VCA record is expected.")
+            # If the VNF is scaled, more than one VCA record may be included in
+            # vca_records, but the ee_descriptor_id is the same.
+            # Shorten the ee_descriptor_id and member-vnf-index-ref
+            # to their first 12 characters.
+            application_name = (
+                vca_records[0]["ee_descriptor_id"][:12]
+                + "-"
+                + vnf_count
+                + "-"
+                + vnfrs["member-vnf-index-ref"][:12]
+                + "-vnf"
+            )
+        elif charm_level == "vdu-level":
+            if len(vca_records) < 1:
+                raise N2VCException(message="One or more VCA record is expected.")
+            vdu_profile_id = vnfrs["vdur"][int(vdu_count)]["vdu-id-ref"]
+            # If the vnf/vdu is scaled, more than one VCA record may be included in
+            # vca_records, but the ee_descriptor_id is the same.
+            # Shorten the ee_descriptor_id, member-vnf-index-ref and vdu_profile_id
+            # to their first 12 characters.
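+            # Illustrative example (values borrowed from the unit tests below,
+            # not authoritative): ee_descriptor_id="simple-ee-abc-000-...",
+            # vnf_count="2", member-vnf-index-ref="vnf111-xxx-yyy-zzz",
+            # vdu_profile_id="mgmtVM" and vdu_count="0" yield
+            # "simple-ee-ab-2-vnf111-xxx-y-mgmtVM-0-vdu".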
+            application_name = (
+                vca_records[0]["ee_descriptor_id"][:12]
+                + "-"
+                + vnf_count
+                + "-"
+                + vnfrs["member-vnf-index-ref"][:12]
+                + "-"
+                + vdu_profile_id[:12]
+                + "-"
+                + vdu_count
+                + "-vdu"
+            )
+
+        return application_name
+
+    def _get_vnf_count_and_record(
+        self, charm_level: str, vnf_id_and_count: str
+    ) -> Tuple[str, dict]:
+        """Get the vnf count and the VNF record depending on the charm level
+
+        Args:
+            charm_level (str)
+            vnf_id_and_count (str)
+
+        Returns:
+            (vnf_count (str), db_vnfr (dict)) as Tuple
+
+        """
+        vnf_count = ""
+        db_vnfr = {}
+
+        if charm_level in ("vnf-level", "vdu-level"):
+            vnf_id = "-".join(vnf_id_and_count.split("-")[:-1])
+            vnf_count = vnf_id_and_count.split("-")[-1]
+            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
+
+        # If the charm is ns level, it returns empty vnf_count and db_vnfr
+        return vnf_count, db_vnfr
+
+    @staticmethod
+    def _get_vca_records(charm_level: str, db_nsr: dict, db_vnfr: dict) -> list:
+        """Get the VCA records from the db_nsr dict
+
+        Args:
+            charm_level (str): level of charm
+            db_nsr (dict): NS record from database
+            db_vnfr (dict): VNF record from database
+
+        Returns:
+            vca_records (list): List of VCA record dictionaries
+
+        """
+        vca_records = []
+        if charm_level == "ns-level":
+            vca_records = list(
+                filter(
+                    lambda vca_record: vca_record["target_element"] == "ns",
+                    db_nsr["_admin"]["deployed"]["VCA"],
+                )
+            )
+        elif charm_level in ["vnf-level", "vdu-level"]:
+            vca_records = list(
+                filter(
+                    lambda vca_record: vca_record["member-vnf-index"]
+                    == db_vnfr["member-vnf-index-ref"],
+                    db_nsr["_admin"]["deployed"]["VCA"],
+                )
+            )
+
+        return vca_records
+
+    def _get_application_name(self, namespace: str) -> str:
+        """Build application name from namespace
+
+        Application name structure:
+            NS level: <charm-name>-ns
+            VNF level: <ee-name>-z<vnf-count>-<vnf-profile-id>-vnf
+            VDU level: <ee-name>-z<vnf-count>-<vnf-profile-id>-
+            <vdu-profile-id>-z<vdu-count>-vdu
+
+        Application naming for backward compatibility (old structure):
+            NS level: app-<random-string>
+            VNF level: app-vnf-<vnf-id>-z<vnf-count>-<random-string>
+            VDU level: app-vnf-<vnf-id>-z<vnf-count>-vdu-
+            <vdu-id>-cnt-z<vdu-count>-<random-string>
+
+        Args:
+            namespace (str)
+
+        Returns:
+            application_name (str)
+
+        """
+        # split namespace components
+        (
+            nsi_id,
+            ns_id,
+            vnf_id_and_count,
+            vdu_id,
+            vdu_count,
+        ) = self._get_namespace_components(namespace=namespace)
+
+        if not ns_id:
+            raise N2VCException(message="ns-id should be provided.")
+
+        charm_level = self._find_charm_level(vnf_id_and_count, vdu_id)
+        db_nsr = self.db.get_one("nsrs", {"_id": ns_id})
+        vnf_count, db_vnfr = self._get_vnf_count_and_record(
+            charm_level, vnf_id_and_count
+        )
+        vca_records = self._get_vca_records(charm_level, db_nsr, db_vnfr)
+
+        if all("charm_name" in vca_record.keys() for vca_record in vca_records):
+            application_name = self._generate_application_name(
+                charm_level,
+                db_vnfr,
+                vca_records,
+                vnf_count=vnf_count,
+                vdu_count=vdu_count,
+            )
+        else:
+            application_name = self._generate_backward_compatible_application_name(
+                vnf_id_and_count, vdu_id, vdu_count
+            )
 
         return N2VCJujuConnector._format_app_name(application_name)
 
diff --git a/n2vc/tests/unit/test_n2vc_juju_conn.py b/n2vc/tests/unit/test_n2vc_juju_conn.py
index 4fef1f2..ebf36d5 100644
--- a/n2vc/tests/unit/test_n2vc_juju_conn.py
+++ b/n2vc/tests/unit/test_n2vc_juju_conn.py
@@ -15,7 +15,7 @@
 
 import asyncio
 import logging
-from unittest.mock import Mock
+from unittest.mock import Mock, MagicMock
 from unittest.mock import patch
 
 
@@ -23,6 +23,7 @@ import asynctest
 from n2vc.definitions import Offer, RelationEndpoint
 from n2vc.n2vc_juju_conn import N2VCJujuConnector
 from osm_common import fslocal
+from osm_common.dbmemory import DbMemory
 from 
n2vc.exceptions import ( N2VCBadArgumentsException, N2VCException, @@ -30,6 +31,8 @@ from n2vc.exceptions import ( ) from n2vc.tests.unit.utils import AsyncMock from n2vc.vca.connection_data import ConnectionData +from n2vc.tests.unit.testdata import test_db_descriptors as descriptors +import yaml class N2VCJujuConnTestCase(asynctest.TestCase): @@ -134,10 +137,6 @@ class UpdateVcaStatusTest(N2VCJujuConnTestCase): self.n2vc.libjuju.get_application_configs.assert_not_called_once() -@asynctest.mock.patch("osm_common.fslocal.FsLocal.file_exists") -@asynctest.mock.patch( - "osm_common.fslocal.FsLocal.path", new_callable=asynctest.PropertyMock, create=True -) class K8sProxyCharmsTest(N2VCJujuConnTestCase): def setUp(self): super(K8sProxyCharmsTest, self).setUp() @@ -145,6 +144,13 @@ class K8sProxyCharmsTest(N2VCJujuConnTestCase): self.n2vc.libjuju.add_model = AsyncMock() self.n2vc.libjuju.deploy_charm = AsyncMock() self.n2vc.libjuju.model_exists.return_value = False + self.db = DbMemory() + self.fs = fslocal.FsLocal() + self.fs.path = "/" + self.n2vc.fs = self.fs + self.n2vc.db = self.db + self.db.create_list("nsrs", yaml.safe_load(descriptors.db_nsrs_text)) + self.db.create_list("vnfrs", yaml.safe_load(descriptors.db_vnfrs_text)) @patch( "n2vc.n2vc_juju_conn.generate_random_alfanum_string", @@ -153,43 +159,41 @@ class K8sProxyCharmsTest(N2VCJujuConnTestCase): def test_success( self, mock_generate_random_alfanum_string, - mock_path, - mock_file_exists, ): - mock_file_exists.return_value = True - mock_path.return_value = "/path" + self.n2vc.fs.file_exists = MagicMock(create_autospec=True) + self.n2vc.fs.file_exists.return_value = True ee_id = self.loop.run_until_complete( self.n2vc.install_k8s_proxy_charm( - "charm", - "nsi-id.ns-id.vnf-id.vdu", - "////path/", + "simple", + ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0", + "path", {}, ) ) self.n2vc.libjuju.add_model.assert_called_once() self.n2vc.libjuju.deploy_charm.assert_called_once_with( - model_name="ns-id-k8s", - application_name="app-vnf-vnf-id-vdu-vdu-random", - path="/path/path/", + model_name="dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s", + application_name="simple-ee-z0-vnf1-vnf", + path="//path", machine_id=None, db_dict={}, progress_timeout=None, total_timeout=None, config=None, ) - self.assertEqual(ee_id, "ns-id-k8s.app-vnf-vnf-id-vdu-vdu-random.k8s") + self.assertEqual( + ee_id, "dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s.simple-ee-z0-vnf1-vnf.k8s" + ) def test_no_artifact_path( self, - mock_path, - mock_file_exists, ): with self.assertRaises(N2VCBadArgumentsException): ee_id = self.loop.run_until_complete( self.n2vc.install_k8s_proxy_charm( - "charm", - "nsi-id.ns-id.vnf-id.vdu", + "simple", + ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0", "", {}, ) @@ -198,15 +202,13 @@ class K8sProxyCharmsTest(N2VCJujuConnTestCase): def test_no_db( self, - mock_path, - mock_file_exists, ): with self.assertRaises(N2VCBadArgumentsException): ee_id = self.loop.run_until_complete( self.n2vc.install_k8s_proxy_charm( - "charm", - "nsi-id.ns-id.vnf-id.vdu", - "/path/", + "simple", + ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0", + "path", None, ) ) @@ -214,16 +216,15 @@ class K8sProxyCharmsTest(N2VCJujuConnTestCase): def test_file_not_exists( self, - mock_path, - mock_file_exists, ): - mock_file_exists.return_value = False + self.n2vc.fs.file_exists = MagicMock(create_autospec=True) + self.n2vc.fs.file_exists.return_value = False with 
self.assertRaises(N2VCBadArgumentsException): ee_id = self.loop.run_until_complete( self.n2vc.install_k8s_proxy_charm( - "charm", - "nsi-id.ns-id.vnf-id.vdu", - "/path/", + "simple", + ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0", + "path", {}, ) ) @@ -231,18 +232,18 @@ class K8sProxyCharmsTest(N2VCJujuConnTestCase): def test_exception( self, - mock_path, - mock_file_exists, ): - mock_file_exists.return_value = True - mock_path.return_value = "/path" + self.n2vc.fs.file_exists = MagicMock(create_autospec=True) + self.n2vc.fs.file_exists.return_value = True + self.n2vc.fs.path = MagicMock(create_autospec=True) + self.n2vc.fs.path.return_value = "path" self.n2vc.libjuju.deploy_charm.side_effect = Exception() with self.assertRaises(N2VCException): ee_id = self.loop.run_until_complete( self.n2vc.install_k8s_proxy_charm( - "charm", - "nsi-id.ns-id.vnf-id.vdu", - "path/", + "simple", + ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0", + "path", {}, ) ) @@ -377,3 +378,854 @@ class UpgradeCharmTest(N2VCJujuConnTestCase): model_name="sample_model", total_timeout=None, ) + + +class GenerateApplicationNameTest(N2VCJujuConnTestCase): + + vnf_id = "dbfbd751-3de4-4e68-bd40-ec5ae0a53898" + + def setUp(self): + super(GenerateApplicationNameTest, self).setUp() + self.db = MagicMock(DbMemory) + + @patch( + "n2vc.n2vc_juju_conn.generate_random_alfanum_string", + **{"return_value": "random"} + ) + def test_generate_backward_compatible_application_name( + self, mock_generate_random_alfanum + ): + vdu_id = "mgmtVM" + vdu_count = "0" + expected_result = "app-vnf-ec5ae0a53898-vdu-mgmtVM-cnt-0-random" + + application_name = self.n2vc._generate_backward_compatible_application_name( + GenerateApplicationNameTest.vnf_id, vdu_id, vdu_count + ) + self.assertEqual(application_name, expected_result) + + @patch( + "n2vc.n2vc_juju_conn.generate_random_alfanum_string", + **{"return_value": "random"} + ) + def test_generate_backward_compatible_application_name_without_vnf_id_vdu_id( + self, mock_generate_random_alfanum + ): + vnf_id = None + vdu_id = "" + vdu_count = None + expected_result = "app--random" + application_name = self.n2vc._generate_backward_compatible_application_name( + vnf_id, vdu_id, vdu_count + ) + + self.assertEqual(application_name, expected_result) + self.assertLess(len(application_name), 50) + + def test_find_charm_level_with_vnf_id(self): + vdu_id = "" + expected_result = "vnf-level" + charm_level = self.n2vc._find_charm_level( + GenerateApplicationNameTest.vnf_id, vdu_id + ) + self.assertEqual(charm_level, expected_result) + + def test_find_charm_level_with_vdu_id(self): + vnf_id = "" + vdu_id = "mgmtVM" + with self.assertRaises(N2VCException): + self.n2vc._find_charm_level(vnf_id, vdu_id) + + def test_find_charm_level_with_vnf_id_and_vdu_id(self): + vdu_id = "mgmtVM" + expected_result = "vdu-level" + charm_level = self.n2vc._find_charm_level( + GenerateApplicationNameTest.vnf_id, vdu_id + ) + self.assertEqual(charm_level, expected_result) + + def test_find_charm_level_without_vnf_id_and_vdu_id(self): + vnf_id = "" + vdu_id = "" + expected_result = "ns-level" + charm_level = self.n2vc._find_charm_level(vnf_id, vdu_id) + self.assertEqual(charm_level, expected_result) + + def test_generate_application_name_ns_charm(self): + charm_level = "ns-level" + vnfrs = {} + vca_records = [ + { + "target_element": "ns", + "member-vnf-index": "", + "vdu_id": None, + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": None, + "vdu_name": None, + 
"type": "proxy_charm", + "ee_descriptor_id": None, + "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh", + "ee_id": None, + "application": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ] + vnf_count = "" + vdu_count = "" + expected_result = "simple-ns-charm-abc-000-rrrr-nnnn-4444-h-ns" + application_name = self.n2vc._generate_application_name( + charm_level, + vnfrs, + vca_records, + vnf_count=vnf_count, + vdu_count=vdu_count, + ) + self.assertEqual(application_name, expected_result) + self.assertLess(len(application_name), 50) + + def test_generate_application_name_ns_charm_empty_vca_records(self): + charm_level = "ns-level" + vnfrs = {} + vca_records = [] + vnf_count = "" + vdu_count = "" + with self.assertRaises(N2VCException): + self.n2vc._generate_application_name( + charm_level, + vnfrs, + vca_records, + vnf_count=vnf_count, + vdu_count=vdu_count, + ) + + def test_generate_application_name_vnf_charm(self): + charm_level = "vnf-level" + vnfrs = { + "member-vnf-index-ref": "vnf111-xxx-yyy-zzz", + } + vca_records = [ + { + "target_element": "vnf/vnf1", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ] + vnf_count = "1" + vdu_count = "" + expected_result = "simple-ee-ab-1-vnf111-xxx-y-vnf" + application_name = self.n2vc._generate_application_name( + charm_level, + vnfrs, + vca_records, + vnf_count=vnf_count, + vdu_count=vdu_count, + ) + self.assertEqual(application_name, expected_result) + self.assertLess(len(application_name), 50) + + def test_generate_application_name_vdu_charm(self): + charm_level = "vdu-level" + vnfrs = { + "member-vnf-index-ref": "vnf111-xxx-yyy-zzz", + "vdur": [ + {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"}, + {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"}, + ], + } + vca_records = [ + { + "target_element": "vnf/vnf1/mgmtvm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "mgmtvm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + { + "target_element": "vnf/vnf1/datavm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "datavm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ] + vnf_count = "2" + vdu_count = "0" + expected_result = "simple-ee-ab-2-vnf111-xxx-y-mgmtVM-0-vdu" + application_name = self.n2vc._generate_application_name( + charm_level, + vnfrs, + vca_records, + vnf_count=vnf_count, + vdu_count=vdu_count, + ) + self.assertEqual(application_name, expected_result) + self.assertLess(len(application_name), 50) + + def test_generate_application_name_vdu_charm_wrong_vnfrs(self): + charm_level = "vdu-level" + vnfrs = { + "member-vnf-index-ref": "vnf111-xxx-yyy-zzz", + } + vca_records = [ + { + "target_element": "vnf/vnf1/mgmtvm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": 
"38912ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "mgmtvm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ] + vnf_count = "2" + vdu_count = "0" + with self.assertRaises(KeyError): + self.n2vc._generate_application_name( + charm_level, + vnfrs, + vca_records, + vnf_count=vnf_count, + vdu_count=vdu_count, + ) + + def test_get_vnf_count_db_vnfr_ns_charm(self): + self.db.get_one.return_value = {"member-vnf-index-ref": "sample-ref"} + charm_level = "ns-level" + vnf_id_and_count = "m7fbd751-3de4-4e68-bd40-ec5ae0a53898-4" + with patch.object(self.n2vc, "db", self.db): + vnf_count, db_vnfr = self.n2vc._get_vnf_count_and_record( + charm_level, vnf_id_and_count + ) + self.assertEqual(vnf_count, "") + self.assertEqual(db_vnfr, {}) + + def test_get_vnf_count_db_vnfr_vnf_charm(self): + self.db.get_one.return_value = {"member-vnf-index-ref": "sample-ref"} + charm_level = "vnf-level" + vnf_id_and_count = "m7fbd751-3de4-4e68-bd40-ec5ae0a53898-4" + with patch.object(self.n2vc, "db", self.db): + vnf_count, db_vnfr = self.n2vc._get_vnf_count_and_record( + charm_level, vnf_id_and_count + ) + self.assertEqual(vnf_count, "4") + self.assertEqual(db_vnfr, {"member-vnf-index-ref": "sample-ref"}) + + def test_get_vnf_count_db_vnfr_vdu_charm(self): + self.db.get_one.return_value = {"member-vnf-index-ref": "sample-ref"} + charm_level = "vdu-level" + vnf_id_and_count = "m7fbd751-3de4-4e68-bd40-ec5ae0a53898-2" + with patch.object(self.n2vc, "db", self.db): + vnf_count, db_vnfr = self.n2vc._get_vnf_count_and_record( + charm_level, vnf_id_and_count + ) + self.assertEqual(vnf_count, "2") + self.assertEqual(db_vnfr, {"member-vnf-index-ref": "sample-ref"}) + + def test_get_vca_records_vdu_charm(self): + charm_level = "vdu-level" + db_vnfr = { + "member-vnf-index-ref": "vnf111-xxx-yyy-zzz", + "vdur": [ + {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"}, + {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"}, + ], + } + db_nsr = { + "_admin": { + "deployed": { + "VCA": [ + { + "target_element": "vnf/vnf1/mgmtvm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "mgmtvm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + { + "target_element": "vnf/vnf2/datavm", + "member-vnf-index": "vnf222-xxx-yyy-zzz", + "vdu_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "datavm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ], + }, + }, + } + expected_result = [ + { + "target_element": "vnf/vnf1/mgmtvm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "mgmtvm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + 
}, + ] + vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr) + self.assertEqual(vca_records, expected_result) + + def test_get_vca_records_vnf_charm_member_vnf_index_mismatch(self): + charm_level = "vnf-level" + db_vnfr = { + "member-vnf-index-ref": "vnf222-xxx-yyy-zzz", + } + db_nsr = { + "_admin": { + "deployed": { + "VCA": [ + { + "target_element": "vnf/vnf1/mgmtvm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "mgmtvm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + { + "target_element": "vnf/vnf1/mgmtvm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "datavm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-8888-hhh-3333-yyyy-888-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ], + }, + }, + } + expected_result = [] + vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr) + self.assertEqual(vca_records, expected_result) + + def test_get_vca_records_ns_charm(self): + charm_level = "ns-level" + db_vnfr = { + "member-vnf-index-ref": "vnf222-xxx-yyy-zzz", + } + db_nsr = { + "_admin": { + "deployed": { + "VCA": [ + { + "target_element": "vnf/vnf1/mgmtvm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "mgmtvm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + { + "target_element": "ns", + "member-vnf-index": None, + "vdu_id": None, + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "", + "vdu_name": "", + "ee_descriptor_id": "", + "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ], + }, + }, + } + expected_result = [ + { + "target_element": "ns", + "member-vnf-index": None, + "vdu_id": None, + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "", + "vdu_name": "", + "ee_descriptor_id": "", + "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ] + vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr) + self.assertEqual(vca_records, expected_result) + + def test_get_vca_records_ns_charm_empty_charm_name(self): + charm_level = "ns-level" + db_vnfr = { + "member-vnf-index-ref": "vnf222-xxx-yyy-zzz", + } + db_nsr = { + "_admin": { + "deployed": { + "VCA": [ + { + "target_element": "vnf/vnf1/mgmtvm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "mgmtvm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + { + "target_element": "ns", + "member-vnf-index": None, + "vdu_id": None, + "kdu_name": None, + 
"vdu_count_index": None, + "vnfd_id": "", + "vdu_name": "", + "ee_descriptor_id": "", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ], + }, + }, + } + expected_result = [ + { + "target_element": "ns", + "member-vnf-index": None, + "vdu_id": None, + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "", + "vdu_name": "", + "ee_descriptor_id": "", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ] + vca_records = self.n2vc._get_vca_records(charm_level, db_nsr, db_vnfr) + self.assertEqual(vca_records, expected_result) + + def test_get_application_name_vnf_charm(self): + namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0" + self.db.get_one.return_value = { + "_admin": { + "deployed": { + "VCA": [ + { + "target_element": "vnf/vnf1/mgmtvm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "mgmtvm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + { + "target_element": "ns", + "member-vnf-index": None, + "vdu_id": None, + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "", + "vdu_name": "", + "ee_descriptor_id": "", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ], + }, + }, + } + mock_vnf_count_and_record = MagicMock() + db_vnfr = { + "member-vnf-index-ref": "vnf111-xxx-yyy-zzz", + } + vnf_count = "0" + mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr) + expected_result = "simple-ee-ab-z0-vnf111-xxx-y-vnf" + with patch.object(self.n2vc, "db", self.db), patch.object( + self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record + ): + application_name = self.n2vc._get_application_name(namespace) + self.assertEqual(application_name, expected_result) + self.assertLess(len(application_name), 50) + mock_vnf_count_and_record.assert_called_once_with( + "vnf-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0" + ) + self.db.get_one.assert_called_once() + + @patch( + "n2vc.n2vc_juju_conn.generate_random_alfanum_string", + **{"return_value": "random"} + ) + def test_get_application_name_vnf_charm_old_naming( + self, mock_generate_random_alfanum + ): + namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0" + self.db.get_one.return_value = { + "_admin": { + "deployed": { + "VCA": [ + { + "target_element": "vnf/vnf1/mgmtvm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "mgmtvm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + { + "target_element": "ns", + "member-vnf-index": None, + "vdu_id": None, + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "", + "vdu_name": "", + "ee_descriptor_id": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ], + }, + }, + } + mock_vnf_count_and_record = MagicMock() + db_vnfr = { + "member-vnf-index-ref": "vnf111-xxx-yyy-zzz", + } + vnf_count = "0" + mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr) + expected_result = "app-vnf-eb3161eec0-z0-random" + with patch.object(self.n2vc, "db", self.db), patch.object( + 
self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record + ): + application_name = self.n2vc._get_application_name(namespace) + self.assertEqual(application_name, expected_result) + mock_vnf_count_and_record.assert_called_once_with( + "vnf-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0" + ) + self.db.get_one.assert_called_once() + + def test_get_application_name_vnf_charm_vnf_index_ref_mismatch(self): + namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0" + self.db.get_one.return_value = { + "_admin": { + "deployed": { + "VCA": [ + { + "target_element": "vnf/vnf1/mgmtvm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "mgmtvm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + { + "target_element": "ns", + "member-vnf-index": None, + "vdu_id": None, + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "", + "vdu_name": "", + "ee_descriptor_id": "", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ], + }, + }, + } + mock_vnf_count_and_record = MagicMock() + db_vnfr = { + "member-vnf-index-ref": "vnf222-xxx-yyy-zzz", + } + vnf_count = "0" + mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr) + with patch.object(self.n2vc, "db", self.db), patch.object( + self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record + ): + with self.assertRaises(N2VCException): + self.n2vc._get_application_name(namespace) + mock_vnf_count_and_record.assert_called_once_with( + "vnf-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0" + ) + self.db.get_one.assert_called_once() + + def test_get_application_name_vdu_charm(self): + namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0.mgmtVM-0" + self.db.get_one.return_value = { + "_admin": { + "deployed": { + "VCA": [ + { + "target_element": "vnf/vnf1/mgmtvm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "mgmtvm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + { + "target_element": "ns", + "member-vnf-index": None, + "vdu_id": None, + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "", + "vdu_name": "", + "ee_descriptor_id": "", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ] + } + } + } + mock_vnf_count_and_record = MagicMock() + db_vnfr = { + "member-vnf-index-ref": "vnf111-xxx-yyy-zzz", + "vdur": [ + {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"}, + {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"}, + ], + } + vnf_count = "0" + mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr) + expected_result = "simple-ee-ab-z0-vnf111-xxx-y-mgmtvm-z0-vdu" + with patch.object(self.n2vc, "db", self.db), patch.object( + self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record + ): + application_name = self.n2vc._get_application_name(namespace) + self.assertEqual(application_name, expected_result) + self.assertLess(len(application_name), 50) + 
mock_vnf_count_and_record.assert_called_once_with( + "vdu-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0" + ) + self.db.get_one.assert_called_once() + + @patch( + "n2vc.n2vc_juju_conn.generate_random_alfanum_string", + **{"return_value": "random"} + ) + def test_get_application_name_vdu_charm_old_naming( + self, mock_generate_random_alfanum + ): + namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0.mgmtVM-0" + self.db.get_one.return_value = { + "_admin": { + "deployed": { + "VCA": [ + { + "target_element": "vnf/vnf1/mgmtvm", + "member-vnf-index": "vnf111-xxx-yyy-zzz", + "vdu_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "r7fbd751-3de4-4e68-bd40-ec5ae0a53898", + "vdu_name": "mgmtvm", + "ee_descriptor_id": "simple-ee-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + { + "target_element": "ns", + "member-vnf-index": None, + "vdu_id": None, + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "", + "vdu_name": "", + "ee_descriptor_id": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ], + }, + }, + } + mock_vnf_count_and_record = MagicMock() + db_vnfr = { + "member-vnf-index-ref": "vnf111-xxx-yyy-zzz", + "vdur": [ + {"_id": "38912ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "mgmtVM"}, + {"_id": "45512ff7-5bdd-4228-911f-c2bee259c44a", "vdu-id-ref": "dataVM"}, + ], + } + vnf_count = "0" + mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr) + expected_result = "app-vnf-eb3161eec0-z0-vdu-mgmtvm-cnt-z0-random" + + with patch.object(self.n2vc, "db", self.db), patch.object( + self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record + ): + application_name = self.n2vc._get_application_name(namespace) + self.assertEqual(application_name, expected_result) + self.assertLess(len(application_name), 50) + mock_vnf_count_and_record.assert_called_once_with( + "vdu-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0" + ) + self.db.get_one.assert_called_once() + + def test_get_application_name_ns_charm(self): + namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898" + self.db.get_one.return_value = { + "_admin": { + "deployed": { + "VCA": [ + { + "target_element": "ns", + "member-vnf-index": None, + "vdu_id": None, + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "", + "vdu_name": "", + "ee_descriptor_id": "", + "charm_name": "simple-ns-charm-abc-000-rrrr-nnnn-4444-hhh-3333-yyyy-333-hhh-ttt-444", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ], + }, + }, + } + mock_vnf_count_and_record = MagicMock() + db_vnfr = {} + vnf_count = "" + mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr) + expected_result = "simple-ns-charm-abc-z000-rrrr-nnnn-z4444-h-ns" + with patch.object(self.n2vc, "db", self.db), patch.object( + self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record + ): + application_name = self.n2vc._get_application_name(namespace) + self.assertEqual(application_name, expected_result) + self.assertLess(len(application_name), 50) + mock_vnf_count_and_record.assert_called_once_with("ns-level", None) + self.db.get_one.assert_called_once() + + def test_get_application_name_ns_charm_empty_charm_name(self): + namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898" + self.db.get_one.return_value = { + "_admin": { + "deployed": { + "VCA": [ + { + "target_element": "ns", + "member-vnf-index": None, + "vdu_id": None, + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "", + 
"vdu_name": "", + "ee_descriptor_id": "", + "charm_name": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ], + }, + }, + } + mock_vnf_count_and_record = MagicMock() + db_vnfr = {} + vnf_count = "" + mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr) + with patch.object(self.n2vc, "db", self.db), patch.object( + self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record + ): + with self.assertRaises(N2VCException): + self.n2vc._get_application_name(namespace) + mock_vnf_count_and_record.assert_called_once_with("ns-level", None) + self.db.get_one.assert_called_once() + + @patch( + "n2vc.n2vc_juju_conn.generate_random_alfanum_string", + **{"return_value": "random"} + ) + def test_get_application_name_ns_charm_old_naming( + self, mock_generate_random_alfanum + ): + namespace = ".dbfbd751-3de4-4e68-bd40-ec5ae0a53898" + self.db.get_one.return_value = { + "_admin": { + "deployed": { + "VCA": [ + { + "target_element": "ns", + "member-vnf-index": None, + "vdu_id": None, + "kdu_name": None, + "vdu_count_index": None, + "vnfd_id": "", + "vdu_name": "", + "ee_descriptor_id": "", + "model": "dbfbd751-3de4-4e68-bd40-ec5ae0a53898", + }, + ], + }, + }, + } + mock_vnf_count_and_record = MagicMock() + db_vnfr = {} + vnf_count = "" + mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr) + expected_result = "app-random" + with patch.object(self.n2vc, "db", self.db), patch.object( + self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record + ): + application_name = self.n2vc._get_application_name(namespace) + self.assertEqual(application_name, expected_result) + self.assertLess(len(application_name), 50) + mock_vnf_count_and_record.assert_called_once_with("ns-level", None) + self.db.get_one.assert_called_once() diff --git a/n2vc/tests/unit/testdata/test_db_descriptors.py b/n2vc/tests/unit/testdata/test_db_descriptors.py new file mode 100644 index 0000000..c6f3670 --- /dev/null +++ b/n2vc/tests/unit/testdata/test_db_descriptors.py @@ -0,0 +1,414 @@ +# Copyright 2022 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +db_nsrs_text = """ +--- +- _id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898 + name: k8s-ns + name-ref: k8s-ns + short-name: k8s-ns + admin-status: ENABLED + nsState: READY + currentOperation: IDLE + currentOperationID: null + errorDescription: null + errorDetail: null + deploymentStatus: null + configurationStatus: + - elementType: VNF + elementUnderConfiguration: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0 + status: READY + - elementType: VNF + elementUnderConfiguration: 17892d73-aa19-4b87-9a00-1d094f07a6b3 + status: READY + vcaStatus: null + nsd: + _id: 12f320b5-2a57-40f4-82b5-020a6b1171d7 + id: k8s_proxy_charm-ns + version: '1.0' + name: k8s_proxy_charm-ns + vnfd-id: + - k8s_proxy_charm-vnf + virtual-link-desc: + - id: mgmtnet + mgmt-network: true + - id: datanet + df: + - id: default-df + vnf-profile: + - id: vnf1 + virtual-link-connectivity: + - constituent-cpd-id: + - constituent-base-element-id: vnf1 + constituent-cpd-id: vnf-mgmt-ext + virtual-link-profile-id: mgmtnet + - constituent-cpd-id: + - constituent-base-element-id: vnf1 + constituent-cpd-id: vnf-data-ext + virtual-link-profile-id: datanet + vnfd-id: k8s_proxy_charm-vnf + - id: vnf2 + virtual-link-connectivity: + - constituent-cpd-id: + - constituent-base-element-id: vnf2 + constituent-cpd-id: vnf-mgmt-ext + virtual-link-profile-id: mgmtnet + - constituent-cpd-id: + - constituent-base-element-id: vnf2 + constituent-cpd-id: vnf-data-ext + virtual-link-profile-id: datanet + vnfd-id: k8s_proxy_charm-vnf + description: NS with 2 VNFs with cloudinit connected by datanet and mgmtnet VLs + _admin: + userDefinedData: {} + revision: 1 + created: 1658990740.88281 + modified: 1658990741.09266 + projects_read: + - 51e0e80fe533469d98766caa16552a3e + projects_write: + - 51e0e80fe533469d98766caa16552a3e + onboardingState: ONBOARDED + operationalState: ENABLED + usageState: NOT_IN_USE + storage: + fs: mongo + path: /app/storage/ + folder: '12f320b5-2a57-40f4-82b5-020a6b1171d7:1' + pkg-dir: k8s_proxy_charm_ns + descriptor: k8s_proxy_charm_ns/k8s_proxy_charm_nsd.yaml + zipfile: k8s_proxy_charm_ns.tar.gz + datacenter: bad7338b-ae46-43d4-a434-c3337a8054ac + resource-orchestrator: osmopenmano + description: default description + constituent-vnfr-ref: + - 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0 + - 17892d73-aa19-4b87-9a00-1d094f07a6b3 + operational-status: running + config-status: configured + detailed-status: Done + orchestration-progress: {} + create-time: 1658998097.57611 + nsd-name-ref: k8s_proxy_charm-ns + operational-events: [] + nsd-ref: k8s_proxy_charm-ns + nsd-id: 12f320b5-2a57-40f4-82b5-020a6b1171d7 + vnfd-id: + - 6d9e1ca1-f387-4d01-9876-066fc7311e0f + instantiate_params: + nsdId: 12f320b5-2a57-40f4-82b5-020a6b1171d7 + nsName: k8s-ns + nsDescription: default description + vimAccountId: bad7338b-ae46-43d4-a434-c3337a8054ac + vld: + - name: mgmtnet + vim-network-name: osm-ext + additionalParamsForNs: null + ns-instance-config-ref: dbfbd751-3de4-4e68-bd40-ec5ae0a53898 + id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898 + ssh-authorized-key: null + flavor: + - id: '0' + memory-mb: 1024 + name: mgmtVM-flv + storage-gb: '10' + vcpu-count: 1 + vim_info: + 'vim:bad7338b-ae46-43d4-a434-c3337a8054ac': + vim_details: null + vim_id: 17a9ba76-beb7-4ad4-a481-97de37174866 + vim_status: DONE + - vcpu-count: 1 + memory-mb: 1024 + storage-gb: '10' + name: mgmtVM-flv + id: '1' + image: + - id: '0' + image: ubuntu18.04 + vim_info: + 'vim:bad7338b-ae46-43d4-a434-c3337a8054ac': + vim_details: null + vim_id: 919fc71a-6acd-4ee3-8123-739a9abbc2e7 + vim_status: DONE + - image: 
'Canonical:UbuntuServer:18.04-LTS:latest' + vim-type: azure + id: '1' + - image: 'ubuntu-os-cloud:image-family:ubuntu-1804-lts' + vim-type: gcp + id: '2' + - image: ubuntu/images/hvm-ssd/ubuntu-artful-17.10-amd64-server-20180509 + vim-type: aws + id: '3' + affinity-or-anti-affinity-group: [] + revision: 1 + vld: + - id: mgmtnet + mgmt-network: true + name: mgmtnet + type: null + vim_info: + 'vim:bad7338b-ae46-43d4-a434-c3337a8054ac': + vim_account_id: bad7338b-ae46-43d4-a434-c3337a8054ac + vim_network_name: osm-ext + vim_details: > + {admin_state_up: true, availability_zone_hints: [], + availability_zones: [nova], created_at: '2019-10-17T23:44:03Z', + description: '', encapsulation: vlan, encapsulation_id: 2148, + encapsulation_type: vlan, id: 21ea5d92-24f1-40ab-8d28-83230e277a49, + ipv4_address_scope: null, + ipv6_address_scope: null, is_default: false, mtu: 1500, name: osm-ext, port_security_enabled: true, project_id: 456b6471010b4737b47a0dd599c920c5, 'provider:network_type': vlan, 'provider:physical_network': physnet1, 'provider:segmentation_id': 2148, revision_number: 1009, + 'router:external': true, segmentation_id: 2148, shared: true, status: ACTIVE, subnets: [{subnet: {allocation_pools: [{end: 172.21.249.255, start: 172.21.248.1}], cidr: 172.21.248.0/22, created_at: '2019-10-17T23:44:07Z', description: '', dns_nameservers: [], + enable_dhcp: true, gateway_ip: 172.21.251.254, host_routes: [], id: d14f68b7-8287-41fe-b533-dafb2240680a, ip_version: 4, ipv6_address_mode: null, ipv6_ra_mode: null, name: osm-ext-subnet, network_id: 21ea5d92-24f1-40ab-8d28-83230e277a49, project_id: 456b6471010b4737b47a0dd599c920c5, + revision_number: 5, service_types: [], subnetpool_id: null, tags: [], tenant_id: 456b6471010b4737b47a0dd599c920c5, updated_at: '2020-09-14T15:15:06Z'}}], tags: [], tenant_id: 456b6471010b4737b47a0dd599c920c5, type: data, updated_at: '2022-07-05T18:39:02Z'} + vim_id: 21ea5d92-24f1-40ab-8d28-83230e277a49 + vim_status: ACTIVE + - id: datanet + mgmt-network: false + name: datanet + type: null + vim_info: + 'vim:bad7338b-ae46-43d4-a434-c3337a8054ac': + vim_account_id: bad7338b-ae46-43d4-a434-c3337a8054ac + vim_network_name: null + vim_details: > + {admin_state_up: true, availability_zone_hints: [], + availability_zones: [nova], created_at: '2022-07-28T08:41:59Z', + description: '', encapsulation: vxlan, encapsulation_id: 27, + encapsulation_type: vxlan, id: 34056287-3cd5-42cb-92d3-413382b50813, + ipv4_address_scope: null, + ipv6_address_scope: null, mtu: 1450, name: k8s-ns-datanet, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, 'provider:network_type': vxlan, 'provider:physical_network': null, 'provider:segmentation_id': 27, revision_number: 2, 'router:external': false, + segmentation_id: 27, shared: false, status: ACTIVE, subnets: [{subnet: {allocation_pools: [{end: 192.168.181.254, start: 192.168.181.1}], cidr: 192.168.181.0/24, created_at: '2022-07-28T08:41:59Z', description: '', dns_nameservers: [], enable_dhcp: true, gateway_ip: null, + host_routes: [], id: ab2920f8-881b-4bef-82a5-9582a7930786, ip_version: 4, ipv6_address_mode: null, ipv6_ra_mode: null, name: k8s-ns-datanet-subnet, network_id: 34056287-3cd5-42cb-92d3-413382b50813, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 0, + service_types: [], subnetpool_id: null, tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2022-07-28T08:41:59Z'}}], tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, type: bridge, updated_at: '2022-07-28T08:41:59Z'} + vim_id: 
34056287-3cd5-42cb-92d3-413382b50813 + vim_status: ACTIVE + _admin: + created: 1658998097.58182 + modified: 1658998193.42562 + projects_read: + - 51e0e80fe533469d98766caa16552a3e + projects_write: + - 51e0e80fe533469d98766caa16552a3e + nsState: INSTANTIATED + current-operation: null + nslcmop: null + operation-type: null + deployed: + RO: + vnfd: [] + operational-status: running + VCA: + - target_element: vnf/vnf1 + member-vnf-index: vnf1 + vdu_id: null + kdu_name: null + vdu_count_index: 0 + operational-status: init + detailed-status: '' + step: initial-deploy + vnfd_id: k8s_proxy_charm-vnf + vdu_name: null + type: k8s_proxy_charm + ee_descriptor_id: simple-ee + charm_name: '' + ee_id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s.simple-ee-z0-vnf1-vnf.k8s + application: simple-ee-z0-vnf1-vnf + model: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s + config_sw_installed: true + - target_element: vnf/vnf2 + member-vnf-index: vnf2 + vdu_id: null + kdu_name: null + vdu_count_index: 0 + operational-status: init + detailed-status: '' + step: initial-deploy + vnfd_id: k8s_proxy_charm-vnf + vdu_name: null + type: k8s_proxy_charm + ee_descriptor_id: simple-ee + charm_name: '' + ee_id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s.simple-ee-z0-vnf2-vnf.k8s + application: simple-ee-z0-vnf2-vnf + model: dbfbd751-3de4-4e68-bd40-ec5ae0a53898-k8s + config_sw_installed: true + K8s: [] +""" + +db_vnfrs_text = """ +- _id: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0 + id: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0 + nsr-id-ref: dbfbd751-3de4-4e68-bd40-ec5ae0a53898 + member-vnf-index-ref: vnf1 + additionalParamsForVnf: null + created-time: 1658998097.58036 + vnfd-ref: k8s_proxy_charm-vnf + vnfd-id: 6d9e1ca1-f387-4d01-9876-066fc7311e0f + vim-account-id: bad7338b-ae46-43d4-a434-c3337a8054ac + vca-id: null + vdur: + - _id: 38912ff7-5bdd-4228-911f-c2bee259c44a + additionalParams: + OSM: + count_index: 0 + member_vnf_index: vnf1 + ns_id: dbfbd751-3de4-4e68-bd40-ec5ae0a53898 + vdu: + mgmtVM-0: + count_index: 0 + interfaces: + dataVM-xe0: + name: dataVM-xe0 + mgmtVM-eth0: + name: mgmtVM-eth0 + vdu_id: mgmtVM + vdu_id: mgmtVM + vim_account_id: bad7338b-ae46-43d4-a434-c3337a8054ac + vnf_id: 1b6a4eb3-4fbf-415e-985c-4aeb3161eec0 + vnfd_id: 6d9e1ca1-f387-4d01-9876-066fc7311e0f + vnfd_ref: k8s_proxy_charm-vnf + affinity-or-anti-affinity-group-id: [] + alt-image-ids: + - '1' + - '2' + - '3' + cloud-init: '6d9e1ca1-f387-4d01-9876-066fc7311e0f:file:cloud-config.txt' + count-index: 0 + id: 38912ff7-5bdd-4228-911f-c2bee259c44a + interfaces: + - external-connection-point-ref: vnf-mgmt-ext + internal-connection-point-ref: mgmtVM-eth0-int + mgmt-interface: true + mgmt-vnf: true + name: mgmtVM-eth0 + ns-vld-id: mgmtnet + position: 1 + type: PARAVIRT + compute_node: nfvisrv11 + ip-address: 172.21.248.199 + mac-address: 'fa:16:3e:4d:65:e9' + pci: null + vlan: 2148 + - external-connection-point-ref: vnf-data-ext + internal-connection-point-ref: dataVM-xe0-int + name: dataVM-xe0 + ns-vld-id: datanet + position: 2 + type: PARAVIRT + compute_node: nfvisrv11 + ip-address: 192.168.181.179 + mac-address: 'fa:16:3e:ca:b5:d3' + pci: null + vlan: null + internal-connection-point: + - connection-point-id: mgmtVM-eth0-int + id: mgmtVM-eth0-int + name: mgmtVM-eth0-int + - connection-point-id: dataVM-xe0-int + id: dataVM-xe0-int + name: dataVM-xe0-int + ip-address: 172.21.248.199 + ns-flavor-id: '0' + ns-image-id: '0' + ssh-access-required: true + ssh-keys: + - > + ssh-rsa + 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDW3dtEDKfwZL0WZp6LeJUZFlZzYAHP7M4AsJwl2YFO/wmblfrTpWZ8tRyGwyjQacB7Zb7J07wD5AZACE71A3Nc9zjI22/gWN7N8X+ZxH6ywcr1GdXBqZDBeOdzD4pRb11E9mydGZ9l++KtFRtlF4G7IFYuxkOiSCJrkgiKuVDGodtQ/6VUKwxuI8U6N7MxtIBN2L3IfvMwuNyTo1daiUabQMwQKt/Q8Zpp78zsZ6SoxU+eYAHzbeTjAfNwhA88nRzRZn7tQW+gWl9wbSINbr2+JetTN+BTot/CMPmKzzul9tZrzhSzck1QSM3UDrD36ctRdaLABnWCoxpm0wJthNt693xVrFP+bMgK2BR0fyu9WwVEcHkC9CZ8yoi37k5rGVtoDw6sW6lxQ5QKS+Plv/YjGKqK3Ro/UoIEhgxcW53uz4PveyMBss4geB9ad/1T8dtugd288qfCWJRBpJBrE497EalhHolF3L/2bEu3uCKN0TY4POzqP/5cuAUc/uTJ2mjZewJdlJtrn7IyFtSUypeuVmXRx5LwByQw9EwPhUZlKVjYEHYmu5YTKlFSWyorWgRLBBIK7LLPj+bCGgLeT+fXmip6eFquAyVtoQfDofQ/gc0OXEA1uKfK2VFKg1le+joz1WA/XieGSvKRQ4aZorYgi/FzbpxKj2a60cZubJMq5w== + root@lcm-7b6bcf7cdd-5h2ql + - >- + ssh-rsa + AAAAB3NzaC1yc2EAAAADAQABAAABAQDtg65/Jh3KDWC9+YzkTz8Md/uhalkjPo15DSxlUNWzYQNFUzaG5Pt0trDwQ29UOQIUy1CB9HpWSZMTA1ESet/+cyXWkZ9MznAmGLQBdnwqWU792UQf6rv74Zpned8MbnKQXfs8gog1ZFFKRMcwitNRqs8xs8XsPLE/l1Jo2QemhM0fIRofjJiLKYaKeGP59Fb8UlIeGDaxmIFgLs8bAZvrmjbae3o4b1fZDNboqlQbHb9rakxI9uCnsaBrCmelXpP9EFmENx85vdHEwCAfCRvSWKnbXuOojJJzFM5odoWFZo8AuIhEb5ZiLkGet3CvCfWZZPpQc4TuNDaY0t1XUegH + juju-client-key + vdu-id-ref: mgmtVM + vdu-name: mgmtVM + vim_info: + 'vim:bad7338b-ae46-43d4-a434-c3337a8054ac': + interfaces: + - vim_info: > + {admin_state_up: true, allowed_address_pairs: [], + 'binding:host_id': nfvisrv11, 'binding:profile': {}, + 'binding:vif_details': {bridge_name: br-int, connectivity: l2, + datapath_type: system, ovs_hybrid_plug: true, port_filter: true}, + 'binding:vif_type': ovs, 'binding:vnic_type': normal, + created_at: '2022-07-28T08:42:04Z', description: '', device_id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7, device_owner: 'compute:nova', extra_dhcp_opts: [], fixed_ips: [{ip_address: 172.21.248.199, subnet_id: d14f68b7-8287-41fe-b533-dafb2240680a}], id: e053d44f-1d67-4274-b85d-1cef243353d6, + mac_address: 'fa:16:3e:4d:65:e9', name: mgmtVM-eth0, network_id: 21ea5d92-24f1-40ab-8d28-83230e277a49, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 4, security_groups: [1de4b2c2-e4be-4e91-985c-d887e2715949], status: ACTIVE, + tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2022-07-28T08:42:16Z'} + mac_address: 'fa:16:3e:4d:65:e9' + vim_net_id: 21ea5d92-24f1-40ab-8d28-83230e277a49 + vim_interface_id: e053d44f-1d67-4274-b85d-1cef243353d6 + compute_node: nfvisrv11 + pci: null + vlan: 2148 + ip_address: 172.21.248.199 + mgmt_vnf_interface: true + mgmt_vdu_interface: true + - vim_info: > + {admin_state_up: true, allowed_address_pairs: [], + 'binding:host_id': nfvisrv11, 'binding:profile': {}, + 'binding:vif_details': {bridge_name: br-int, connectivity: l2, + datapath_type: system, ovs_hybrid_plug: true, port_filter: true}, + 'binding:vif_type': ovs, 'binding:vnic_type': normal, + created_at: '2022-07-28T08:42:04Z', description: '', device_id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7, device_owner: 'compute:nova', extra_dhcp_opts: [], fixed_ips: [{ip_address: 192.168.181.179, subnet_id: ab2920f8-881b-4bef-82a5-9582a7930786}], id: 8a34c944-0fc1-41ae-9dbc-9743e5988162, + mac_address: 'fa:16:3e:ca:b5:d3', name: dataVM-xe0, network_id: 34056287-3cd5-42cb-92d3-413382b50813, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 4, security_groups: [1de4b2c2-e4be-4e91-985c-d887e2715949], status: ACTIVE, + tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2022-07-28T08:42:15Z'} + mac_address: 'fa:16:3e:ca:b5:d3' + vim_net_id: 
34056287-3cd5-42cb-92d3-413382b50813 + vim_interface_id: 8a34c944-0fc1-41ae-9dbc-9743e5988162 + compute_node: nfvisrv11 + pci: null + vlan: null + ip_address: 192.168.181.179 + vim_details: > + {'OS-DCF:diskConfig': MANUAL, 'OS-EXT-AZ:availability_zone': nova, + 'OS-EXT-SRV-ATTR:host': nfvisrv11, + 'OS-EXT-SRV-ATTR:hypervisor_hostname': nfvisrv11, + 'OS-EXT-SRV-ATTR:instance_name': instance-0002967a, + 'OS-EXT-STS:power_state': 1, 'OS-EXT-STS:task_state': null, + 'OS-EXT-STS:vm_state': active, 'OS-SRV-USG:launched_at': '2022-07-28T08:42:17.000000', 'OS-SRV-USG:terminated_at': null, accessIPv4: '', accessIPv6: '', addresses: {k8s-ns-datanet: [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ca:b5:d3', 'OS-EXT-IPS:type': fixed, + addr: 192.168.181.179, version: 4}], osm-ext: [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:4d:65:e9', 'OS-EXT-IPS:type': fixed, addr: 172.21.248.199, version: 4}]}, config_drive: '', created: '2022-07-28T08:42:06Z', flavor: {id: 17a9ba76-beb7-4ad4-a481-97de37174866, + links: [{href: 'http://172.21.247.1:8774/flavors/17a9ba76-beb7-4ad4-a481-97de37174866', rel: bookmark}]}, hostId: 2aa7155bd281bd308d8e3776af56d428210c21aab788a8cbdf5ef500, id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7, image: {id: 919fc71a-6acd-4ee3-8123-739a9abbc2e7, + links: [{href: 'http://172.21.247.1:8774/images/919fc71a-6acd-4ee3-8123-739a9abbc2e7', rel: bookmark}]}, key_name: null, links: [{href: 'http://172.21.247.1:8774/v2.1/servers/1fabddca-0dcf-4702-a5f3-5cc028c2aba7', rel: self}, {href: 'http://172.21.247.1:8774/servers/1fabddca-0dcf-4702-a5f3-5cc028c2aba7', + rel: bookmark}], metadata: {}, name: k8s-ns-vnf1-mgmtVM-0, 'os-extended-volumes:volumes_attached': [], progress: 0, security_groups: [{name: default}, {name: default}], status: ACTIVE, tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated: '2022-07-28T08:42:17Z', + user_id: f043c84f940b4fc8a01a98714ea97c80} + vim_id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7 + vim_status: ACTIVE + vim_name: k8s-ns-vnf1-mgmtVM-0 + virtual-storages: + - id: mgmtVM-storage + size-of-storage: '10' + status: ACTIVE + vim-id: 1fabddca-0dcf-4702-a5f3-5cc028c2aba7 + name: k8s-ns-vnf1-mgmtVM-0 + connection-point: + - name: vnf-mgmt-ext + connection-point-id: mgmtVM-eth0-int + connection-point-vdu-id: mgmtVM + id: vnf-mgmt-ext + - name: vnf-data-ext + connection-point-id: dataVM-xe0-int + connection-point-vdu-id: mgmtVM + id: vnf-data-ext + ip-address: 172.21.248.199 + revision: 1 + _admin: + created: 1658998097.58048 + modified: 1658998097.58048 + projects_read: + - 51e0e80fe533469d98766caa16552a3e + projects_write: + - 51e0e80fe533469d98766caa16552a3e + nsState: INSTANTIATED +""" diff --git a/tox.ini b/tox.ini index 2e1199f..5ae56bd 100644 --- a/tox.ini +++ b/tox.ini @@ -121,7 +121,8 @@ ignore = E125, E203, E226, - E241 + E241, + E501 exclude = .git, __pycache__, -- 2.25.1 From 4c856b3bae4f30d9e1bdd429884c1ae84bc629f0 Mon Sep 17 00:00:00 2001 From: aticig Date: Fri, 19 Aug 2022 19:58:13 +0300 Subject: [PATCH 14/16] Fixing charm application name creation if DU is Helm Chart/KDU If deployment unit is a Helm chart/KDU, vdu_profile_id and vdu_count will be empty string. 
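In sketch form (mirroring the change below; the values involved are
only illustrative), the vdu-level branch now derives vdu_profile_id
only when a vdu_count is actually present:

    vdu_profile_id = ""
    if vdu_count is None:
        vdu_count = ""
    elif vdu_count:
        vdu_profile_id = vnfrs["vdur"][int(vdu_count)]["vdu-id-ref"]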
Change-Id: Ifb6aed0f08f2d7687d0cd6a31506268926ac7f63 Signed-off-by: aticig --- n2vc/n2vc_juju_conn.py | 13 ++++++- n2vc/tests/unit/test_n2vc_juju_conn.py | 48 ++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/n2vc/n2vc_juju_conn.py b/n2vc/n2vc_juju_conn.py index 5dc394c..f0569b1 100644 --- a/n2vc/n2vc_juju_conn.py +++ b/n2vc/n2vc_juju_conn.py @@ -1338,7 +1338,18 @@ class N2VCJujuConnector(N2VCConnector): elif charm_level == "vdu-level": if len(vca_records) < 1: raise N2VCException(message="One or more VCA record is expected.") - vdu_profile_id = vnfrs["vdur"][int(vdu_count)]["vdu-id-ref"] + + # Charms are also used for deployments with Helm charts. + # If deployment unit is a Helm chart/KDU, + # vdu_profile_id and vdu_count will be empty string. + vdu_profile_id = "" + + if vdu_count is None: + vdu_count = "" + + elif vdu_count: + vdu_profile_id = vnfrs["vdur"][int(vdu_count)]["vdu-id-ref"] + # If vnf/vdu is scaled, more than one VCA record may be included in vca_records # but ee_descriptor_id is same. # Shorten the ee_descriptor_id, member-vnf-index-ref and vdu_profile_id diff --git a/n2vc/tests/unit/test_n2vc_juju_conn.py b/n2vc/tests/unit/test_n2vc_juju_conn.py index ebf36d5..8720d96 100644 --- a/n2vc/tests/unit/test_n2vc_juju_conn.py +++ b/n2vc/tests/unit/test_n2vc_juju_conn.py @@ -1058,6 +1058,54 @@ class GenerateApplicationNameTest(N2VCJujuConnTestCase): ) self.db.get_one.assert_called_once() + def test_get_application_name_kdu_charm(self): + namespace = ".82b11965-e580-47c0-9ee0-329f318a305b.1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0.ldap" + self.db.get_one.return_value = { + "_admin": { + "deployed": { + "VCA": [ + { + "target_element": "vnf/openldap/kdu/ldap", + "member-vnf-index": "openldap", + "vdu_id": None, + "kdu_name": "ldap", + "vdu_count_index": 0, + "operational-status": "init", + "detailed-status": "", + "step": "initial-deploy", + "vnfd_id": "openldap_knf", + "vdu_name": None, + "type": "lxc_proxy_charm", + "ee_descriptor_id": "openldap-ee", + "charm_name": "", + "ee_id": "", + "application": "openldap-ee-z0-openldap-vdu", + "model": "82b11965-e580-47c0-9ee0-329f318a305b", + "config_sw_installed": True, + }, + ] + } + } + } + mock_vnf_count_and_record = MagicMock() + db_vnfr = { + "member-vnf-index-ref": "openldap", + "vdur": {}, + } + vnf_count = "0" + mock_vnf_count_and_record.return_value = (vnf_count, db_vnfr) + expected_result = "openldap-ee-z0-openldap-vdu" + with patch.object(self.n2vc, "db", self.db), patch.object( + self.n2vc, "_get_vnf_count_and_record", mock_vnf_count_and_record + ): + application_name = self.n2vc._get_application_name(namespace) + self.assertEqual(application_name, expected_result) + self.assertLess(len(application_name), 50) + mock_vnf_count_and_record.assert_called_once_with( + "vdu-level", "1b6a4eb3-4fbf-415e-985c-4aeb3161eec0-0" + ) + self.db.get_one.assert_called_once() + @patch( "n2vc.n2vc_juju_conn.generate_random_alfanum_string", **{"return_value": "random"} -- 2.25.1 From 015abee87f591b8e28f6b982ae4fb9c67e8791bb Mon Sep 17 00:00:00 2001 From: Mark Beierl Date: Fri, 19 Aug 2022 15:02:24 -0400 Subject: [PATCH 15/16] Bug 1890: Use output from action Pass the output from the action command back to the caller, regardless of the status of action completion. This allows for failure messages to be passed through to the LCM for storage in the action record. 
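Sketch of the resulting control flow (mirroring the diff below):

    if status == "completed":
        return output
    elif "output" in output:
        raise Exception(f'{status}: {output["output"]}')
    else:
        raise Exception(f"{status}: No further information received from action")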
Change-Id: I268c43225b71a80e54712743e61586d57952553e
Signed-off-by: Mark Beierl
---
 n2vc/n2vc_juju_conn.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/n2vc/n2vc_juju_conn.py b/n2vc/n2vc_juju_conn.py
index f0569b1..c6d00a8 100644
--- a/n2vc/n2vc_juju_conn.py
+++ b/n2vc/n2vc_juju_conn.py
@@ -1061,15 +1061,17 @@ class N2VCJujuConnector(N2VCConnector):
             if status == "completed":
                 return output
             else:
-                raise Exception("status is not completed: {}".format(status))
+                if "output" in output:
+                    raise Exception(f'{status}: {output["output"]}')
+                else:
+                    raise Exception(
+                        f"{status}: No further information received from action"
+                    )
+
         except Exception as e:
-            self.log.error(
-                "Error executing primitive {}: {}".format(primitive_name, e)
-            )
+            self.log.error(f"Error executing primitive {primitive_name}: {e}")
             raise N2VCExecutionException(
-                message="Error executing primitive {} into ee={} : {}".format(
-                    primitive_name, ee_id, e
-                ),
+                message=f"Error executing primitive {primitive_name} in ee={ee_id}: {e}",
                 primitive_name=primitive_name,
             )
-- 2.25.1


From 6343d434fa3cec28d8b9b470054d3a13ada8865a Mon Sep 17 00:00:00 2001
From: Patricia Reinoso
Date: Tue, 23 Aug 2022 06:22:01 +0000
Subject: [PATCH 16/16] Fix bug 2071: create secret

Manually create a service-account-token Secret for the service account
when a k8s cluster is created, in case the k8s version is >= 1.24. The
ServiceAccount object should be created first. When a k8s cluster is
deleted, the corresponding ServiceAccount and Secret are automatically
deleted as well. In k8s < 1.24, the secret is created automatically.

Change-Id: I160b1f87a64cf7a1bbb1fa8587259d8fbd3e6cd4
Signed-off-by: Patricia Reinoso
---
 n2vc/kubectl.py                 |  73 ++++++++++++-
 n2vc/tests/unit/test_kubectl.py | 175 ++++++++++++++++++++++++++++++++
 2 files changed, 243 insertions(+), 5 deletions(-)

diff --git a/n2vc/kubectl.py b/n2vc/kubectl.py
index a56b6cd..8b8008e 100644
--- a/n2vc/kubectl.py
+++ b/n2vc/kubectl.py
@@ -16,9 +16,12 @@ import base64
 import logging
 from typing import Dict
 import typing
+import uuid
+from distutils.version import LooseVersion

 from kubernetes import client, config
+from kubernetes.client.api import VersionApi
 from kubernetes.client.models import (
     V1ClusterRole,
     V1ObjectMeta,
@@ -27,6 +30,8 @@ from kubernetes.client.models import (
     V1ClusterRoleBinding,
     V1RoleRef,
     V1Subject,
+    V1Secret,
+    V1SecretReference,
 )
 from kubernetes.client.rest import ApiException
 from retrying_async import retry
@@ -178,6 +183,58 @@ class Kubectl:
         """
         self.clients[RBAC_CLIENT].delete_cluster_role(name)

+    def _get_kubectl_version(self):
+        version = VersionApi().get_code()
+        return "{}.{}".format(version.major, version.minor)
+
+    def _need_to_create_new_secret(self):
+        min_k8s_version = "1.24"
+        current_k8s_version = self._get_kubectl_version()
+        return LooseVersion(min_k8s_version) <= LooseVersion(current_k8s_version)
+
+    def _get_secret_name(self, service_account_name: str):
+        random_alphanum = str(uuid.uuid4())[:5]
+        return "{}-token-{}".format(service_account_name, random_alphanum)
+
+    def _create_service_account_secret(
+        self, service_account_name: str, namespace: str, secret_name: str
+    ):
+        """
+        Create a secret for the service account.
K8s version >= 1.24 + + :param: service_account_name: Name of the service account + :param: namespace: Kubernetes namespace for service account metadata + :param: secret_name: Name of the secret + """ + v1_core = self.clients[CORE_CLIENT] + secrets = v1_core.list_namespaced_secret( + namespace, field_selector="metadata.name={}".format(secret_name) + ).items + + if len(secrets) > 0: + raise Exception( + "Secret with metadata.name={} already exists".format(secret_name) + ) + + annotations = {"kubernetes.io/service-account.name": service_account_name} + metadata = V1ObjectMeta( + name=secret_name, namespace=namespace, annotations=annotations + ) + type = "kubernetes.io/service-account-token" + secret = V1Secret(metadata=metadata, type=type) + v1_core.create_namespaced_secret(namespace, secret) + + def _get_secret_reference_list(self, namespace: str, secret_name: str): + """ + Return a secret reference list with one secret. + K8s version >= 1.24 + + :param: namespace: Kubernetes namespace for service account metadata + :param: secret_name: Name of the secret + :rtype: list[V1SecretReference] + """ + return [V1SecretReference(name=secret_name, namespace=namespace)] + def create_service_account( self, name: str, @@ -192,7 +249,8 @@ class Kubectl: :param: namespace: Kubernetes namespace for service account metadata Default: kube-system """ - service_accounts = self.clients[CORE_CLIENT].list_namespaced_service_account( + v1_core = self.clients[CORE_CLIENT] + service_accounts = v1_core.list_namespaced_service_account( namespace, field_selector="metadata.name={}".format(name) ) if len(service_accounts.items) > 0: @@ -201,11 +259,16 @@ class Kubectl: ) metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace) - service_account = V1ServiceAccount(metadata=metadata) - self.clients[CORE_CLIENT].create_namespaced_service_account( - namespace, service_account - ) + if self._need_to_create_new_secret(): + secret_name = self._get_secret_name(name) + secrets = self._get_secret_reference_list(namespace, secret_name) + service_account = V1ServiceAccount(metadata=metadata, secrets=secrets) + v1_core.create_namespaced_service_account(namespace, service_account) + self._create_service_account_secret(name, namespace, secret_name) + else: + service_account = V1ServiceAccount(metadata=metadata) + v1_core.create_namespaced_service_account(namespace, service_account) def delete_service_account(self, name: str, namespace: str = "kube-system"): """ diff --git a/n2vc/tests/unit/test_kubectl.py b/n2vc/tests/unit/test_kubectl.py index eb9b01d..e67168e 100644 --- a/n2vc/tests/unit/test_kubectl.py +++ b/n2vc/tests/unit/test_kubectl.py @@ -16,6 +16,12 @@ from unittest import TestCase, mock from n2vc.kubectl import Kubectl, CORE_CLIENT from n2vc.utils import Dict from kubernetes.client.rest import ApiException +from kubernetes.client import ( + V1ObjectMeta, + V1Secret, + V1ServiceAccount, + V1SecretReference, +) class FakeK8sResourceMetadata: @@ -66,6 +72,38 @@ class FakeK8sStorageClassesList: return self._items +class FakeK8sServiceAccountsList: + def __init__(self, items=[]): + self._items = items + + @property + def items(self): + return self._items + + +class FakeK8sSecretList: + def __init__(self, items=[]): + self._items = items + + @property + def items(self): + return self._items + + +class FakeK8sVersionApiCode: + def __init__(self, major: str, minor: str): + self._major = major + self._minor = minor + + @property + def major(self): + return self._major + + @property + def minor(self): + return 
self._minor
+
+
 fake_list_services = Dict(
     {
         "items": [
@@ -248,3 +286,140 @@ class GetDefaultStorageClass(KubectlTestCase):
         sc_name = kubectl.get_default_storage_class()
         self.assertEqual(sc_name, self.default_sc_name)
         mock_list_storage_class.assert_called_once()
+
+
+@mock.patch("kubernetes.client.VersionApi.get_code")
+@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_secret")
+@mock.patch("kubernetes.client.CoreV1Api.create_namespaced_secret")
+@mock.patch("kubernetes.client.CoreV1Api.create_namespaced_service_account")
+@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_service_account")
+class CreateServiceAccountClass(KubectlTestCase):
+    @mock.patch("kubernetes.config.load_kube_config")
+    def setUp(self, mock_load_kube_config):
+        super(CreateServiceAccountClass, self).setUp()
+        self.service_account_name = "Service_account"
+        self.labels = {"Key1": "Value1", "Key2": "Value2"}
+        self.namespace = "kubernetes"
+        self.token_id = "abc12345"
+        self.kubectl = Kubectl()
+
+    def assert_create_secret(self, mock_create_secret, secret_name):
+        annotations = {"kubernetes.io/service-account.name": self.service_account_name}
+        secret_metadata = V1ObjectMeta(
+            name=secret_name, namespace=self.namespace, annotations=annotations
+        )
+        secret_type = "kubernetes.io/service-account-token"
+        secret = V1Secret(metadata=secret_metadata, type=secret_type)
+        mock_create_secret.assert_called_once_with(self.namespace, secret)
+
+    def assert_create_service_account_v_1_24(
+        self, mock_create_service_account, secret_name
+    ):
+        service_account_metadata = V1ObjectMeta(
+            name=self.service_account_name, labels=self.labels, namespace=self.namespace
+        )
+        secrets = [V1SecretReference(name=secret_name, namespace=self.namespace)]
+        service_account = V1ServiceAccount(
+            metadata=service_account_metadata, secrets=secrets
+        )
+        mock_create_service_account.assert_called_once_with(
+            self.namespace, service_account
+        )
+
+    def assert_create_service_account_v_1_23(self, mock_create_service_account):
+        metadata = V1ObjectMeta(
+            name=self.service_account_name, labels=self.labels, namespace=self.namespace
+        )
+        service_account = V1ServiceAccount(metadata=metadata)
+        mock_create_service_account.assert_called_once_with(
+            self.namespace, service_account
+        )
+
+    @mock.patch("n2vc.kubectl.uuid.uuid4")
+    def test_secret_is_created_when_k8s_1_24(
+        self,
+        mock_uuid4,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[])
+        mock_list_secret.return_value = FakeK8sSecretList(items=[])
+        mock_version.return_value = FakeK8sVersionApiCode("1", "24")
+        mock_uuid4.return_value = self.token_id
+        self.kubectl.create_service_account(
+            self.service_account_name, self.labels, self.namespace
+        )
+        secret_name = "{}-token-{}".format(self.service_account_name, self.token_id[:5])
+        self.assert_create_service_account_v_1_24(
+            mock_create_service_account, secret_name
+        )
+        self.assert_create_secret(mock_create_secret, secret_name)
+
+    def test_secret_is_not_created_when_k8s_1_23(
+        self,
+        mock_list_service_account,
+        mock_create_service_account,
+        mock_create_secret,
+        mock_list_secret,
+        mock_version,
+    ):
+        mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[])
+        mock_version.return_value = FakeK8sVersionApiCode("1", "23+")
+        self.kubectl.create_service_account(
+            self.service_account_name, self.labels, self.namespace
+        )
+
self.assert_create_service_account_v_1_23(mock_create_service_account) + mock_create_secret.assert_not_called() + mock_list_secret.assert_not_called() + + def test_raise_exception_if_service_account_already_exists( + self, + mock_list_service_account, + mock_create_service_account, + mock_create_secret, + mock_list_secret, + mock_version, + ): + mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[1]) + with self.assertRaises(Exception) as context: + self.kubectl.create_service_account( + self.service_account_name, self.labels, self.namespace + ) + self.assertTrue( + "Service account with metadata.name={} already exists".format( + self.service_account_name + ) + in str(context.exception) + ) + mock_create_service_account.assert_not_called() + mock_create_secret.assert_not_called() + + @mock.patch("n2vc.kubectl.uuid.uuid4") + def test_raise_exception_if_secret_already_exists( + self, + mock_uuid4, + mock_list_service_account, + mock_create_service_account, + mock_create_secret, + mock_list_secret, + mock_version, + ): + mock_list_service_account.return_value = FakeK8sServiceAccountsList(items=[]) + mock_list_secret.return_value = FakeK8sSecretList(items=[1]) + mock_version.return_value = FakeK8sVersionApiCode("1", "24+") + mock_uuid4.return_value = self.token_id + with self.assertRaises(Exception) as context: + self.kubectl.create_service_account( + self.service_account_name, self.labels, self.namespace + ) + self.assertTrue( + "Secret with metadata.name={}-token-{} already exists".format( + self.service_account_name, self.token_id[:5] + ) + in str(context.exception) + ) + mock_create_service_account.assert_called() + mock_create_secret.assert_not_called() -- 2.25.1
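For reference, the version gate introduced by this last patch can be
exercised on its own (a minimal sketch under the same assumptions the
patch makes: VersionApi().get_code() reports major/minor strings such
as "1" and "24+", and distutils' LooseVersion, which the patch itself
uses, tolerates the trailing "+"):

    from distutils.version import LooseVersion

    def needs_manual_token_secret(major: str, minor: str) -> bool:
        # Mirrors _need_to_create_new_secret(): a service-account-token
        # Secret must be created manually from k8s 1.24 onwards.
        return LooseVersion("1.24") <= LooseVersion("{}.{}".format(major, minor))

    assert needs_manual_token_secret("1", "24")
    assert needs_manual_token_secret("1", "25+")
    assert not needs_manual_token_secret("1", "23+")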