X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=n2vc%2Fk8s_helm_conn.py;h=c8c95ee466a73d1d8a7d2ce9e8e594ccc0219c97;hb=60a3a96717d7c36ba7a65573da59a6bc039f5e28;hp=6bbc0fa79b0b69eda28060a981441aafcfb440ae;hpb=dd32206a3adca6e6dc18bf8aa13a926108f7280c;p=osm%2FN2VC.git

diff --git a/n2vc/k8s_helm_conn.py b/n2vc/k8s_helm_conn.py
index 6bbc0fa..c8c95ee 100644
--- a/n2vc/k8s_helm_conn.py
+++ b/n2vc/k8s_helm_conn.py
@@ -20,6 +20,7 @@
 # contact with: nfvlabs@tid.es
 ##
 import asyncio
+from typing import Union
 import os
 import yaml
 
@@ -163,7 +164,6 @@ class K8sHelmConnector(K8sHelmBaseConnector):
         return True
 
     async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:
-
         self.log.debug(
             "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url)
         )
@@ -233,14 +233,15 @@ class K8sHelmConnector(K8sHelmBaseConnector):
 
         return paths, env
 
-    async def _get_services(self, cluster_id, kdu_instance, namespace):
-
+    async def _get_services(self, cluster_id, kdu_instance, namespace, kubeconfig):
         # init config, env
         paths, env = self._init_paths_env(
             cluster_name=cluster_id, create_if_not_exist=True
         )
 
-        command1 = "{} get manifest {} ".format(self._helm_command, kdu_instance)
+        command1 = "env KUBECONFIG={} {} get manifest {} ".format(
+            kubeconfig, self._helm_command, kdu_instance
+        )
         command2 = "{} get --namespace={} -f -".format(self.kubectl_command, namespace)
         output, _rc = await self._local_async_exec_pipe(
             command1, command2, env=env, raise_exception_on_error=True
@@ -299,8 +300,8 @@ class K8sHelmConnector(K8sHelmBaseConnector):
             )
 
             command = (
-                "{} --kubeconfig={} --tiller-namespace={} --home={} --service-account {} "
-                " {} init"
+                "{} init --kubeconfig={} --tiller-namespace={} --home={} --service-account {} "
+                " {}"
             ).format(
                 self._helm_command,
                 paths["kube_config"],
@@ -323,8 +324,8 @@ class K8sHelmConnector(K8sHelmBaseConnector):
         ):
             self.log.info("Initializing helm in client: {}".format(cluster_id))
             command = (
-                "{} --kubeconfig={} --tiller-namespace={} "
-                "--home={} init --client-only {} "
+                "{} init --kubeconfig={} --tiller-namespace={} "
+                "--home={} --client-only {} "
             ).format(
                 self._helm_command,
                 paths["kube_config"],
@@ -416,7 +417,6 @@ class K8sHelmConnector(K8sHelmBaseConnector):
             self.log.debug("namespace not found")
 
     async def _instances_list(self, cluster_id):
-
         # init
         paths, env = self._init_paths_env(
             cluster_name=cluster_id, create_if_not_exist=True
@@ -452,10 +452,9 @@ class K8sHelmConnector(K8sHelmBaseConnector):
         cluster_id: str,
         kdu_instance: str,
         namespace: str = None,
+        yaml_format: bool = False,
         show_error_log: bool = False,
-        return_text: bool = False,
-    ):
-
+    ) -> Union[str, dict]:
         self.log.debug(
             "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace)
         )
@@ -464,7 +463,9 @@ class K8sHelmConnector(K8sHelmBaseConnector):
         paths, env = self._init_paths_env(
             cluster_name=cluster_id, create_if_not_exist=True
         )
-        command = "{} status {} --output yaml".format(self._helm_command, kdu_instance)
+        command = ("env KUBECONFIG={} {} status {} --output yaml").format(
+            paths["kube_config"], self._helm_command, kdu_instance
+        )
         output, rc = await self._local_async_exec(
             command=command,
             raise_exception_on_error=True,
@@ -472,7 +473,7 @@ class K8sHelmConnector(K8sHelmBaseConnector):
             env=env,
         )
 
-        if return_text:
+        if yaml_format:
             return str(output)
 
         if rc != 0:
@@ -486,6 +487,15 @@ class K8sHelmConnector(K8sHelmBaseConnector):
         except KeyError:
             pass
 
+        # parse the manifest to a list of dictionaries
+        if "manifest" in data:
+            manifest_str = data.get("manifest")
+            manifest_docs = yaml.load_all(manifest_str, Loader=yaml.SafeLoader)
+
+            data["manifest"] = []
+            for doc in manifest_docs:
+                data["manifest"].append(doc)
+
         # parse field 'resources'
         try:
             resources = str(data.get("info").get("status").get("resources"))
@@ -515,9 +525,13 @@ class K8sHelmConnector(K8sHelmBaseConnector):
         )
 
     async def _is_install_completed(self, cluster_id: str, kdu_instance: str) -> bool:
+        # init config, env
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_id, create_if_not_exist=True
+        )
 
         status = await self._status_kdu(
-            cluster_id=cluster_id, kdu_instance=kdu_instance, return_text=False
+            cluster_id=cluster_id, kdu_instance=kdu_instance, yaml_format=False
         )
 
         # extract info.status.resources-> str
@@ -567,9 +581,16 @@ class K8sHelmConnector(K8sHelmBaseConnector):
         return ready
 
     def _get_install_command(
-        self, kdu_model, kdu_instance, namespace, params_str, version, atomic, timeout
+        self,
+        kdu_model,
+        kdu_instance,
+        namespace,
+        params_str,
+        version,
+        atomic,
+        timeout,
+        kubeconfig,
     ) -> str:
-
         timeout_str = ""
         if timeout:
             timeout_str = "--timeout {}".format(timeout)
@@ -589,8 +610,9 @@ class K8sHelmConnector(K8sHelmBaseConnector):
             version_str = version_str = "--version {}".format(version)
 
         command = (
-            "{helm} install {atomic} --output yaml "
+            "env KUBECONFIG={kubeconfig} {helm} install {atomic} --output yaml "
             "{params} {timeout} --name={name} {ns} {model} {ver}".format(
+                kubeconfig=kubeconfig,
                 helm=self._helm_command,
                 atomic=atomic_str,
                 params=params_str,
@@ -604,9 +626,16 @@ class K8sHelmConnector(K8sHelmBaseConnector):
         return command
 
     def _get_upgrade_command(
-        self, kdu_model, kdu_instance, namespace, params_str, version, atomic, timeout
+        self,
+        kdu_model,
+        kdu_instance,
+        namespace,
+        params_str,
+        version,
+        atomic,
+        timeout,
+        kubeconfig,
     ) -> str:
-
         timeout_str = ""
         if timeout:
             timeout_str = "--timeout {}".format(timeout)
@@ -621,7 +650,10 @@ class K8sHelmConnector(K8sHelmBaseConnector):
         if version:
             version_str = "--version {}".format(version)
 
-        command = "{helm} upgrade {atomic} --output yaml {params} {timeout} {name} {model} {ver}".format(
+        command = (
+            "env KUBECONFIG={kubeconfig} {helm} upgrade {atomic} --output yaml {params} {timeout} {name} {model} {ver}"
+        ).format(
+            kubeconfig=kubeconfig,
             helm=self._helm_command,
             atomic=atomic_str,
             params=params_str,
@@ -632,10 +664,16 @@ class K8sHelmConnector(K8sHelmBaseConnector):
         )
         return command
 
-    def _get_rollback_command(self, kdu_instance, namespace, revision) -> str:
-        return "{} rollback {} {} --wait".format(
-            self._helm_command, kdu_instance, revision
+    def _get_rollback_command(
+        self, kdu_instance, namespace, revision, kubeconfig
+    ) -> str:
+        return "env KUBECONFIG={} {} rollback {} {} --wait".format(
+            kubeconfig, self._helm_command, kdu_instance, revision
         )
 
-    def _get_uninstall_command(self, kdu_instance: str, namespace: str) -> str:
-        return "{} delete --purge {}".format(self._helm_command, kdu_instance)
+    def _get_uninstall_command(
+        self, kdu_instance: str, namespace: str, kubeconfig: str
+    ) -> str:
+        return "env KUBECONFIG={} {} delete --purge {}".format(
+            kubeconfig, self._helm_command, kdu_instance
+        )
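Two changes recur throughout this patch: every helm 2 invocation is now prefixed with "env KUBECONFIG=<path>" so the command runs against the intended cluster, and the multi-document manifest returned in the status output is parsed into a list of dictionaries with yaml.load_all instead of being kept as raw text. The following is a minimal standalone sketch of both ideas using plain PyYAML; the build_status_command and parse_manifest helpers and the sample manifest are illustrative only and are not functions from the patch.

import yaml


def build_status_command(kubeconfig, helm_command, kdu_instance):
    # Same pattern the patch applies to every helm call: prefix the command
    # with "env KUBECONFIG=<path>" so helm 2 talks to the intended cluster.
    return "env KUBECONFIG={} {} status {} --output yaml".format(
        kubeconfig, helm_command, kdu_instance
    )


def parse_manifest(manifest_str):
    # Mirrors the block added to _status_kdu: yaml.load_all() iterates over
    # every "---"-separated document in the rendered chart manifest.
    return [doc for doc in yaml.load_all(manifest_str, Loader=yaml.SafeLoader) if doc]


if __name__ == "__main__":
    # Hypothetical kubeconfig path and release name, for illustration only.
    print(build_status_command("/tmp/cluster-1/.kube/config", "helm", "my-release"))

    # Illustrative two-document manifest, similar to what a chart renders.
    sample = (
        "apiVersion: v1\n"
        "kind: Service\n"
        "metadata:\n"
        "  name: example-svc\n"
        "---\n"
        "apiVersion: apps/v1\n"
        "kind: Deployment\n"
        "metadata:\n"
        "  name: example-deploy\n"
    )
    for resource in parse_manifest(sample):
        print(resource["kind"], resource["metadata"]["name"])

Passing the kubeconfig explicitly on each command line, rather than relying on a single exported environment variable, keeps invocations against different clusters from interfering with one another when the connector handles several clusters.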