# For those usages not covered by the Apache License, Version 2.0 please
# contact with: nfvlabs@tid.es
##
+from typing import Union
import os
import yaml
"""
def __init__(
- self,
- fs: object,
- db: object,
- kubectl_command: str = "/usr/bin/kubectl",
- helm_command: str = "/usr/bin/helm3",
- log: object = None,
- on_update_db=None,
- vca_config: dict = None,
+ self,
+ fs: object,
+ db: object,
+ kubectl_command: str = "/usr/bin/kubectl",
+ helm_command: str = "/usr/bin/helm3",
+ log: object = None,
+ on_update_db=None,
):
"""
Initializes helm connector for helm v3
"""
# parent class
- K8sHelmBaseConnector.__init__(self,
- db=db,
- log=log,
- fs=fs,
- kubectl_command=kubectl_command,
- helm_command=helm_command,
- on_update_db=on_update_db,
- vca_config=vca_config)
+ K8sHelmBaseConnector.__init__(
+ self,
+ db=db,
+ log=log,
+ fs=fs,
+ kubectl_command=kubectl_command,
+ helm_command=helm_command,
+ on_update_db=on_update_db,
+ )
self.log.info("K8S Helm3 connector initialized")
async def install(
- self,
- cluster_uuid: str,
- kdu_model: str,
- kdu_instance: str,
- atomic: bool = True,
- timeout: float = 300,
- params: dict = None,
- db_dict: dict = None,
- kdu_name: str = None,
- namespace: str = None,
- **kwargs,
+ self,
+ cluster_uuid: str,
+ kdu_model: str,
+ kdu_instance: str,
+ atomic: bool = True,
+ timeout: float = 300,
+ params: dict = None,
+ db_dict: dict = None,
+ kdu_name: str = None,
+ namespace: str = None,
+ **kwargs,
):
"""Install a helm chart
:return: True if successful
"""
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
- self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_id))
+
+ self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid))
# sync local dir
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_id, create_if_not_exist=True
+ cluster_name=cluster_uuid, create_if_not_exist=True
)
# for helm3 if namespace does not exist must create it
if namespace and namespace != "kube-system":
- namespaces = await self._get_namespaces(cluster_id)
- if namespace not in namespaces:
- await self._create_namespace(cluster_id, namespace)
+ if not await self._namespace_exists(cluster_uuid, namespace):
+ try:
+ await self._create_namespace(cluster_uuid, namespace)
+ except Exception as e:
+ if not await self._namespace_exists(cluster_uuid, namespace):
+ err_msg = (
+                            "namespace {} does not exist in cluster_id {} "
+                            "error message: {}".format(namespace, cluster_uuid, e)
+ )
+ self.log.error(err_msg)
+ raise K8sException(err_msg)
await self._install_impl(
- cluster_id,
+ cluster_uuid,
kdu_model,
paths,
env,
)
# sync fs
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
self.log.debug("Returning kdu_instance {}".format(kdu_instance))
return True
"inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url)
)
- return await self._exec_inspect_comand(
+ return await self._exec_inspect_command(
inspect_command="all", kdu_model=kdu_model, repo_url=repo_url
)
paths = {
"kube_dir": kube_dir,
"kube_config": config_filename,
- "cluster_dir": cluster_dir
+ "cluster_dir": cluster_dir,
}
# 3 - Prepare environment variables
"HELM_CACHE_HOME": helm_path_cache,
"HELM_CONFIG_HOME": helm_path_config,
"HELM_DATA_HOME": helm_path_data,
- "KUBECONFIG": config_filename
+ "KUBECONFIG": config_filename,
}
for file_name, file in paths.items():
return paths, env
- async def _get_namespaces(self,
- cluster_id: str):
+ async def _namespace_exists(self, cluster_id, namespace) -> bool:
+ self.log.debug(
+ "checking if namespace {} exists cluster_id {}".format(
+ namespace, cluster_id
+ )
+ )
+ namespaces = await self._get_namespaces(cluster_id)
+ return namespace in namespaces if namespaces else False
+
+ async def _get_namespaces(self, cluster_id: str):
self.log.debug("get namespaces cluster_id {}".format(cluster_id))
return namespaces
- async def _create_namespace(self,
- cluster_id: str,
- namespace: str):
+ async def _create_namespace(self, cluster_id: str, namespace: str):
self.log.debug(f"create namespace: {cluster_id} for cluster_id: {namespace}")
return _rc
- async def _get_services(self, cluster_id: str, kdu_instance: str, namespace: str):
+ async def _get_services(
+ self, cluster_id: str, kdu_instance: str, namespace: str, kubeconfig: str
+ ):
# init config, env
paths, env = self._init_paths_env(
cluster_name=cluster_id, create_if_not_exist=True
)
- command1 = "{} get manifest {} --namespace={}".format(
- self._helm_command, kdu_instance, namespace
- )
- command2 = "{} get --namespace={} -f -".format(
- self.kubectl_command, namespace
+ command1 = "env KUBECONFIG={} {} get manifest {} --namespace={}".format(
+ kubeconfig, self._helm_command, kdu_instance, namespace
)
+ command2 = "{} get --namespace={} -f -".format(self.kubectl_command, namespace)
output, _rc = await self._local_async_exec_pipe(
command1, command2, env=env, raise_exception_on_error=True
)
if namespace not in namespaces:
await self._create_namespace(cluster_id, namespace)
- # If default repo is not included add
- cluster_uuid = "{}:{}".format(namespace, cluster_id)
- repo_list = await self.repo_list(cluster_uuid)
- for repo in repo_list:
- self.log.debug("repo")
- if repo["name"] == "stable":
- self.log.debug("Default repo already present")
- break
- else:
- await self.repo_add(cluster_uuid,
- "stable",
- self._stable_repo_url)
+ repo_list = await self.repo_list(cluster_id)
+ stable_repo = [repo for repo in repo_list if repo["name"] == "stable"]
+ if not stable_repo and self._stable_repo_url:
+ await self.repo_add(cluster_id, "stable", self._stable_repo_url)
# Returns False as no software needs to be uninstalled
return False
cluster_name=cluster_id, create_if_not_exist=True
)
- command = "{} list --all-namespaces --output yaml".format(
- self._helm_command
- )
+ command = "{} list --all-namespaces --output yaml".format(self._helm_command)
output, _rc = await self._local_async_exec(
command=command, raise_exception_on_error=True, env=env
)
else:
return []
- def _get_inspect_command(self, inspect_command: str, kdu_model: str, repo_str: str,
- version: str):
+ def _get_inspect_command(
+ self, inspect_command: str, kdu_model: str, repo_str: str, version: str
+ ):
inspect_command = "{} show {} {}{} {}".format(
self._helm_command, inspect_command, kdu_model, repo_str, version
)
return inspect_command
+ def _get_get_command(
+ self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
+ ):
+ get_command = (
+ "env KUBECONFIG={} {} get {} {} --namespace={} --output yaml".format(
+ kubeconfig, self._helm_command, get_command, kdu_instance, namespace
+ )
+ )
+ return get_command
+
async def _status_kdu(
self,
cluster_id: str,
kdu_instance: str,
namespace: str = None,
+ yaml_format: bool = False,
show_error_log: bool = False,
- return_text: bool = False,
- ):
+ ) -> Union[str, dict]:
- self.log.debug("status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace))
+ self.log.debug(
+ "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace)
+ )
if not namespace:
namespace = "kube-system"
paths, env = self._init_paths_env(
cluster_name=cluster_id, create_if_not_exist=True
)
- command = "{} status {} --namespace={} --output yaml".format(
- self._helm_command, kdu_instance, namespace
+ command = "env KUBECONFIG={} {} status {} --namespace={} --output yaml".format(
+ paths["kube_config"], self._helm_command, kdu_instance, namespace
)
output, rc = await self._local_async_exec(
command=command,
raise_exception_on_error=True,
show_error_log=show_error_log,
- env=env
+ env=env,
)
- if return_text:
+ if yaml_format:
return str(output)
if rc != 0:
# remove field 'notes' and manifest
try:
del data.get("info")["notes"]
- del data["manifest"]
except KeyError:
pass
- # unable to parse 'resources' as currently it is not included in helm3
+ # parse the manifest to a list of dictionaries
+ if "manifest" in data:
+ manifest_str = data.get("manifest")
+ manifest_docs = yaml.load_all(manifest_str, Loader=yaml.SafeLoader)
+
+ data["manifest"] = []
+ for doc in manifest_docs:
+ data["manifest"].append(doc)
+
return data
- def _get_install_command(self, kdu_model: str, kdu_instance: str, namespace: str,
- params_str: str, version: str, atomic: bool, timeout: float) -> str:
+ def _get_install_command(
+ self,
+ kdu_model: str,
+ kdu_instance: str,
+ namespace: str,
+ params_str: str,
+ version: str,
+ atomic: bool,
+ timeout: float,
+ kubeconfig: str,
+ ) -> str:
timeout_str = ""
if timeout:
version_str = "--version {}".format(version)
command = (
- "{helm} install {name} {atomic} --output yaml "
+ "env KUBECONFIG={kubeconfig} {helm} install {name} {atomic} --output yaml "
"{params} {timeout} {ns} {model} {ver}".format(
+ kubeconfig=kubeconfig,
helm=self._helm_command,
name=kdu_instance,
atomic=atomic_str,
)
return command
- def _get_upgrade_command(self, kdu_model: str, kdu_instance: str, namespace: str,
- params_str: str, version: str, atomic: bool, timeout: float) -> str:
+ def _get_upgrade_scale_command(
+ self,
+ kdu_model: str,
+ kdu_instance: str,
+ namespace: str,
+ scale: int,
+ version: str,
+ atomic: bool,
+ replica_str: str,
+ timeout: float,
+ resource_name: str,
+ kubeconfig: str,
+ ) -> str:
timeout_str = ""
if timeout:
if namespace:
namespace_str = "--namespace {}".format(namespace)
+ # scale
+ if resource_name:
+ scale_dict = {"{}.{}".format(resource_name, replica_str): scale}
+ else:
+ scale_dict = {replica_str: scale}
+
+ scale_str = self._params_to_set_option(scale_dict)
+
command = (
- "{helm} upgrade {name} {model} {namespace} {atomic} --output yaml {params} "
- "{timeout} {ver}".format(
- helm=self._helm_command,
- name=kdu_instance,
- namespace=namespace_str,
- atomic=atomic_str,
- params=params_str,
- timeout=timeout_str,
- model=kdu_model,
- ver=version_str,
- )
+ "env KUBECONFIG={kubeconfig} {helm} upgrade {name} {model} {namespace} {atomic} --output yaml {scale} "
+ "{timeout} {ver}"
+ ).format(
+ helm=self._helm_command,
+ name=kdu_instance,
+ namespace=namespace_str,
+ atomic=atomic_str,
+ scale=scale_str,
+ timeout=timeout_str,
+ model=kdu_model,
+ ver=version_str,
+ kubeconfig=kubeconfig,
+ )
+ return command
+
+ def _get_upgrade_command(
+ self,
+ kdu_model: str,
+ kdu_instance: str,
+ namespace: str,
+ params_str: str,
+ version: str,
+ atomic: bool,
+ timeout: float,
+ kubeconfig: str,
+ ) -> str:
+
+ timeout_str = ""
+ if timeout:
+ timeout_str = "--timeout {}s".format(timeout)
+
+ # atomic
+ atomic_str = ""
+ if atomic:
+ atomic_str = "--atomic"
+
+ # version
+ version_str = ""
+ if version:
+ version_str = "--version {}".format(version)
+
+ # namespace
+ namespace_str = ""
+ if namespace:
+ namespace_str = "--namespace {}".format(namespace)
+
+ command = (
+ "env KUBECONFIG={kubeconfig} {helm} upgrade {name} {model} {namespace} {atomic} "
+ "--output yaml {params} {timeout} {ver}"
+ ).format(
+ kubeconfig=kubeconfig,
+ helm=self._helm_command,
+ name=kdu_instance,
+ namespace=namespace_str,
+ atomic=atomic_str,
+ params=params_str,
+ timeout=timeout_str,
+ model=kdu_model,
+ ver=version_str,
)
return command
- def _get_rollback_command(self, kdu_instance: str, namespace: str, revision: float) -> str:
- return "{} rollback {} {} --namespace={} --wait".format(
- self._helm_command, kdu_instance, revision, namespace
+ def _get_rollback_command(
+ self, kdu_instance: str, namespace: str, revision: float, kubeconfig: str
+ ) -> str:
+ return "env KUBECONFIG={} {} rollback {} {} --namespace={} --wait".format(
+ kubeconfig, self._helm_command, kdu_instance, revision, namespace
)
- def _get_uninstall_command(self, kdu_instance: str, namespace: str) -> str:
+ def _get_uninstall_command(
+ self, kdu_instance: str, namespace: str, kubeconfig: str
+ ) -> str:
- return "{} uninstall {} --namespace={}".format(
- self._helm_command, kdu_instance, namespace)
+ return "env KUBECONFIG={} {} uninstall {} --namespace={}".format(
+ kubeconfig, self._helm_command, kdu_instance, namespace
+ )
def _get_helm_chart_repos_ids(self, cluster_uuid) -> list:
repo_ids = []