# contact with: nfvlabs@tid.es
##
import asyncio
+from typing import Union
import os
import yaml
happen before any _initial-config-primitive_of the VNF is called).
:param cluster_uuid: UUID of a K8s cluster known by OSM
- :param kdu_model: chart/ reference (string), which can be either
+ :param kdu_model: chart/reference (string), which can be either
of these options:
- a name of chart available via the repos known by OSM
- - a path to a packaged chart
- - a path to an unpacked chart directory or a URL
+ (e.g. stable/openldap, stable/openldap:1.2.4)
+ - a path to a packaged chart (e.g. mychart.tgz)
+ - a path to an unpacked chart directory or a URL (e.g. mychart)
:param kdu_instance: Kdu instance name
:param atomic: If set, installation process purges chart/bundle on fail, also
will wait until all the K8s objects are active
:param kwargs: Additional parameters (None yet)
:return: True if successful
"""
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
- self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_id))
+ self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid))
# sync local dir
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_id, create_if_not_exist=True
+ cluster_name=cluster_uuid, create_if_not_exist=True
)
await self._install_impl(
- cluster_id,
+ cluster_uuid,
kdu_model,
paths,
env,
)
# sync fs
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
self.log.debug("Returning kdu_instance {}".format(kdu_instance))
return True
"inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url)
)
- return await self._exec_inspect_comand(
+ return await self._exec_inspect_command(
inspect_command="", kdu_model=kdu_model, repo_url=repo_url
)
else:
self.log.info("Helm client already initialized")
- # remove old stable repo and add new one
- cluster_uuid = "{}:{}".format(namespace, cluster_id)
- repo_list = await self.repo_list(cluster_uuid)
+ repo_list = await self.repo_list(cluster_id)
for repo in repo_list:
if repo["name"] == "stable" and repo["url"] != self._stable_repo_url:
self.log.debug("Add new stable repo url: {}")
- await self.repo_remove(cluster_uuid, "stable")
+ await self.repo_remove(cluster_id, "stable")
if self._stable_repo_url:
- await self.repo_add(cluster_uuid, "stable", self._stable_repo_url)
+ await self.repo_add(cluster_id, "stable", self._stable_repo_url)
break
return n2vc_installed_sw
output, _rc = await self._local_async_exec(
command=command, raise_exception_on_error=False, env=env
)
- command = "{} --kubeconfig={} --namespace kube-system delete serviceaccount/{}".format(
- self.kubectl_command, paths["kube_config"], self.service_account
+ command = (
+ "{} --kubeconfig={} --namespace {} delete serviceaccount/{}".format(
+ self.kubectl_command,
+ paths["kube_config"],
+ namespace,
+ self.service_account,
+ )
)
output, _rc = await self._local_async_exec(
command=command, raise_exception_on_error=False, env=env
)
return inspect_command
+ def _get_get_command(
+ self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
+ ):
+ """Build the shell command string for a helm 'get' subcommand.
+
+ :param get_command: helm get subcommand to run (e.g. "values", "manifest")
+ :param kdu_instance: name of the helm release to query
+ :param namespace: target namespace — NOTE(review): not used in the
+ generated command; helm v2 resolves the release by name alone.
+ Confirm this is intentional.
+ :param kubeconfig: path to the kubeconfig file, exported via KUBECONFIG
+ :return: fully formatted command string with YAML output requested
+ """
+ get_command = "env KUBECONFIG={} {} get {} {} --output yaml".format(
+ kubeconfig, self._helm_command, get_command, kdu_instance
+ )
+ return get_command
+
async def _status_kdu(
self,
cluster_id: str,
kdu_instance: str,
namespace: str = None,
+ yaml_format: bool = False,
show_error_log: bool = False,
- return_text: bool = False,
- ):
+ ) -> Union[str, dict]:
self.log.debug(
"status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace)
env=env,
)
- if return_text:
+ if yaml_format:
return str(output)
if rc != 0:
except KeyError:
pass
+ # parse the manifest to a list of dictionaries
+ if "manifest" in data:
+ manifest_str = data.get("manifest")
+ manifest_docs = yaml.load_all(manifest_str, Loader=yaml.SafeLoader)
+
+ data["manifest"] = []
+ for doc in manifest_docs:
+ data["manifest"].append(doc)
+
# parse field 'resources'
try:
resources = str(data.get("info").get("status").get("resources"))
)
status = await self._status_kdu(
- cluster_id=cluster_id, kdu_instance=kdu_instance, return_text=False
+ cluster_id=cluster_id, kdu_instance=kdu_instance, yaml_format=False
)
# extract info.status.resources-> str
# version
version_str = ""
if version:
- version_str = version_str = "--version {}".format(version)
+ version_str = "--version {}".format(version)
command = (
"env KUBECONFIG={kubeconfig} {helm} install {atomic} --output yaml "
)
return command
+ def _get_upgrade_scale_command(
+ self,
+ kdu_model: str,
+ kdu_instance: str,
+ namespace: str,
+ scale: int,
+ version: str,
+ atomic: bool,
+ replica_str: str,
+ timeout: float,
+ resource_name: str,
+ kubeconfig: str,
+ ) -> str:
+ """Build the helm 'upgrade' command used to scale a KDU resource.
+
+ The scale value is injected as a --set option (via
+ _params_to_set_option), targeting either
+ "<resource_name>.<replica_str>" or plain "<replica_str>".
+
+ :param kdu_model: chart reference to upgrade to
+ :param kdu_instance: name of the helm release
+ :param namespace: target namespace — NOTE(review): accepted but not
+ included in the generated command; confirm whether a --namespace
+ flag is needed here.
+ :param scale: desired replica count
+ :param version: optional chart version (adds --version)
+ :param atomic: if True, adds --atomic so a failed upgrade rolls back
+ :param replica_str: key name of the replica-count value in the chart
+ :param timeout: optional timeout in seconds (adds --timeout <n>s)
+ :param resource_name: optional resource whose replicas are scaled;
+ prefixes the replica key when given
+ :param kubeconfig: path to the kubeconfig file, exported via KUBECONFIG
+ :return: fully formatted helm upgrade command string
+ """
+
+ # timeout
+ timeout_str = ""
+ if timeout:
+ timeout_str = "--timeout {}s".format(timeout)
+
+ # atomic
+ atomic_str = ""
+ if atomic:
+ atomic_str = "--atomic"
+
+ # version
+ version_str = ""
+ if version:
+ version_str = "--version {}".format(version)
+
+ # scale
+ if resource_name:
+ scale_dict = {"{}.{}".format(resource_name, replica_str): scale}
+ else:
+ scale_dict = {replica_str: scale}
+
+ scale_str = self._params_to_set_option(scale_dict)
+
+ command = (
+ "env KUBECONFIG={kubeconfig} {helm} upgrade {atomic} --output yaml {scale} {timeout} {name} {model} {ver}"
+ ).format(
+ helm=self._helm_command,
+ name=kdu_instance,
+ atomic=atomic_str,
+ scale=scale_str,
+ timeout=timeout_str,
+ model=kdu_model,
+ ver=version_str,
+ kubeconfig=kubeconfig,
+ )
+ return command
+
def _get_upgrade_command(
self,
kdu_model,