Bug 1980 fixed
[osm/N2VC.git] / n2vc / k8s_helm3_conn.py
index 72ba062..bb08f07 100644
--- a/n2vc/k8s_helm3_conn.py
+++ b/n2vc/k8s_helm3_conn.py
@@ -19,6 +19,7 @@
 # For those usages not covered by the Apache License, Version 2.0 please
 # contact with: nfvlabs@tid.es
 ##
+from typing import Union
 import os
 import yaml
 
@@ -35,13 +36,13 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
     """
 
     def __init__(
-            self,
-            fs: object,
-            db: object,
-            kubectl_command: str = "/usr/bin/kubectl",
-            helm_command: str = "/usr/bin/helm3",
-            log: object = None,
-            on_update_db=None,
+        self,
+        fs: object,
+        db: object,
+        kubectl_command: str = "/usr/bin/kubectl",
+        helm_command: str = "/usr/bin/helm3",
+        log: object = None,
+        on_update_db=None,
     ):
         """
         Initializes helm connector for helm v3
@@ -55,23 +56,100 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         """
 
         # parent class
-        K8sHelmBaseConnector.__init__(self,
-                                      db=db,
-                                      log=log,
-                                      fs=fs,
-                                      kubectl_command=kubectl_command,
-                                      helm_command=helm_command,
-                                      on_update_db=on_update_db)
+        K8sHelmBaseConnector.__init__(
+            self,
+            db=db,
+            log=log,
+            fs=fs,
+            kubectl_command=kubectl_command,
+            helm_command=helm_command,
+            on_update_db=on_update_db,
+        )
 
         self.log.info("K8S Helm3 connector initialized")
 
+    async def install(
+        self,
+        cluster_uuid: str,
+        kdu_model: str,
+        kdu_instance: str,
+        atomic: bool = True,
+        timeout: float = 300,
+        params: dict = None,
+        db_dict: dict = None,
+        kdu_name: str = None,
+        namespace: str = None,
+        **kwargs,
+    ):
+        """Install a helm chart
+
+        :param cluster_uuid str: The UUID of the cluster to install to
+        :param kdu_model str: The name or path of a bundle to install
+        :param kdu_instance: Kdu instance name
+        :param atomic bool: If set, waits until the release is active and
+                            deletes it on failure
+        :param timeout float: The time, in seconds, to wait for the install
+                              to finish
+        :param params dict: Key-value pairs of instantiation parameters
+        :param db_dict dict: Dictionary with database information used to
+                             write the operation status (optional)
+        :param kdu_name: Name of the KDU instance to be installed
+        :param namespace: K8s namespace to use for the KDU instance
+
+        :param kwargs: Additional parameters (None yet)
+
+        :return: True if successful
+        """
+
+        self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid))
+
+        # sync local dir
+        self.fs.sync(from_path=cluster_uuid)
+
+        # init env, paths
+        paths, env = self._init_paths_env(
+            cluster_name=cluster_uuid, create_if_not_exist=True
+        )
+
+        # for helm3, if the namespace does not exist it must be created first
+        if namespace and namespace != "kube-system":
+            if not await self._namespace_exists(cluster_uuid, namespace):
+                try:
+                    await self._create_namespace(cluster_uuid, namespace)
+                except Exception as e:
+                    if not await self._namespace_exists(cluster_uuid, namespace):
+                        err_msg = (
+                            "namespace {} does not exist in cluster_id {} "
+                            "error message: {}".format(namespace, cluster_uuid, e)
+                        )
+                        self.log.error(err_msg)
+                        raise K8sException(err_msg)
+
+        await self._install_impl(
+            cluster_uuid,
+            kdu_model,
+            paths,
+            env,
+            kdu_instance,
+            atomic=atomic,
+            timeout=timeout,
+            params=params,
+            db_dict=db_dict,
+            kdu_name=kdu_name,
+            namespace=namespace,
+        )
+
+        # sync fs
+        self.fs.reverse_sync(from_path=cluster_uuid)
+
+        self.log.debug("kdu_instance {} successfully installed".format(kdu_instance))
+        return True
+
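Illustrative usage of the install() coroutine above (a sketch, not part of the
patch; the cluster UUID, chart reference, instance name and namespace are
placeholder values, and "connector" is assumed to be an already initialized
K8sHelm3Connector):

    async def deploy_example(connector: K8sHelm3Connector) -> None:
        # The target namespace is created first if it does not already exist,
        # then _install_impl() runs the actual helm install.
        done = await connector.install(
            cluster_uuid="cluster-0001",      # placeholder cluster id
            kdu_model="stable/openldap",      # placeholder chart reference
            kdu_instance="ldap-0001",
            atomic=True,
            timeout=300,
            params={"replicaCount": 2},
            namespace="example-ns",
        )
        assert done is True

    # driven from an event loop, e.g.: asyncio.run(deploy_example(connector))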
     async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:
 
         self.log.debug(
             "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url)
         )
 
-        return await self._exec_inspect_comand(
+        return await self._exec_inspect_command(
             inspect_command="all", kdu_model=kdu_model, repo_url=repo_url
         )
 
@@ -138,7 +216,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         paths = {
             "kube_dir": kube_dir,
             "kube_config": config_filename,
-            "cluster_dir": cluster_dir
+            "cluster_dir": cluster_dir,
         }
 
         # 3 - Prepare environment variables
@@ -146,7 +224,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
             "HELM_CACHE_HOME": helm_path_cache,
             "HELM_CONFIG_HOME": helm_path_config,
             "HELM_DATA_HOME": helm_path_data,
-            "KUBECONFIG": config_filename
+            "KUBECONFIG": config_filename,
         }
 
         for file_name, file in paths.items():
@@ -157,8 +235,16 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
 
         return paths, env
 
-    async def _get_namespaces(self,
-                              cluster_id: str):
+    async def _namespace_exists(self, cluster_id, namespace) -> bool:
+        self.log.debug(
+            "checking if namespace {} exists cluster_id {}".format(
+                namespace, cluster_id
+            )
+        )
+        namespaces = await self._get_namespaces(cluster_id)
+        return namespace in namespaces if namespaces else False
+
+    async def _get_namespaces(self, cluster_id: str):
 
         self.log.debug("get namespaces cluster_id {}".format(cluster_id))
 
@@ -180,9 +266,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
 
         return namespaces
 
-    async def _create_namespace(self,
-                                cluster_id: str,
-                                namespace: str):
+    async def _create_namespace(self, cluster_id: str, namespace: str):
 
         self.log.debug(f"create namespace: {cluster_id} for cluster_id: {namespace}")
 
@@ -201,19 +285,19 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
 
         return _rc
 
-    async def _get_services(self, cluster_id: str, kdu_instance: str, namespace: str):
+    async def _get_services(
+        self, cluster_id: str, kdu_instance: str, namespace: str, kubeconfig: str
+    ):
 
         # init config, env
         paths, env = self._init_paths_env(
             cluster_name=cluster_id, create_if_not_exist=True
         )
 
-        command1 = "{} get manifest {} --namespace={}".format(
-            self._helm_command, kdu_instance, namespace
-        )
-        command2 = "{} get --namespace={} -f -".format(
-            self.kubectl_command, namespace
+        command1 = "env KUBECONFIG={} {} get manifest {} --namespace={}".format(
+            kubeconfig, self._helm_command, kdu_instance, namespace
         )
+        command2 = "{} get --namespace={} -f -".format(self.kubectl_command, namespace)
         output, _rc = await self._local_async_exec_pipe(
             command1, command2, env=env, raise_exception_on_error=True
         )
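With illustrative values the two piped commands above render roughly as follows
(a sketch only; the kubeconfig path, instance name and namespace are placeholders,
and the binaries are the defaults from __init__):

    # helm renders the release manifest, and kubectl reads it from stdin
    # ("-f -") to return the live objects the services are extracted from.
    kubeconfig = "/app/storage/cluster-0001/.kube/config"   # placeholder path
    command1 = "env KUBECONFIG={} {} get manifest {} --namespace={}".format(
        kubeconfig, "/usr/bin/helm3", "ldap-0001", "example-ns"
    )
    command2 = "{} get --namespace={} -f -".format("/usr/bin/kubectl", "example-ns")
    print("{} | {}".format(command1, command2))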
@@ -231,18 +315,10 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
             if namespace not in namespaces:
                 await self._create_namespace(cluster_id, namespace)
 
-        # If default repo is not included add
-        cluster_uuid = "{}:{}".format(namespace, cluster_id)
-        repo_list = await self.repo_list(cluster_uuid)
-        for repo in repo_list:
-            self.log.debug("repo")
-            if repo["name"] == "stable":
-                self.log.debug("Default repo already present")
-                break
-        else:
-            await self.repo_add(cluster_uuid,
-                                "stable",
-                                "https://kubernetes-charts.storage.googleapis.com/")
+        repo_list = await self.repo_list(cluster_id)
+        stable_repo = [repo for repo in repo_list if repo["name"] == "stable"]
+        if not stable_repo and self._stable_repo_url:
+            await self.repo_add(cluster_id, "stable", self._stable_repo_url)
 
         # Returns False as no software needs to be uninstalled
         return False
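A standalone sketch of the default-repo check above (repo names and URL are
illustrative; _stable_repo_url is assumed to be the stable-repo URL attribute
held by the base connector, which may be empty):

    def needs_stable_repo(repo_list: list, stable_repo_url: str) -> bool:
        # "stable" is added only when it is not registered yet and a URL is configured.
        stable_repo = [repo for repo in repo_list if repo["name"] == "stable"]
        return not stable_repo and bool(stable_repo_url)

    assert needs_stable_repo([{"name": "bitnami"}], "https://charts.example.org")
    assert not needs_stable_repo([{"name": "stable"}], "https://charts.example.org")
    assert not needs_stable_repo([{"name": "bitnami"}], "")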
@@ -258,9 +334,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
             cluster_name=cluster_id, create_if_not_exist=True
         )
 
-        command = "{} list --all-namespaces  --output yaml".format(
-            self._helm_command
-        )
+        command = "{} list --all-namespaces  --output yaml".format(self._helm_command)
         output, _rc = await self._local_async_exec(
             command=command, raise_exception_on_error=True, env=env
         )
@@ -271,23 +345,36 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         else:
             return []
 
-    def _get_inspect_command(self, inspect_command: str, kdu_model: str, repo_str: str,
-                             version: str):
+    def _get_inspect_command(
+        self, inspect_command: str, kdu_model: str, repo_str: str, version: str
+    ):
         inspect_command = "{} show {} {}{} {}".format(
             self._helm_command, inspect_command, kdu_model, repo_str, version
         )
         return inspect_command
 
+    def _get_get_command(
+        self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
+    ):
+        get_command = (
+            "env KUBECONFIG={} {} get {} {} --namespace={} --output yaml".format(
+                kubeconfig, self._helm_command, get_command, kdu_instance, namespace
+            )
+        )
+        return get_command
+
     async def _status_kdu(
         self,
         cluster_id: str,
         kdu_instance: str,
         namespace: str = None,
+        yaml_format: bool = False,
         show_error_log: bool = False,
-        return_text: bool = False,
-    ):
+    ) -> Union[str, dict]:
 
-        self.log.debug("status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace))
+        self.log.debug(
+            "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace)
+        )
 
         if not namespace:
             namespace = "kube-system"
@@ -296,18 +383,18 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         paths, env = self._init_paths_env(
             cluster_name=cluster_id, create_if_not_exist=True
         )
-        command = "{} status {} --namespace={} --output yaml".format(
-            self._helm_command, kdu_instance, namespace
+        command = "env KUBECONFIG={} {} status {} --namespace={} --output yaml".format(
+            paths["kube_config"], self._helm_command, kdu_instance, namespace
         )
 
         output, rc = await self._local_async_exec(
             command=command,
             raise_exception_on_error=True,
             show_error_log=show_error_log,
-            env=env
+            env=env,
         )
 
-        if return_text:
+        if yaml_format:
             return str(output)
 
         if rc != 0:
@@ -318,15 +405,31 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
-        # remove field 'notes' and manifest
+        # remove field 'notes'
         try:
             del data.get("info")["notes"]
-            del data["manifest"]
         except KeyError:
             pass
 
-        # unable to parse 'resources' as currently it is not included in helm3
+        # parse the manifest to a list of dictionaries
+        if "manifest" in data:
+            manifest_str = data.get("manifest")
+            manifest_docs = yaml.load_all(manifest_str, Loader=yaml.SafeLoader)
+
+            data["manifest"] = []
+            for doc in manifest_docs:
+                data["manifest"].append(doc)
+
         return data
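A minimal standalone sketch of the multi-document manifest parsing above (the
two-document manifest string is made up; only yaml.load_all with SafeLoader from
PyYAML is relied on):

    import yaml

    manifest_str = (
        "apiVersion: v1\n"
        "kind: Service\n"
        "metadata:\n"
        "  name: example-svc\n"
        "---\n"
        "apiVersion: apps/v1\n"
        "kind: Deployment\n"
        "metadata:\n"
        "  name: example-deploy\n"
    )

    # Each YAML document becomes one dict, so data["manifest"] ends up as a list
    # of the rendered K8s objects instead of a single opaque string.
    docs = list(yaml.load_all(manifest_str, Loader=yaml.SafeLoader))
    assert [d["kind"] for d in docs] == ["Service", "Deployment"]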
 
-    def _get_install_command(self, kdu_model: str, kdu_instance: str, namespace: str,
-                             params_str: str, version: str, atomic: bool, timeout: float) -> str:
+    def _get_install_command(
+        self,
+        kdu_model: str,
+        kdu_instance: str,
+        namespace: str,
+        params_str: str,
+        version: str,
+        atomic: bool,
+        timeout: float,
+        kubeconfig: str,
+    ) -> str:
 
         timeout_str = ""
         if timeout:
@@ -347,8 +450,9 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
             version_str = "--version {}".format(version)
 
         command = (
-            "{helm} install {name} {atomic} --output yaml  "
+            "env KUBECONFIG={kubeconfig} {helm} install {name} {atomic} --output yaml  "
             "{params} {timeout} {ns} {model} {ver}".format(
+                kubeconfig=kubeconfig,
                 helm=self._helm_command,
                 name=kdu_instance,
                 atomic=atomic_str,
@@ -361,8 +465,74 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         )
         return command
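For reference, with illustrative arguments the install command built above renders
as follows (a sketch; the kubeconfig path, chart, instance name and the pre-rendered
params/option strings are placeholders):

    command = (
        "env KUBECONFIG={kubeconfig} {helm} install {name} {atomic} --output yaml  "
        "{params} {timeout} {ns} {model} {ver}".format(
            kubeconfig="/tmp/example-kubeconfig",
            helm="/usr/bin/helm3",
            name="ldap-0001",
            atomic="--atomic",
            params="--set replicaCount=2",
            timeout="--timeout 300s",
            ns="--namespace example-ns",
            model="stable/openldap",
            ver="--version 1.2.3",
        )
    )
    # KUBECONFIG is set explicitly on the command line, in addition to the env
    # dict passed to _local_async_exec().
    print(command)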
 
-    def _get_upgrade_command(self, kdu_model: str, kdu_instance: str, namespace: str,
-                             params_str: str, version: str, atomic: bool, timeout: float) -> str:
+    def _get_upgrade_scale_command(
+        self,
+        kdu_model: str,
+        kdu_instance: str,
+        namespace: str,
+        scale: int,
+        version: str,
+        atomic: bool,
+        replica_str: str,
+        timeout: float,
+        resource_name: str,
+        kubeconfig: str,
+    ) -> str:
+
+        timeout_str = ""
+        if timeout:
+            timeout_str = "--timeout {}s".format(timeout)
+
+        # atomic
+        atomic_str = ""
+        if atomic:
+            atomic_str = "--atomic"
+
+        # version
+        version_str = ""
+        if version:
+            version_str = "--version {}".format(version)
+
+        # namespace
+        namespace_str = ""
+        if namespace:
+            namespace_str = "--namespace {}".format(namespace)
+
+        # scale
+        if resource_name:
+            scale_dict = {"{}.{}".format(resource_name, replica_str): scale}
+        else:
+            scale_dict = {replica_str: scale}
+
+        scale_str = self._params_to_set_option(scale_dict)
+
+        command = (
+            "env KUBECONFIG={kubeconfig} {helm} upgrade {name} {model} {namespace} {atomic} --output yaml {scale} "
+            "{timeout} {ver}"
+        ).format(
+            helm=self._helm_command,
+            name=kdu_instance,
+            namespace=namespace_str,
+            atomic=atomic_str,
+            scale=scale_str,
+            timeout=timeout_str,
+            model=kdu_model,
+            ver=version_str,
+            kubeconfig=kubeconfig,
+        )
+        return command
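A small sketch of how the scale value above is mapped to a --set style option
(standalone; "mongodb" and "replicaCount" are example values, and the base-class
_params_to_set_option() is assumed to render the dict as "--set key=value"):

    def build_scale_dict(resource_name: str, replica_str: str, scale: int) -> dict:
        # With a named resource the replica path is prefixed, e.g.
        # {"mongodb.replicaCount": 3}; otherwise it is just {"replicaCount": 3}.
        if resource_name:
            return {"{}.{}".format(resource_name, replica_str): scale}
        return {replica_str: scale}

    assert build_scale_dict("mongodb", "replicaCount", 3) == {"mongodb.replicaCount": 3}
    assert build_scale_dict("", "replicaCount", 3) == {"replicaCount": 3}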
+
+    def _get_upgrade_command(
+        self,
+        kdu_model: str,
+        kdu_instance: str,
+        namespace: str,
+        params_str: str,
+        version: str,
+        atomic: bool,
+        timeout: float,
+        kubeconfig: str,
+    ) -> str:
 
         timeout_str = ""
         if timeout:
@@ -384,26 +554,44 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
             namespace_str = "--namespace {}".format(namespace)
 
         command = (
-            "{helm} upgrade {name} {model} {namespace} {atomic} --output yaml {params} "
-            "{timeout}  {ver}".format(
-                helm=self._helm_command,
-                name=kdu_instance,
-                namespace=namespace_str,
-                atomic=atomic_str,
-                params=params_str,
-                timeout=timeout_str,
-                model=kdu_model,
-                ver=version_str,
-            )
+            "env KUBECONFIG={kubeconfig} {helm} upgrade {name} {model} {namespace} {atomic} "
+            "--output yaml {params} {timeout} {ver}"
+        ).format(
+            kubeconfig=kubeconfig,
+            helm=self._helm_command,
+            name=kdu_instance,
+            namespace=namespace_str,
+            atomic=atomic_str,
+            params=params_str,
+            timeout=timeout_str,
+            model=kdu_model,
+            ver=version_str,
         )
         return command
 
-    def _get_rollback_command(self, kdu_instance: str, namespace: str, revision: float) -> str:
-        return "{} rollback {} {} --namespace={} --wait".format(
-            self._helm_command, kdu_instance, revision, namespace
+    def _get_rollback_command(
+        self, kdu_instance: str, namespace: str, revision: float, kubeconfig: str
+    ) -> str:
+        return "env KUBECONFIG={} {} rollback {} {} --namespace={} --wait".format(
+            kubeconfig, self._helm_command, kdu_instance, revision, namespace
         )
 
-    def _get_uninstall_command(self, kdu_instance: str, namespace: str) -> str:
+    def _get_uninstall_command(
+        self, kdu_instance: str, namespace: str, kubeconfig: str
+    ) -> str:
+
+        return "env KUBECONFIG={} {} uninstall {} --namespace={}".format(
+            kubeconfig, self._helm_command, kdu_instance, namespace
+        )
 
-        return "{} uninstall {} --namespace={}".format(
-            self._helm_command, kdu_instance, namespace)
+    def _get_helm_chart_repos_ids(self, cluster_uuid) -> list:
+        repo_ids = []
+        cluster_filter = {"_admin.helm-chart-v3.id": cluster_uuid}
+        cluster = self.db.get_one("k8sclusters", cluster_filter)
+        if cluster:
+            repo_ids = cluster.get("_admin").get("helm_chart_repos") or []
+            return repo_ids
+        else:
+            raise K8sException(
+                "k8cluster with helm-id : {} not found".format(cluster_uuid)
+            )
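For context, a sketch of the k8scluster record shape the lookup above expects
(field names are taken from the filter and getters in the method; the ids and
repo list contents are made-up values):

    # Assumed document shape in the "k8sclusters" collection:
    example_cluster = {
        "_id": "a1b2c3d4",                                    # made-up
        "_admin": {
            "helm-chart-v3": {"id": "cluster-0001"},          # matched against cluster_uuid
            "helm_chart_repos": ["repo-0001", "repo-0002"],   # ids returned by the method
        },
    }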