Revert "Bug 1964 fixed: removed the variable cluster_uuid from init_env method" 13/12213/2
authorDavid Garcia <david.garcia@canonical.com>
Fri, 17 Jun 2022 12:27:54 +0000 (14:27 +0200)
committerDavid Garcia <david.garcia@canonical.com>
Fri, 17 Jun 2022 14:46:58 +0000 (16:46 +0200)
This reverts commit 25a1392579da2e8e4789e0b8f35abbaa372fde08.

Change-Id: If90fe74a1d7d4b915bbb839ace19999403aa5548
Signed-off-by: David Garcia <david.garcia@canonical.com>
n2vc/k8s_helm3_conn.py
n2vc/k8s_helm_base_conn.py
n2vc/k8s_helm_conn.py
n2vc/tests/unit/test_k8s_helm3_conn.py
n2vc/tests/unit/test_k8s_helm_conn.py

index f8de7c4..298c44e 100644 (file)
@@ -98,24 +98,24 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
 
         :return: True if successful
         """
-
-        self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid))
+        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+        self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_id))
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         # init env, paths
         paths, env = self._init_paths_env(
-            cluster_name=cluster_uuid, create_if_not_exist=True
+            cluster_name=cluster_id, create_if_not_exist=True
         )
 
         # for helm3 if namespace does not exist must create it
         if namespace and namespace != "kube-system":
-            if not await self._namespace_exists(cluster_uuid, namespace):
+            if not await self._namespace_exists(cluster_id, namespace):
                 try:
-                    await self._create_namespace(cluster_uuid, namespace)
+                    await self._create_namespace(cluster_id, namespace)
                 except Exception as e:
-                    if not await self._namespace_exists(cluster_uuid, namespace):
+                    if not await self._namespace_exists(cluster_id, namespace):
                         err_msg = (
                             "namespace {} does not exist in cluster_id {} "
                             "error message: ".format(namespace, e)
@@ -124,7 +124,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
                         raise K8sException(err_msg)
 
         await self._install_impl(
-            cluster_uuid,
+            cluster_id,
             kdu_model,
             paths,
             env,
@@ -138,7 +138,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
         )
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_uuid)
+        self.fs.reverse_sync(from_path=cluster_id)
 
         self.log.debug("Returning kdu_instance {}".format(kdu_instance))
         return True
@@ -315,10 +315,12 @@ class K8sHelm3Connector(K8sHelmBaseConnector):
             if namespace not in namespaces:
                 await self._create_namespace(cluster_id, namespace)
 
-        repo_list = await self.repo_list(cluster_id)
+        # If the default repo is not included, add it
+        cluster_uuid = "{}:{}".format(namespace, cluster_id)
+        repo_list = await self.repo_list(cluster_uuid)
         stable_repo = [repo for repo in repo_list if repo["name"] == "stable"]
         if not stable_repo and self._stable_repo_url:
-            await self.repo_add(cluster_id, "stable", self._stable_repo_url)
+            await self.repo_add(cluster_uuid, "stable", self._stable_repo_url)
 
         # Returns False as no software needs to be uninstalled
         return False
index 176d9fc..b6aa7e9 100644 (file)
@@ -90,18 +90,14 @@ class K8sHelmBaseConnector(K8sConnector):
         if self._stable_repo_url == "None":
             self._stable_repo_url = None
 
-    def _get_namespace(self, cluster_uuid: str) -> str:
+    @staticmethod
+    def _get_namespace_cluster_id(cluster_uuid: str) -> (str, str):
         """
-        Obtains the namespace used by the cluster with the uuid passed by argument
-
-        param: cluster_uuid: cluster's uuid
+        Parses cluster_uuid stored at database that can be either 'namespace:cluster_id' or only
+        cluster_id for backward compatibility
         """
-
-        # first, obtain the cluster corresponding to the uuid passed by argument
-        k8scluster = self.db.get_one(
-            "k8sclusters", q_filter={"_id": cluster_uuid}, fail_on_empty=False
-        )
-        return k8scluster.get("namespace")
+        namespace, _, cluster_id = cluster_uuid.rpartition(":")
+        return namespace, cluster_id
 
     async def init_env(
         self,
@@ -125,9 +121,11 @@ class K8sHelmBaseConnector(K8sConnector):
         """
 
         if reuse_cluster_uuid:
-            cluster_id = reuse_cluster_uuid
+            namespace_, cluster_id = self._get_namespace_cluster_id(reuse_cluster_uuid)
+            namespace = namespace_ or namespace
         else:
             cluster_id = str(uuid4())
+        cluster_uuid = "{}:{}".format(namespace, cluster_id)
 
         self.log.debug(
             "Initializing K8S Cluster {}. namespace: {}".format(cluster_id, namespace)
@@ -149,24 +147,25 @@ class K8sHelmBaseConnector(K8sConnector):
 
         self.log.info("Cluster {} initialized".format(cluster_id))
 
-        return cluster_id, n2vc_installed_sw
+        return cluster_uuid, n2vc_installed_sw
 
     async def repo_add(
         self, cluster_uuid: str, name: str, url: str, repo_type: str = "chart"
     ):
+        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
         self.log.debug(
             "Cluster {}, adding {} repository {}. URL: {}".format(
-                cluster_uuid, repo_type, name, url
+                cluster_id, repo_type, name, url
             )
         )
 
         # init_env
         paths, env = self._init_paths_env(
-            cluster_name=cluster_uuid, create_if_not_exist=True
+            cluster_name=cluster_id, create_if_not_exist=True
         )
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         # helm repo add name url
         command = "env KUBECONFIG={} {} repo add {} {}".format(
@@ -221,15 +220,16 @@ class K8sHelmBaseConnector(K8sConnector):
         :return: list of registered repositories: [ (name, url) .... ]
         """
 
-        self.log.debug("list repositories for cluster {}".format(cluster_uuid))
+        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+        self.log.debug("list repositories for cluster {}".format(cluster_id))
 
         # config filename
         paths, env = self._init_paths_env(
-            cluster_name=cluster_uuid, create_if_not_exist=True
+            cluster_name=cluster_id, create_if_not_exist=True
         )
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         command = "env KUBECONFIG={} {} repo list --output yaml".format(
             paths["kube_config"], self._helm_command
@@ -241,7 +241,7 @@ class K8sHelmBaseConnector(K8sConnector):
         )
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_uuid)
+        self.fs.reverse_sync(from_path=cluster_id)
 
         if _rc == 0:
             if output and len(output) > 0:
@@ -254,17 +254,17 @@ class K8sHelmBaseConnector(K8sConnector):
             return []
 
     async def repo_remove(self, cluster_uuid: str, name: str):
-        self.log.debug(
-            "remove {} repositories for cluster {}".format(name, cluster_uuid)
-        )
+
+        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+        self.log.debug("remove {} repositories for cluster {}".format(name, cluster_id))
 
         # init env, paths
         paths, env = self._init_paths_env(
-            cluster_name=cluster_uuid, create_if_not_exist=True
+            cluster_name=cluster_id, create_if_not_exist=True
         )
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         command = "env KUBECONFIG={} {} repo remove {}".format(
             paths["kube_config"], self._helm_command, name
@@ -274,7 +274,7 @@ class K8sHelmBaseConnector(K8sConnector):
         )
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_uuid)
+        self.fs.reverse_sync(from_path=cluster_id)
 
     async def reset(
         self,
@@ -293,15 +293,15 @@ class K8sHelmBaseConnector(K8sConnector):
         :param kwargs: Additional parameters (None yet)
         :return: Returns True if successful or raises an exception.
         """
-        namespace = self._get_namespace(cluster_uuid=cluster_uuid)
+        namespace, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
         self.log.debug(
             "Resetting K8s environment. cluster uuid: {} uninstall={}".format(
-                cluster_uuid, uninstall_sw
+                cluster_id, uninstall_sw
             )
         )
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         # uninstall releases if needed.
         if uninstall_sw:
@@ -330,20 +330,20 @@ class K8sHelmBaseConnector(K8sConnector):
                 else:
                     msg = (
                         "Cluster uuid: {} has releases and not force. Leaving K8s helm environment"
-                    ).format(cluster_uuid)
+                    ).format(cluster_id)
                     self.log.warn(msg)
                     uninstall_sw = (
                         False  # Allow to remove k8s cluster without removing Tiller
                     )
 
         if uninstall_sw:
-            await self._uninstall_sw(cluster_id=cluster_uuid, namespace=namespace)
+            await self._uninstall_sw(cluster_id, namespace)
 
         # delete cluster directory
-        self.log.debug("Removing directory {}".format(cluster_uuid))
-        self.fs.file_delete(cluster_uuid, ignore_non_exist=True)
+        self.log.debug("Removing directory {}".format(cluster_id))
+        self.fs.file_delete(cluster_id, ignore_non_exist=True)
         # Also remove the local directory if it still exists
-        direct = self.fs.path + "/" + cluster_uuid
+        direct = self.fs.path + "/" + cluster_id
         shutil.rmtree(direct, ignore_errors=True)
 
         return True
@@ -461,10 +461,11 @@ class K8sHelmBaseConnector(K8sConnector):
         params: dict = None,
         db_dict: dict = None,
     ):
-        self.log.debug("upgrading {} in cluster {}".format(kdu_model, cluster_uuid))
+        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+        self.log.debug("upgrading {} in cluster {}".format(kdu_model, cluster_id))
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         # look for instance to obtain namespace
         instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
@@ -473,15 +474,15 @@ class K8sHelmBaseConnector(K8sConnector):
 
         # init env, paths
         paths, env = self._init_paths_env(
-            cluster_name=cluster_uuid, create_if_not_exist=True
+            cluster_name=cluster_id, create_if_not_exist=True
         )
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         # params to str
         params_str, file_to_delete = self._params_to_file_option(
-            cluster_id=cluster_uuid, params=params
+            cluster_id=cluster_id, params=params
         )
 
         # version
@@ -520,7 +521,7 @@ class K8sHelmBaseConnector(K8sConnector):
             # write status in another task
             status_task = asyncio.ensure_future(
                 coro_or_future=self._store_status(
-                    cluster_id=cluster_uuid,
+                    cluster_id=cluster_id,
                     kdu_instance=kdu_instance,
                     namespace=instance_info["namespace"],
                     db_dict=db_dict,
@@ -548,7 +549,7 @@ class K8sHelmBaseConnector(K8sConnector):
 
         # write final status
         await self._store_status(
-            cluster_id=cluster_uuid,
+            cluster_id=cluster_id,
             kdu_instance=kdu_instance,
             namespace=instance_info["namespace"],
             db_dict=db_dict,
@@ -563,7 +564,7 @@ class K8sHelmBaseConnector(K8sConnector):
             raise K8sException(msg)
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_uuid)
+        self.fs.reverse_sync(from_path=cluster_id)
 
         # return new revision number
         instance = await self.get_instance_info(
@@ -597,14 +598,16 @@ class K8sHelmBaseConnector(K8sConnector):
     async def rollback(
         self, cluster_uuid: str, kdu_instance: str, revision=0, db_dict: dict = None
     ):
+
+        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
         self.log.debug(
             "rollback kdu_instance {} to revision {} from cluster {}".format(
-                kdu_instance, revision, cluster_uuid
+                kdu_instance, revision, cluster_id
             )
         )
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         # look for instance to obtain namespace
         instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
@@ -613,11 +616,11 @@ class K8sHelmBaseConnector(K8sConnector):
 
         # init env, paths
         paths, env = self._init_paths_env(
-            cluster_name=cluster_uuid, create_if_not_exist=True
+            cluster_name=cluster_id, create_if_not_exist=True
         )
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         command = self._get_rollback_command(
             kdu_instance, instance_info["namespace"], revision, paths["kube_config"]
@@ -634,7 +637,7 @@ class K8sHelmBaseConnector(K8sConnector):
         # write status in another task
         status_task = asyncio.ensure_future(
             coro_or_future=self._store_status(
-                cluster_id=cluster_uuid,
+                cluster_id=cluster_id,
                 kdu_instance=kdu_instance,
                 namespace=instance_info["namespace"],
                 db_dict=db_dict,
@@ -653,7 +656,7 @@ class K8sHelmBaseConnector(K8sConnector):
 
         # write final status
         await self._store_status(
-            cluster_id=cluster_uuid,
+            cluster_id=cluster_id,
             kdu_instance=kdu_instance,
             namespace=instance_info["namespace"],
             db_dict=db_dict,
@@ -668,7 +671,7 @@ class K8sHelmBaseConnector(K8sConnector):
             raise K8sException(msg)
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_uuid)
+        self.fs.reverse_sync(from_path=cluster_id)
 
         # return new revision number
         instance = await self.get_instance_info(
@@ -693,14 +696,13 @@ class K8sHelmBaseConnector(K8sConnector):
         :return: True if successful
         """
 
+        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
         self.log.debug(
-            "uninstall kdu_instance {} from cluster {}".format(
-                kdu_instance, cluster_uuid
-            )
+            "uninstall kdu_instance {} from cluster {}".format(kdu_instance, cluster_id)
         )
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         # look for instance to obtain namespace
         instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
@@ -709,11 +711,11 @@ class K8sHelmBaseConnector(K8sConnector):
             return True
         # init env, paths
         paths, env = self._init_paths_env(
-            cluster_name=cluster_uuid, create_if_not_exist=True
+            cluster_name=cluster_id, create_if_not_exist=True
         )
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         command = self._get_uninstall_command(
             kdu_instance, instance_info["namespace"], paths["kube_config"]
@@ -723,7 +725,7 @@ class K8sHelmBaseConnector(K8sConnector):
         )
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_uuid)
+        self.fs.reverse_sync(from_path=cluster_id)
 
         return self._output_to_table(output)
 
@@ -735,16 +737,17 @@ class K8sHelmBaseConnector(K8sConnector):
         :return:
         """
 
-        self.log.debug("list releases for cluster {}".format(cluster_uuid))
+        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+        self.log.debug("list releases for cluster {}".format(cluster_id))
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         # execute internal command
-        result = await self._instances_list(cluster_uuid)
+        result = await self._instances_list(cluster_id)
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_uuid)
+        self.fs.reverse_sync(from_path=cluster_id)
 
         return result
 
@@ -802,6 +805,7 @@ class K8sHelmBaseConnector(K8sConnector):
         - `external_ip` List of external ips (in case they are available)
         """
 
+        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
         self.log.debug(
             "get_services: cluster_uuid: {}, kdu_instance: {}".format(
                 cluster_uuid, kdu_instance
@@ -810,24 +814,24 @@ class K8sHelmBaseConnector(K8sConnector):
 
         # init env, paths
         paths, env = self._init_paths_env(
-            cluster_name=cluster_uuid, create_if_not_exist=True
+            cluster_name=cluster_id, create_if_not_exist=True
         )
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         # get list of services names for kdu
         service_names = await self._get_services(
-            cluster_uuid, kdu_instance, namespace, paths["kube_config"]
+            cluster_id, kdu_instance, namespace, paths["kube_config"]
         )
 
         service_list = []
         for service in service_names:
-            service = await self._get_service(cluster_uuid, service, namespace)
+            service = await self._get_service(cluster_id, service, namespace)
             service_list.append(service)
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_uuid)
+        self.fs.reverse_sync(from_path=cluster_id)
 
         return service_list
 
@@ -841,13 +845,15 @@ class K8sHelmBaseConnector(K8sConnector):
             )
         )
 
+        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
-        service = await self._get_service(cluster_uuid, service_name, namespace)
+        service = await self._get_service(cluster_id, service_name, namespace)
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_uuid)
+        self.fs.reverse_sync(from_path=cluster_id)
 
         return service
 
@@ -885,11 +891,13 @@ class K8sHelmBaseConnector(K8sConnector):
             )
         )
 
+        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         # get instance: needed to obtain namespace
-        instances = await self._instances_list(cluster_id=cluster_uuid)
+        instances = await self._instances_list(cluster_id=cluster_id)
         for instance in instances:
             if instance.get("name") == kdu_instance:
                 break
@@ -897,12 +905,12 @@ class K8sHelmBaseConnector(K8sConnector):
             # instance does not exist
             raise K8sException(
                 "Instance name: {} not found in cluster: {}".format(
-                    kdu_instance, cluster_uuid
+                    kdu_instance, cluster_id
                 )
             )
 
         status = await self._status_kdu(
-            cluster_id=cluster_uuid,
+            cluster_id=cluster_id,
             kdu_instance=kdu_instance,
             namespace=instance["namespace"],
             yaml_format=yaml_format,
@@ -910,7 +918,7 @@ class K8sHelmBaseConnector(K8sConnector):
         )
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_uuid)
+        self.fs.reverse_sync(from_path=cluster_id)
 
         return status
 
index b1ad379..f0cbb46 100644 (file)
@@ -132,18 +132,19 @@ class K8sHelmConnector(K8sHelmBaseConnector):
         :param kwargs: Additional parameters (None yet)
         :return: True if successful
         """
-        self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid))
+        _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+        self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_id))
 
         # sync local dir
-        self.fs.sync(from_path=cluster_uuid)
+        self.fs.sync(from_path=cluster_id)
 
         # init env, paths
         paths, env = self._init_paths_env(
-            cluster_name=cluster_uuid, create_if_not_exist=True
+            cluster_name=cluster_id, create_if_not_exist=True
         )
 
         await self._install_impl(
-            cluster_uuid,
+            cluster_id,
             kdu_model,
             paths,
             env,
@@ -157,7 +158,7 @@ class K8sHelmConnector(K8sHelmBaseConnector):
         )
 
         # sync fs
-        self.fs.reverse_sync(from_path=cluster_uuid)
+        self.fs.reverse_sync(from_path=cluster_id)
 
         self.log.debug("Returning kdu_instance {}".format(kdu_instance))
         return True
@@ -342,13 +343,15 @@ class K8sHelmConnector(K8sHelmBaseConnector):
             else:
                 self.log.info("Helm client already initialized")
 
-        repo_list = await self.repo_list(cluster_id)
+        # remove old stable repo and add new one
+        cluster_uuid = "{}:{}".format(namespace, cluster_id)
+        repo_list = await self.repo_list(cluster_uuid)
         for repo in repo_list:
             if repo["name"] == "stable" and repo["url"] != self._stable_repo_url:
                 self.log.debug("Add new stable repo url: {}")
-                await self.repo_remove(cluster_id, "stable")
+                await self.repo_remove(cluster_uuid, "stable")
                 if self._stable_repo_url:
-                    await self.repo_add(cluster_id, "stable", self._stable_repo_url)
+                    await self.repo_add(cluster_uuid, "stable", self._stable_repo_url)
                 break
 
         return n2vc_installed_sw
@@ -405,13 +408,8 @@ class K8sHelmConnector(K8sHelmBaseConnector):
             output, _rc = await self._local_async_exec(
                 command=command, raise_exception_on_error=False, env=env
             )
-            command = (
-                "{} --kubeconfig={} --namespace {} delete serviceaccount/{}".format(
-                    self.kubectl_command,
-                    paths["kube_config"],
-                    namespace,
-                    self.service_account,
-                )
+            command = "{} --kubeconfig={} --namespace kube-system delete serviceaccount/{}".format(
+                self.kubectl_command, paths["kube_config"], self.service_account
             )
             output, _rc = await self._local_async_exec(
                 command=command, raise_exception_on_error=False, env=env
index 25a5c0d..3163b85 100644 (file)
@@ -39,7 +39,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         self.fs.path = "./tmp/"
         self.namespace = "testk8s"
         self.cluster_id = "helm3_cluster_id"
-        self.cluster_uuid = self.cluster_id
+        self.cluster_uuid = "{}:{}".format(self.namespace, self.cluster_id)
         # pass fake kubectl and helm commands to make sure it does not call actual commands
         K8sHelm3Connector._check_file_exists = asynctest.Mock(return_value=True)
         cluster_dir = self.fs.path + self.cluster_id
@@ -66,8 +66,8 @@ class TestK8sHelm3Conn(asynctest.TestCase):
 
         self.assertEqual(
             k8scluster_uuid,
-            self.cluster_id,
-            "Check cluster_uuid",
+            "{}:{}".format(self.namespace, self.cluster_id),
+            "Check cluster_uuid format: <namespace>:<cluster_id>",
         )
         self.helm_conn._get_namespaces.assert_called_once_with(self.cluster_id)
         self.helm_conn._create_namespace.assert_called_once_with(
@@ -92,7 +92,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
 
         self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
         self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
+            from_path=self.cluster_uuid
         )
         self.assertEqual(
             self.helm_conn._local_async_exec.call_count,
@@ -559,7 +559,6 @@ class TestK8sHelm3Conn(asynctest.TestCase):
                 "updated": "2020-10-30 11:11:20.376744191 +0000 UTC",
             }
         ]
-        self.helm_conn._get_namespace = Mock(return_value=self.namespace)
         self.helm_conn._uninstall_sw = asynctest.CoroutineMock()
         self.helm_conn.instances_list = asynctest.CoroutineMock(return_value=instances)
         self.helm_conn.uninstall = asynctest.CoroutineMock()
@@ -569,9 +568,6 @@ class TestK8sHelm3Conn(asynctest.TestCase):
         self.helm_conn.fs.file_delete.assert_called_once_with(
             self.cluster_id, ignore_non_exist=True
         )
-        self.helm_conn._get_namespace.assert_called_once_with(
-            cluster_uuid=self.cluster_uuid
-        )
         self.helm_conn.instances_list.assert_called_once_with(
             cluster_uuid=self.cluster_uuid
         )
@@ -579,7 +575,7 @@ class TestK8sHelm3Conn(asynctest.TestCase):
             cluster_uuid=self.cluster_uuid, kdu_instance=kdu_instance
         )
         self.helm_conn._uninstall_sw.assert_called_once_with(
-            cluster_id=self.cluster_id, namespace=self.namespace
+            self.cluster_id, self.namespace
         )
 
     @asynctest.fail_on(active_handles=True)
index 9508b66..d6c7835 100644 (file)
@@ -38,7 +38,7 @@ class TestK8sHelmConn(asynctest.TestCase):
         self.namespace = "testk8s"
         self.service_account = "osm"
         self.cluster_id = "helm_cluster_id"
-        self.cluster_uuid = self.cluster_id
+        self.cluster_uuid = "{}:{}".format(self.namespace, self.cluster_id)
         # pass fake kubectl and helm commands to make sure it does not call actual commands
         K8sHelmConnector._check_file_exists = asynctest.Mock(return_value=True)
         K8sHelmConnector._local_async_exec = asynctest.CoroutineMock(
@@ -69,7 +69,7 @@ class TestK8sHelmConn(asynctest.TestCase):
 
         self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id)
         self.helm_conn.fs.reverse_sync.assert_called_once_with(
-            from_path=self.cluster_id
+            from_path=self.cluster_uuid
         )
         self.assertEqual(
             self.helm_conn._local_async_exec.call_count,
@@ -492,7 +492,6 @@ class TestK8sHelmConn(asynctest.TestCase):
                 "updated": "2020-10-30 11:11:20.376744191 +0000 UTC",
             }
         ]
-        self.helm_conn._get_namespace = Mock(return_value=self.namespace)
         self.helm_conn._uninstall_sw = asynctest.CoroutineMock()
         self.helm_conn.instances_list = asynctest.CoroutineMock(return_value=instances)
         self.helm_conn.uninstall = asynctest.CoroutineMock()
@@ -502,9 +501,6 @@ class TestK8sHelmConn(asynctest.TestCase):
         self.helm_conn.fs.file_delete.assert_called_once_with(
             self.cluster_id, ignore_non_exist=True
         )
-        self.helm_conn._get_namespace.assert_called_once_with(
-            cluster_uuid=self.cluster_uuid
-        )
         self.helm_conn.instances_list.assert_called_once_with(
             cluster_uuid=self.cluster_uuid
         )
@@ -512,7 +508,7 @@ class TestK8sHelmConn(asynctest.TestCase):
             cluster_uuid=self.cluster_uuid, kdu_instance=kdu_instance
         )
         self.helm_conn._uninstall_sw.assert_called_once_with(
-            cluster_id=self.cluster_id, namespace=self.namespace
+            self.cluster_id, self.namespace
         )
 
     @asynctest.fail_on(active_handles=True)
@@ -547,10 +543,8 @@ class TestK8sHelmConn(asynctest.TestCase):
         )
         call2_kargs = calls[2][1]
         command_2 = (
-            "/usr/bin/kubectl --kubeconfig={} --namespace {} delete "
-            "serviceaccount/{}".format(
-                self.kube_config, self.namespace, self.service_account
-            )
+            "/usr/bin/kubectl --kubeconfig={} --namespace kube-system delete "
+            "serviceaccount/{}".format(self.kube_config, self.service_account)
         )
         self.assertEqual(
             call2_kargs,