summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
5da5ad6)
This was applied to the class K8sHelmBaseConnector, and then returned
the variable cluster_id instead of cluster_uuid;
Also, refactored the overall Helm-related code in order to use the correct cluster uuid (and fixed the corresponding tests);
Also made a small fix in the function _uninstall_sw, to remove the service accounts when using different namespaces
Change-Id: Ibfdc82f325abf5fd27a225e73f949483a4e68fe2
Signed-off-by: Pedro Escaleira <escaleira@av.it.pt>
:return: True if successful
"""
:return: True if successful
"""
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
- self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_id))
+
+ self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid))
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
# init env, paths
paths, env = self._init_paths_env(
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_id, create_if_not_exist=True
+ cluster_name=cluster_uuid, create_if_not_exist=True
)
# for helm3 if namespace does not exist must create it
if namespace and namespace != "kube-system":
)
# for helm3 if namespace does not exist must create it
if namespace and namespace != "kube-system":
- if not await self._namespace_exists(cluster_id, namespace):
+ if not await self._namespace_exists(cluster_uuid, namespace):
- await self._create_namespace(cluster_id, namespace)
+ await self._create_namespace(cluster_uuid, namespace)
- if not await self._namespace_exists(cluster_id, namespace):
+ if not await self._namespace_exists(cluster_uuid, namespace):
err_msg = (
"namespace {} does not exist in cluster_id {} "
"error message: ".format(namespace, e)
err_msg = (
"namespace {} does not exist in cluster_id {} "
"error message: ".format(namespace, e)
raise K8sException(err_msg)
await self._install_impl(
raise K8sException(err_msg)
await self._install_impl(
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
self.log.debug("Returning kdu_instance {}".format(kdu_instance))
return True
self.log.debug("Returning kdu_instance {}".format(kdu_instance))
return True
if namespace not in namespaces:
await self._create_namespace(cluster_id, namespace)
if namespace not in namespaces:
await self._create_namespace(cluster_id, namespace)
- # If default repo is not included add
- cluster_uuid = "{}:{}".format(namespace, cluster_id)
- repo_list = await self.repo_list(cluster_uuid)
+ repo_list = await self.repo_list(cluster_id)
stable_repo = [repo for repo in repo_list if repo["name"] == "stable"]
if not stable_repo and self._stable_repo_url:
stable_repo = [repo for repo in repo_list if repo["name"] == "stable"]
if not stable_repo and self._stable_repo_url:
- await self.repo_add(cluster_uuid, "stable", self._stable_repo_url)
+ await self.repo_add(cluster_id, "stable", self._stable_repo_url)
# Returns False as no software needs to be uninstalled
return False
# Returns False as no software needs to be uninstalled
return False
if self._stable_repo_url == "None":
self._stable_repo_url = None
if self._stable_repo_url == "None":
self._stable_repo_url = None
- @staticmethod
- def _get_namespace_cluster_id(cluster_uuid: str) -> (str, str):
+ def _get_namespace(self, cluster_uuid: str) -> str:
- Parses cluster_uuid stored at database that can be either 'namespace:cluster_id' or only
- cluster_id for backward compatibility
+ Obtains the namespace used by the cluster with the uuid passed by argument
+
+ param: cluster_uuid: cluster's uuid
- namespace, _, cluster_id = cluster_uuid.rpartition(":")
- return namespace, cluster_id
+
+ # first, obtain the cluster corresponding to the uuid passed by argument
+ k8scluster = self.db.get_one(
+ "k8sclusters", q_filter={"_id": cluster_uuid}, fail_on_empty=False
+ )
+ return k8scluster.get("namespace")
async def init_env(
self,
async def init_env(
self,
"""
if reuse_cluster_uuid:
"""
if reuse_cluster_uuid:
- namespace_, cluster_id = self._get_namespace_cluster_id(reuse_cluster_uuid)
- namespace = namespace_ or namespace
+ cluster_id = reuse_cluster_uuid
else:
cluster_id = str(uuid4())
else:
cluster_id = str(uuid4())
- cluster_uuid = "{}:{}".format(namespace, cluster_id)
self.log.debug(
"Initializing K8S Cluster {}. namespace: {}".format(cluster_id, namespace)
self.log.debug(
"Initializing K8S Cluster {}. namespace: {}".format(cluster_id, namespace)
self.log.info("Cluster {} initialized".format(cluster_id))
self.log.info("Cluster {} initialized".format(cluster_id))
- return cluster_uuid, n2vc_installed_sw
+ return cluster_id, n2vc_installed_sw
async def repo_add(
self, cluster_uuid: str, name: str, url: str, repo_type: str = "chart"
):
async def repo_add(
self, cluster_uuid: str, name: str, url: str, repo_type: str = "chart"
):
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
self.log.debug(
"Cluster {}, adding {} repository {}. URL: {}".format(
self.log.debug(
"Cluster {}, adding {} repository {}. URL: {}".format(
- cluster_id, repo_type, name, url
+ cluster_uuid, repo_type, name, url
)
)
# init_env
paths, env = self._init_paths_env(
)
)
# init_env
paths, env = self._init_paths_env(
- cluster_name=cluster_id, create_if_not_exist=True
+ cluster_name=cluster_uuid, create_if_not_exist=True
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
# helm repo update
command = "env KUBECONFIG={} {} repo update".format(
# helm repo update
command = "env KUBECONFIG={} {} repo update".format(
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
async def repo_list(self, cluster_uuid: str) -> list:
"""
async def repo_list(self, cluster_uuid: str) -> list:
"""
:return: list of registered repositories: [ (name, url) .... ]
"""
:return: list of registered repositories: [ (name, url) .... ]
"""
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
- self.log.debug("list repositories for cluster {}".format(cluster_id))
+ self.log.debug("list repositories for cluster {}".format(cluster_uuid))
# config filename
paths, env = self._init_paths_env(
# config filename
paths, env = self._init_paths_env(
- cluster_name=cluster_id, create_if_not_exist=True
+ cluster_name=cluster_uuid, create_if_not_exist=True
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
command = "env KUBECONFIG={} {} repo list --output yaml".format(
paths["kube_config"], self._helm_command
command = "env KUBECONFIG={} {} repo list --output yaml".format(
paths["kube_config"], self._helm_command
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
if _rc == 0:
if output and len(output) > 0:
if _rc == 0:
if output and len(output) > 0:
return []
async def repo_remove(self, cluster_uuid: str, name: str):
return []
async def repo_remove(self, cluster_uuid: str, name: str):
-
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
- self.log.debug("remove {} repositories for cluster {}".format(name, cluster_id))
+ self.log.debug(
+ "remove {} repositories for cluster {}".format(name, cluster_uuid)
+ )
# init env, paths
paths, env = self._init_paths_env(
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_id, create_if_not_exist=True
+ cluster_name=cluster_uuid, create_if_not_exist=True
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
command = "env KUBECONFIG={} {} repo remove {}".format(
paths["kube_config"], self._helm_command, name
command = "env KUBECONFIG={} {} repo remove {}".format(
paths["kube_config"], self._helm_command, name
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
:param kwargs: Additional parameters (None yet)
:return: Returns True if successful or raises an exception.
"""
:param kwargs: Additional parameters (None yet)
:return: Returns True if successful or raises an exception.
"""
- namespace, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+ namespace = self._get_namespace(cluster_uuid=cluster_uuid)
self.log.debug(
"Resetting K8s environment. cluster uuid: {} uninstall={}".format(
self.log.debug(
"Resetting K8s environment. cluster uuid: {} uninstall={}".format(
- cluster_id, uninstall_sw
+ cluster_uuid, uninstall_sw
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
# uninstall releases if needed.
if uninstall_sw:
# uninstall releases if needed.
if uninstall_sw:
else:
msg = (
"Cluster uuid: {} has releases and not force. Leaving K8s helm environment"
else:
msg = (
"Cluster uuid: {} has releases and not force. Leaving K8s helm environment"
self.log.warn(msg)
uninstall_sw = (
False # Allow to remove k8s cluster without removing Tiller
)
if uninstall_sw:
self.log.warn(msg)
uninstall_sw = (
False # Allow to remove k8s cluster without removing Tiller
)
if uninstall_sw:
- await self._uninstall_sw(cluster_id, namespace)
+ await self._uninstall_sw(cluster_id=cluster_uuid, namespace=namespace)
# delete cluster directory
# delete cluster directory
- self.log.debug("Removing directory {}".format(cluster_id))
- self.fs.file_delete(cluster_id, ignore_non_exist=True)
+ self.log.debug("Removing directory {}".format(cluster_uuid))
+ self.fs.file_delete(cluster_uuid, ignore_non_exist=True)
# Remove also local directorio if still exist
# Remove also local directorio if still exist
- direct = self.fs.path + "/" + cluster_id
+ direct = self.fs.path + "/" + cluster_uuid
shutil.rmtree(direct, ignore_errors=True)
return True
shutil.rmtree(direct, ignore_errors=True)
return True
params: dict = None,
db_dict: dict = None,
):
params: dict = None,
db_dict: dict = None,
):
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
- self.log.debug("upgrading {} in cluster {}".format(kdu_model, cluster_id))
+ self.log.debug("upgrading {} in cluster {}".format(kdu_model, cluster_uuid))
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
# look for instance to obtain namespace
instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
# look for instance to obtain namespace
instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
# init env, paths
paths, env = self._init_paths_env(
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_id, create_if_not_exist=True
+ cluster_name=cluster_uuid, create_if_not_exist=True
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
# params to str
params_str, file_to_delete = self._params_to_file_option(
# params to str
params_str, file_to_delete = self._params_to_file_option(
- cluster_id=cluster_id, params=params
+ cluster_id=cluster_uuid, params=params
# write status in another task
status_task = asyncio.ensure_future(
coro_or_future=self._store_status(
# write status in another task
status_task = asyncio.ensure_future(
coro_or_future=self._store_status(
+ cluster_id=cluster_uuid,
kdu_instance=kdu_instance,
namespace=instance_info["namespace"],
db_dict=db_dict,
kdu_instance=kdu_instance,
namespace=instance_info["namespace"],
db_dict=db_dict,
# write final status
await self._store_status(
# write final status
await self._store_status(
+ cluster_id=cluster_uuid,
kdu_instance=kdu_instance,
namespace=instance_info["namespace"],
db_dict=db_dict,
kdu_instance=kdu_instance,
namespace=instance_info["namespace"],
db_dict=db_dict,
raise K8sException(msg)
# sync fs
raise K8sException(msg)
# sync fs
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
# return new revision number
instance = await self.get_instance_info(
# return new revision number
instance = await self.get_instance_info(
async def rollback(
self, cluster_uuid: str, kdu_instance: str, revision=0, db_dict: dict = None
):
async def rollback(
self, cluster_uuid: str, kdu_instance: str, revision=0, db_dict: dict = None
):
-
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
self.log.debug(
"rollback kdu_instance {} to revision {} from cluster {}".format(
self.log.debug(
"rollback kdu_instance {} to revision {} from cluster {}".format(
- kdu_instance, revision, cluster_id
+ kdu_instance, revision, cluster_uuid
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
# look for instance to obtain namespace
instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
# look for instance to obtain namespace
instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
# init env, paths
paths, env = self._init_paths_env(
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_id, create_if_not_exist=True
+ cluster_name=cluster_uuid, create_if_not_exist=True
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
command = self._get_rollback_command(
kdu_instance, instance_info["namespace"], revision, paths["kube_config"]
command = self._get_rollback_command(
kdu_instance, instance_info["namespace"], revision, paths["kube_config"]
# write status in another task
status_task = asyncio.ensure_future(
coro_or_future=self._store_status(
# write status in another task
status_task = asyncio.ensure_future(
coro_or_future=self._store_status(
+ cluster_id=cluster_uuid,
kdu_instance=kdu_instance,
namespace=instance_info["namespace"],
db_dict=db_dict,
kdu_instance=kdu_instance,
namespace=instance_info["namespace"],
db_dict=db_dict,
# write final status
await self._store_status(
# write final status
await self._store_status(
+ cluster_id=cluster_uuid,
kdu_instance=kdu_instance,
namespace=instance_info["namespace"],
db_dict=db_dict,
kdu_instance=kdu_instance,
namespace=instance_info["namespace"],
db_dict=db_dict,
raise K8sException(msg)
# sync fs
raise K8sException(msg)
# sync fs
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
# return new revision number
instance = await self.get_instance_info(
# return new revision number
instance = await self.get_instance_info(
:return: True if successful
"""
:return: True if successful
"""
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
- "uninstall kdu_instance {} from cluster {}".format(kdu_instance, cluster_id)
+ "uninstall kdu_instance {} from cluster {}".format(
+ kdu_instance, cluster_uuid
+ )
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
# look for instance to obtain namespace
instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
# look for instance to obtain namespace
instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
return True
# init env, paths
paths, env = self._init_paths_env(
return True
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_id, create_if_not_exist=True
+ cluster_name=cluster_uuid, create_if_not_exist=True
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
command = self._get_uninstall_command(
kdu_instance, instance_info["namespace"], paths["kube_config"]
command = self._get_uninstall_command(
kdu_instance, instance_info["namespace"], paths["kube_config"]
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
return self._output_to_table(output)
return self._output_to_table(output)
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
- self.log.debug("list releases for cluster {}".format(cluster_id))
+ self.log.debug("list releases for cluster {}".format(cluster_uuid))
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
# execute internal command
# execute internal command
- result = await self._instances_list(cluster_id)
+ result = await self._instances_list(cluster_uuid)
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
- `external_ip` List of external ips (in case they are available)
"""
- `external_ip` List of external ips (in case they are available)
"""
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
self.log.debug(
"get_services: cluster_uuid: {}, kdu_instance: {}".format(
cluster_uuid, kdu_instance
self.log.debug(
"get_services: cluster_uuid: {}, kdu_instance: {}".format(
cluster_uuid, kdu_instance
# init env, paths
paths, env = self._init_paths_env(
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_id, create_if_not_exist=True
+ cluster_name=cluster_uuid, create_if_not_exist=True
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
# get list of services names for kdu
service_names = await self._get_services(
# get list of services names for kdu
service_names = await self._get_services(
- cluster_id, kdu_instance, namespace, paths["kube_config"]
+ cluster_uuid, kdu_instance, namespace, paths["kube_config"]
)
service_list = []
for service in service_names:
)
service_list = []
for service in service_names:
- service = await self._get_service(cluster_id, service, namespace)
+ service = await self._get_service(cluster_uuid, service, namespace)
service_list.append(service)
# sync fs
service_list.append(service)
# sync fs
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
-
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
- service = await self._get_service(cluster_id, service_name, namespace)
+ service = await self._get_service(cluster_uuid, service_name, namespace)
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
-
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
# get instance: needed to obtain namespace
# get instance: needed to obtain namespace
- instances = await self._instances_list(cluster_id=cluster_id)
+ instances = await self._instances_list(cluster_id=cluster_uuid)
for instance in instances:
if instance.get("name") == kdu_instance:
break
for instance in instances:
if instance.get("name") == kdu_instance:
break
# instance does not exist
raise K8sException(
"Instance name: {} not found in cluster: {}".format(
# instance does not exist
raise K8sException(
"Instance name: {} not found in cluster: {}".format(
- kdu_instance, cluster_id
+ kdu_instance, cluster_uuid
)
)
status = await self._status_kdu(
)
)
status = await self._status_kdu(
+ cluster_id=cluster_uuid,
kdu_instance=kdu_instance,
namespace=instance["namespace"],
show_error_log=True,
kdu_instance=kdu_instance,
namespace=instance["namespace"],
show_error_log=True,
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
:param kwargs: Additional parameters (None yet)
:return: True if successful
"""
:param kwargs: Additional parameters (None yet)
:return: True if successful
"""
- _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
- self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_id))
+ self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid))
- self.fs.sync(from_path=cluster_id)
+ self.fs.sync(from_path=cluster_uuid)
# init env, paths
paths, env = self._init_paths_env(
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_id, create_if_not_exist=True
+ cluster_name=cluster_uuid, create_if_not_exist=True
)
await self._install_impl(
)
await self._install_impl(
- self.fs.reverse_sync(from_path=cluster_id)
+ self.fs.reverse_sync(from_path=cluster_uuid)
self.log.debug("Returning kdu_instance {}".format(kdu_instance))
return True
self.log.debug("Returning kdu_instance {}".format(kdu_instance))
return True
else:
self.log.info("Helm client already initialized")
else:
self.log.info("Helm client already initialized")
- # remove old stable repo and add new one
- cluster_uuid = "{}:{}".format(namespace, cluster_id)
- repo_list = await self.repo_list(cluster_uuid)
+ repo_list = await self.repo_list(cluster_id)
for repo in repo_list:
if repo["name"] == "stable" and repo["url"] != self._stable_repo_url:
self.log.debug("Add new stable repo url: {}")
for repo in repo_list:
if repo["name"] == "stable" and repo["url"] != self._stable_repo_url:
self.log.debug("Add new stable repo url: {}")
- await self.repo_remove(cluster_uuid, "stable")
+ await self.repo_remove(cluster_id, "stable")
if self._stable_repo_url:
if self._stable_repo_url:
- await self.repo_add(cluster_uuid, "stable", self._stable_repo_url)
+ await self.repo_add(cluster_id, "stable", self._stable_repo_url)
break
return n2vc_installed_sw
break
return n2vc_installed_sw
output, _rc = await self._local_async_exec(
command=command, raise_exception_on_error=False, env=env
)
output, _rc = await self._local_async_exec(
command=command, raise_exception_on_error=False, env=env
)
- command = "{} --kubeconfig={} --namespace kube-system delete serviceaccount/{}".format(
- self.kubectl_command, paths["kube_config"], self.service_account
+ command = (
+ "{} --kubeconfig={} --namespace {} delete serviceaccount/{}".format(
+ self.kubectl_command,
+ paths["kube_config"],
+ namespace,
+ self.service_account,
+ )
)
output, _rc = await self._local_async_exec(
command=command, raise_exception_on_error=False, env=env
)
output, _rc = await self._local_async_exec(
command=command, raise_exception_on_error=False, env=env
self.fs.path = "./tmp/"
self.namespace = "testk8s"
self.cluster_id = "helm3_cluster_id"
self.fs.path = "./tmp/"
self.namespace = "testk8s"
self.cluster_id = "helm3_cluster_id"
- self.cluster_uuid = "{}:{}".format(self.namespace, self.cluster_id)
+ self.cluster_uuid = self.cluster_id
# pass fake kubectl and helm commands to make sure it does not call actual commands
K8sHelm3Connector._check_file_exists = asynctest.Mock(return_value=True)
cluster_dir = self.fs.path + self.cluster_id
# pass fake kubectl and helm commands to make sure it does not call actual commands
K8sHelm3Connector._check_file_exists = asynctest.Mock(return_value=True)
cluster_dir = self.fs.path + self.cluster_id
self.assertEqual(
k8scluster_uuid,
self.assertEqual(
k8scluster_uuid,
- "{}:{}".format(self.namespace, self.cluster_id),
- "Check cluster_uuid format: <namespace>.<cluster_id>",
+ self.cluster_id,
+ "Check cluster_uuid",
)
self.helm_conn._get_namespaces.assert_called_once_with(self.cluster_id)
self.helm_conn._create_namespace.assert_called_once_with(
)
self.helm_conn._get_namespaces.assert_called_once_with(self.cluster_id)
self.helm_conn._create_namespace.assert_called_once_with(
"updated": "2020-10-30 11:11:20.376744191 +0000 UTC",
}
]
"updated": "2020-10-30 11:11:20.376744191 +0000 UTC",
}
]
+ self.helm_conn._get_namespace = Mock(return_value=self.namespace)
self.helm_conn._uninstall_sw = asynctest.CoroutineMock()
self.helm_conn.instances_list = asynctest.CoroutineMock(return_value=instances)
self.helm_conn.uninstall = asynctest.CoroutineMock()
self.helm_conn._uninstall_sw = asynctest.CoroutineMock()
self.helm_conn.instances_list = asynctest.CoroutineMock(return_value=instances)
self.helm_conn.uninstall = asynctest.CoroutineMock()
self.helm_conn.fs.file_delete.assert_called_once_with(
self.cluster_id, ignore_non_exist=True
)
self.helm_conn.fs.file_delete.assert_called_once_with(
self.cluster_id, ignore_non_exist=True
)
+ self.helm_conn._get_namespace.assert_called_once_with(
+ cluster_uuid=self.cluster_uuid
+ )
self.helm_conn.instances_list.assert_called_once_with(
cluster_uuid=self.cluster_uuid
)
self.helm_conn.instances_list.assert_called_once_with(
cluster_uuid=self.cluster_uuid
)
cluster_uuid=self.cluster_uuid, kdu_instance=kdu_instance
)
self.helm_conn._uninstall_sw.assert_called_once_with(
cluster_uuid=self.cluster_uuid, kdu_instance=kdu_instance
)
self.helm_conn._uninstall_sw.assert_called_once_with(
- self.cluster_id, self.namespace
+ cluster_id=self.cluster_id, namespace=self.namespace
)
@asynctest.fail_on(active_handles=True)
)
@asynctest.fail_on(active_handles=True)
self.namespace = "testk8s"
self.service_account = "osm"
self.cluster_id = "helm_cluster_id"
self.namespace = "testk8s"
self.service_account = "osm"
self.cluster_id = "helm_cluster_id"
- self.cluster_uuid = "{}:{}".format(self.namespace, self.cluster_id)
+ self.cluster_uuid = self.cluster_id
# pass fake kubectl and helm commands to make sure it does not call actual commands
K8sHelmConnector._check_file_exists = asynctest.Mock(return_value=True)
K8sHelmConnector._local_async_exec = asynctest.CoroutineMock(
# pass fake kubectl and helm commands to make sure it does not call actual commands
K8sHelmConnector._check_file_exists = asynctest.Mock(return_value=True)
K8sHelmConnector._local_async_exec = asynctest.CoroutineMock(
"updated": "2020-10-30 11:11:20.376744191 +0000 UTC",
}
]
"updated": "2020-10-30 11:11:20.376744191 +0000 UTC",
}
]
+ self.helm_conn._get_namespace = Mock(return_value=self.namespace)
self.helm_conn._uninstall_sw = asynctest.CoroutineMock()
self.helm_conn.instances_list = asynctest.CoroutineMock(return_value=instances)
self.helm_conn.uninstall = asynctest.CoroutineMock()
self.helm_conn._uninstall_sw = asynctest.CoroutineMock()
self.helm_conn.instances_list = asynctest.CoroutineMock(return_value=instances)
self.helm_conn.uninstall = asynctest.CoroutineMock()
self.helm_conn.fs.file_delete.assert_called_once_with(
self.cluster_id, ignore_non_exist=True
)
self.helm_conn.fs.file_delete.assert_called_once_with(
self.cluster_id, ignore_non_exist=True
)
+ self.helm_conn._get_namespace.assert_called_once_with(
+ cluster_uuid=self.cluster_uuid
+ )
self.helm_conn.instances_list.assert_called_once_with(
cluster_uuid=self.cluster_uuid
)
self.helm_conn.instances_list.assert_called_once_with(
cluster_uuid=self.cluster_uuid
)
cluster_uuid=self.cluster_uuid, kdu_instance=kdu_instance
)
self.helm_conn._uninstall_sw.assert_called_once_with(
cluster_uuid=self.cluster_uuid, kdu_instance=kdu_instance
)
self.helm_conn._uninstall_sw.assert_called_once_with(
- self.cluster_id, self.namespace
+ cluster_id=self.cluster_id, namespace=self.namespace
)
@asynctest.fail_on(active_handles=True)
)
@asynctest.fail_on(active_handles=True)
)
call2_kargs = calls[2][1]
command_2 = (
)
call2_kargs = calls[2][1]
command_2 = (
- "/usr/bin/kubectl --kubeconfig={} --namespace kube-system delete "
- "serviceaccount/{}".format(self.kube_config, self.service_account)
+ "/usr/bin/kubectl --kubeconfig={} --namespace {} delete "
+ "serviceaccount/{}".format(
+ self.kube_config, self.namespace, self.service_account
+ )
)
self.assertEqual(
call2_kargs,
)
self.assertEqual(
call2_kargs,