if self._stable_repo_url == "None":
self._stable_repo_url = None
- def _get_namespace(self, cluster_uuid: str) -> str:
+ @staticmethod
+ def _get_namespace_cluster_id(cluster_uuid: str) -> tuple:
"""
- Obtains the namespace used by the cluster with the uuid passed by argument
-
- param: cluster_uuid: cluster's uuid
+ Parses the cluster_uuid stored in the database, which can be either 'namespace:cluster_id' or only
+ cluster_id for backward compatibility
"""
-
- # first, obtain the cluster corresponding to the uuid passed by argument
- k8scluster = self.db.get_one(
- "k8sclusters", q_filter={"_id": cluster_uuid}, fail_on_empty=False
- )
- return k8scluster.get("namespace")
+ namespace, _, cluster_id = cluster_uuid.rpartition(":")
+ return namespace, cluster_id
async def init_env(
self,
"""
if reuse_cluster_uuid:
- cluster_id = reuse_cluster_uuid
+ namespace_, cluster_id = self._get_namespace_cluster_id(reuse_cluster_uuid)
+ namespace = namespace_ or namespace
else:
cluster_id = str(uuid4())
+ cluster_uuid = "{}:{}".format(namespace, cluster_id)
self.log.debug(
"Initializing K8S Cluster {}. namespace: {}".format(cluster_id, namespace)
self.log.info("Cluster {} initialized".format(cluster_id))
- return cluster_id, n2vc_installed_sw
+ return cluster_uuid, n2vc_installed_sw
async def repo_add(
self, cluster_uuid: str, name: str, url: str, repo_type: str = "chart"
):
+ _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
self.log.debug(
"Cluster {}, adding {} repository {}. URL: {}".format(
- cluster_uuid, repo_type, name, url
+ cluster_id, repo_type, name, url
)
)
# init_env
paths, env = self._init_paths_env(
- cluster_name=cluster_uuid, create_if_not_exist=True
+ cluster_name=cluster_id, create_if_not_exist=True
)
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
# helm repo add name url
command = "env KUBECONFIG={} {} repo add {} {}".format(
:return: list of registered repositories: [ (name, url) .... ]
"""
- self.log.debug("list repositories for cluster {}".format(cluster_uuid))
+ _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+ self.log.debug("list repositories for cluster {}".format(cluster_id))
# config filename
paths, env = self._init_paths_env(
- cluster_name=cluster_uuid, create_if_not_exist=True
+ cluster_name=cluster_id, create_if_not_exist=True
)
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
command = "env KUBECONFIG={} {} repo list --output yaml".format(
paths["kube_config"], self._helm_command
)
# sync fs
- self.fs.reverse_sync(from_path=cluster_uuid)
+ self.fs.reverse_sync(from_path=cluster_id)
if _rc == 0:
if output and len(output) > 0:
return []
async def repo_remove(self, cluster_uuid: str, name: str):
- self.log.debug(
- "remove {} repositories for cluster {}".format(name, cluster_uuid)
- )
+
+ _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+ self.log.debug("remove {} repositories for cluster {}".format(name, cluster_id))
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_uuid, create_if_not_exist=True
+ cluster_name=cluster_id, create_if_not_exist=True
)
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
command = "env KUBECONFIG={} {} repo remove {}".format(
paths["kube_config"], self._helm_command, name
)
# sync fs
- self.fs.reverse_sync(from_path=cluster_uuid)
+ self.fs.reverse_sync(from_path=cluster_id)
async def reset(
self,
:param kwargs: Additional parameters (None yet)
:return: Returns True if successful or raises an exception.
"""
- namespace = self._get_namespace(cluster_uuid=cluster_uuid)
+ namespace, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
self.log.debug(
"Resetting K8s environment. cluster uuid: {} uninstall={}".format(
- cluster_uuid, uninstall_sw
+ cluster_id, uninstall_sw
)
)
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
# uninstall releases if needed.
if uninstall_sw:
else:
msg = (
"Cluster uuid: {} has releases and not force. Leaving K8s helm environment"
- ).format(cluster_uuid)
+ ).format(cluster_id)
self.log.warn(msg)
uninstall_sw = (
False # Allow to remove k8s cluster without removing Tiller
)
if uninstall_sw:
- await self._uninstall_sw(cluster_id=cluster_uuid, namespace=namespace)
+ await self._uninstall_sw(cluster_id, namespace)
# delete cluster directory
- self.log.debug("Removing directory {}".format(cluster_uuid))
- self.fs.file_delete(cluster_uuid, ignore_non_exist=True)
+ self.log.debug("Removing directory {}".format(cluster_id))
+ self.fs.file_delete(cluster_id, ignore_non_exist=True)
# Remove also local directorio if still exist
- direct = self.fs.path + "/" + cluster_uuid
+ direct = self.fs.path + "/" + cluster_id
shutil.rmtree(direct, ignore_errors=True)
return True
params: dict = None,
db_dict: dict = None,
):
- self.log.debug("upgrading {} in cluster {}".format(kdu_model, cluster_uuid))
+ _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+ self.log.debug("upgrading {} in cluster {}".format(kdu_model, cluster_id))
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
# look for instance to obtain namespace
instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_uuid, create_if_not_exist=True
+ cluster_name=cluster_id, create_if_not_exist=True
)
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
# params to str
params_str, file_to_delete = self._params_to_file_option(
- cluster_id=cluster_uuid, params=params
+ cluster_id=cluster_id, params=params
)
# version
# write status in another task
status_task = asyncio.ensure_future(
coro_or_future=self._store_status(
- cluster_id=cluster_uuid,
+ cluster_id=cluster_id,
kdu_instance=kdu_instance,
namespace=instance_info["namespace"],
db_dict=db_dict,
# write final status
await self._store_status(
- cluster_id=cluster_uuid,
+ cluster_id=cluster_id,
kdu_instance=kdu_instance,
namespace=instance_info["namespace"],
db_dict=db_dict,
raise K8sException(msg)
# sync fs
- self.fs.reverse_sync(from_path=cluster_uuid)
+ self.fs.reverse_sync(from_path=cluster_id)
# return new revision number
instance = await self.get_instance_info(
async def rollback(
self, cluster_uuid: str, kdu_instance: str, revision=0, db_dict: dict = None
):
+
+ _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
self.log.debug(
"rollback kdu_instance {} to revision {} from cluster {}".format(
- kdu_instance, revision, cluster_uuid
+ kdu_instance, revision, cluster_id
)
)
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
# look for instance to obtain namespace
instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_uuid, create_if_not_exist=True
+ cluster_name=cluster_id, create_if_not_exist=True
)
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
command = self._get_rollback_command(
kdu_instance, instance_info["namespace"], revision, paths["kube_config"]
# write status in another task
status_task = asyncio.ensure_future(
coro_or_future=self._store_status(
- cluster_id=cluster_uuid,
+ cluster_id=cluster_id,
kdu_instance=kdu_instance,
namespace=instance_info["namespace"],
db_dict=db_dict,
# write final status
await self._store_status(
- cluster_id=cluster_uuid,
+ cluster_id=cluster_id,
kdu_instance=kdu_instance,
namespace=instance_info["namespace"],
db_dict=db_dict,
raise K8sException(msg)
# sync fs
- self.fs.reverse_sync(from_path=cluster_uuid)
+ self.fs.reverse_sync(from_path=cluster_id)
# return new revision number
instance = await self.get_instance_info(
:return: True if successful
"""
+ _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
self.log.debug(
- "uninstall kdu_instance {} from cluster {}".format(
- kdu_instance, cluster_uuid
- )
+ "uninstall kdu_instance {} from cluster {}".format(kdu_instance, cluster_id)
)
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
# look for instance to obtain namespace
instance_info = await self.get_instance_info(cluster_uuid, kdu_instance)
return True
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_uuid, create_if_not_exist=True
+ cluster_name=cluster_id, create_if_not_exist=True
)
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
command = self._get_uninstall_command(
kdu_instance, instance_info["namespace"], paths["kube_config"]
)
# sync fs
- self.fs.reverse_sync(from_path=cluster_uuid)
+ self.fs.reverse_sync(from_path=cluster_id)
return self._output_to_table(output)
:return:
"""
- self.log.debug("list releases for cluster {}".format(cluster_uuid))
+ _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+ self.log.debug("list releases for cluster {}".format(cluster_id))
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
# execute internal command
- result = await self._instances_list(cluster_uuid)
+ result = await self._instances_list(cluster_id)
# sync fs
- self.fs.reverse_sync(from_path=cluster_uuid)
+ self.fs.reverse_sync(from_path=cluster_id)
return result
- `external_ip` List of external ips (in case they are available)
"""
+ _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
self.log.debug(
"get_services: cluster_uuid: {}, kdu_instance: {}".format(
cluster_uuid, kdu_instance
# init env, paths
paths, env = self._init_paths_env(
- cluster_name=cluster_uuid, create_if_not_exist=True
+ cluster_name=cluster_id, create_if_not_exist=True
)
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
# get list of services names for kdu
service_names = await self._get_services(
- cluster_uuid, kdu_instance, namespace, paths["kube_config"]
+ cluster_id, kdu_instance, namespace, paths["kube_config"]
)
service_list = []
for service in service_names:
- service = await self._get_service(cluster_uuid, service, namespace)
+ service = await self._get_service(cluster_id, service, namespace)
service_list.append(service)
# sync fs
- self.fs.reverse_sync(from_path=cluster_uuid)
+ self.fs.reverse_sync(from_path=cluster_id)
return service_list
)
)
+ _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
- service = await self._get_service(cluster_uuid, service_name, namespace)
+ service = await self._get_service(cluster_id, service_name, namespace)
# sync fs
- self.fs.reverse_sync(from_path=cluster_uuid)
+ self.fs.reverse_sync(from_path=cluster_id)
return service
)
)
+ _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
+
# sync local dir
- self.fs.sync(from_path=cluster_uuid)
+ self.fs.sync(from_path=cluster_id)
# get instance: needed to obtain namespace
- instances = await self._instances_list(cluster_id=cluster_uuid)
+ instances = await self._instances_list(cluster_id=cluster_id)
for instance in instances:
if instance.get("name") == kdu_instance:
break
# instance does not exist
raise K8sException(
"Instance name: {} not found in cluster: {}".format(
- kdu_instance, cluster_uuid
+ kdu_instance, cluster_id
)
)
status = await self._status_kdu(
- cluster_id=cluster_uuid,
+ cluster_id=cluster_id,
kdu_instance=kdu_instance,
namespace=instance["namespace"],
yaml_format=yaml_format,
)
# sync fs
- self.fs.reverse_sync(from_path=cluster_uuid)
+ self.fs.reverse_sync(from_path=cluster_id)
return status