)
db_cluster["current_operation"] = None
- # Retrieve credentials
+ # Retrieve credentials and subnets, and register the cluster in the k8sclusters collection
cluster_creds = None
+ db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
+ # Retrieve credentials
result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
# TODO: manage the case where the credentials are not available
if result:
db_cluster["credentials"] = cluster_creds
- # Update db_cluster
- self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
- self.update_default_profile_agekeys(db_cluster_copy)
- self.update_profile_state(db_cluster, workflow_status, resource_status)
-
- # Register the cluster in k8sclusters collection
- db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
- if cluster_creds:
+ # Retrieve subnets
+ if db_cluster["vim_type"] == "aws":
+ generic_object = await self.odu.list_object(
+ api_group="ec2.aws.upbound.io",
+ api_plural="subnets",
+ api_version="v1beta1",
+ )
+ private_subnet = []
+ public_subnet = []
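+ # Keep only subnets labelled with this cluster's name and split them by access (private/public)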
+ for subnet in generic_object:
+ labels = subnet.get("metadata", {}).get("labels", {})
+ status = subnet.get("status", {}).get("atProvider", {})
+ # Extract relevant label values
+ cluster_label = labels.get("cluster")
+ access_label = labels.get("access")
+ subnet_id = status.get("id")
+ # Apply filtering
+ if cluster_label == db_cluster["name"] and subnet_id:
+ if access_label == "private":
+ private_subnet.append(subnet_id)
+ elif access_label == "public":
+ public_subnet.append(subnet_id)
+ # Update db_cluster
+ db_cluster["private_subnet"] = private_subnet
+ db_cluster["public_subnet"] = public_subnet
+
+ # Register the cluster in k8sclusters collection
db_register["credentials"] = cluster_creds
# To call the lcm.py for registering the cluster in k8scluster lcm.
self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
self.logger.debug(f"Register is : {register}")
else:
db_register["_admin"]["operationalState"] = "ERROR"
- result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
- # To call the lcm.py for registering the cluster in k8scluster lcm.
- db_register["credentials"] = cluster_creds
self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
+ # Update db_cluster
+ self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
+ self.update_default_profile_agekeys(db_cluster_copy)
+ self.update_profile_state(db_cluster, workflow_status, resource_status)
+
return
async def check_create_cluster(self, op_id, op_params, content):
]
else:
return False, "Not suitable VIM account to check cluster status"
- if nodegroup_name:
- nodegroup_check = {
- "item": f"nodegroup_{cloud_type}",
- "name": nodegroup_name,
- "namespace": "",
- "condition": {
- "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
- "value": "True",
- },
- "timeout": self._checkloop_resource_timeout,
- "enable": True,
- "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEGROUP",
- }
- checkings_list.insert(3, nodegroup_check)
+ if cloud_type != "aws":
+ if nodegroup_name:
+ nodegroup_check = {
+ "item": f"nodegroup_{cloud_type}",
+ "name": nodegroup_name,
+ "namespace": "",
+ "condition": {
+ "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
+ "value": "True",
+ },
+ "timeout": self._checkloop_resource_timeout,
+ "enable": True,
+ "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEGROUP",
+ }
+ checkings_list.insert(3, nodegroup_check)
return await self.common_check_list(
op_id, checkings_list, "clusters", db_cluster
)
V1Volume,
V1VolumeMount,
V1Container,
+ V1ConfigMap,
)
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream
self.logger.error(f"Failed to create namespaced secret: {e}")
raise
+ async def create_configmap(self, name: str, data: dict, namespace: str):
+ """
+ Create configmap with data
+
+ :param: name: Name of the configmap
+ :param: data: Dict with data content.
+ :param: namespace: Name of the namespace where the configmap will be stored
+
+ :return: None
+ """
+ self.logger.debug(f"Kubectl cfg file: {self._config_file}")
+ self.logger.debug("Enter create_configmap function")
+ v1_core = self.clients[CORE_CLIENT]
+ self.logger.debug(f"v1_core: {v1_core}")
+ config_map = V1ConfigMap(
+ metadata=V1ObjectMeta(name=name, namespace=namespace),
+ data=data,
+ )
+ self.logger.debug(f"config_map: {config_map}")
+ try:
+ v1_core.create_namespaced_config_map(namespace, config_map)
+ self.logger.info("Namespaced configmap was created")
+ except ApiException as e:
+ self.logger.error(f"Failed to create namespaced configmap: {e}")
+ raise
+
+ def delete_configmap(self, name: str, namespace: str):
+ """
+ Delete a configmap
+
+ :param: name: Name of the configmap
+ :param: namespace: Kubernetes namespace
+ """
+ self.logger.debug(f"Kubectl cfg file: {self._config_file}")
+ self.clients[CORE_CLIENT].delete_namespaced_config_map(name, namespace)
+
async def create_certificate(
self,
namespace: str,
import yaml
import base64
+import json
def gather_age_key(cluster):
else:
raise Exception("Not suitable VIM account to register cluster")
+ # Create configmap for the subnets
+ configmap_name = None
+ data = {}
+ private_subnets = op_params.get("private_subnet")
+ public_subnets = op_params.get("public_subnet")
+ if private_subnets or public_subnets:
+ configmap_name = f"{cluster_name}-parameters"
+ configmap_namespace = "managed-resources"
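+ # ConfigMap data values must be strings, so the subnet lists are serialized as JSON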
+ data["private_subnets"] = f"{json.dumps(private_subnets)}"
+ data["public_subnets"] = f"{json.dumps(public_subnets)}"
+ try:
+ self.logger.debug(f"Testing kubectl: {self._kubectl}")
+ self.logger.debug(
+ f"Testing kubectl configuration: {self._kubectl.configuration}"
+ )
+ self.logger.debug(
+ f"Testing kubectl configuration Host: {self._kubectl.configuration.host}"
+ )
+ await self.create_configmap(
+ configmap_name,
+ configmap_namespace,
+ data,
+ )
+ except Exception as e:
+ self.logger.info(f"Cannot create configmap {configmap_name}: {e}")
+ return False, f"Cannot create configmap {configmap_name}: {e}"
+
# Render workflow
# workflow_kwargs = {
# "git_fleet_url": self._repo_fleet_url,
public_key_mgmt=self._pubkey,
public_key_new_cluster=public_key_new_cluster,
secret_name_private_key_new_cluster=secret_name,
- vm_size=db_cluster["node_size"],
- node_count=db_cluster["node_count"],
+ vm_size=db_cluster.get("node_size", "default"),
+ node_count=db_cluster.get("node_count", "default"),
k8s_version=db_cluster["k8s_version"],
cluster_location=db_cluster["region_name"],
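+ # EKS-only parameters; "default" is used as a placeholder for other cloud types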
+ configmap_name=configmap_name if configmap_name else "default",
+ cluster_iam_role=db_cluster.get("iam_role", "default"),
+ cluster_private_subnets_id=db_cluster.get("private_subnet", "default"),
+ cluster_public_subnets_id=db_cluster.get("public_subnet", "default"),
osm_project_name=osm_project_name,
rg_name=db_cluster.get("resource_group", "''"),
preemptible_nodes=db_cluster.get("preemptible_nodes", "false"),
"name": f"secret-age-{content['cluster']['git_name'].lower()}",
"namespace": "osm-workflows",
}
- ]
+ ],
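+ # Subnet parameters configmap created during cluster creation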
+ "configmaps": [
+ {
+ "name": f"{content['cluster']['name']}-parameters",
+ "namespace": "managed-resources",
+ }
+ ],
}
try:
await self.clean_items(items)
self.logger.error(
f"Could not delete secret {secret_name} in namespace {secret_namespace}: {e}"
)
+
+
+async def create_configmap(self, configmap_name, configmap_namespace, data):
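+ """
+ Create a configmap with data
+
+ :param: configmap_name: Name of the configmap
+ :param: configmap_namespace: Namespace where the configmap will be created
+ :param: data: Dict with the configmap content
+ """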
+ self.logger.info(f"Content of the configmap to be created: {data}")
+ self.logger.info(
+ f"Calling N2VC kubectl to create configmap. Namespace: {configmap_namespace}. Configmap name: {configmap_name}. Data: {data}."
+ )
+ await self._kubectl.create_configmap(
+ name=configmap_name,
+ data=data,
+ namespace=configmap_namespace,
+ )
+ self.logger.info(f"configmap {configmap_name} CREATED")
+
+
+def delete_configmap(self, configmap_name, configmap_namespace):
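+ """
+ Delete a configmap
+
+ :param: configmap_name: Name of the configmap
+ :param: configmap_namespace: Namespace where the configmap is stored
+ """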
+ try:
+ self._kubectl.delete_configmap(
+ name=configmap_name, namespace=configmap_namespace
+ )
+ self.logger.info(
+ f"Deleted configmap {configmap_name} in namespace {configmap_namespace}"
+ )
+ except Exception as e:
+ self.logger.error(
+ f"Could not delete configmap {configmap_name} in namespace {configmap_namespace}: {e}"
+ )
- name: cloned_sw_catalogs_folder_name
value: "sw-catalogs-osm"
+ # Specific parameters - EKS only
+ - name: cluster_iam_role
+ value: "{{ cluster_iam_role }}"
+ - name: cluster_private_subnets_id
+ value: "{{ cluster_private_subnets_id }}"
+ - name: cluster_public_subnets_id
+ value: "{{ cluster_public_subnets_id }}"
+ - name: cluster_subnets_configmap_name
+ value: "{{ configmap_name }}"
+
# Debug/dry run?
- name: debug
value: "{{ workflow_debug }}"
render_yaml_template = odu_render.render_yaml_template
create_secret = odu_common.create_secret
delete_secret = odu_common.delete_secret
+ create_configmap = odu_common.create_configmap
+ delete_configmap = odu_common.delete_configmap
async def launch_workflow(self, key, op_id, op_params, content):
self.logger.info(
f"Testing kubectl configuration Host: {self._kubectl.configuration.host}"
)
await self._kubectl.delete_pvc(name, namespace)
+ # Delete configmaps
+ for configmap in items.get("configmaps", []):
+ name = configmap["name"]
+ namespace = configmap["namespace"]
+ self.logger.info(f"Deleting configmap {name} in namespace {namespace}")
+ self.logger.debug(f"Testing kubectl: {self._kubectl}")
+ self.logger.debug(
+ f"Testing kubectl configuration: {self._kubectl.configuration}"
+ )
+ self.logger.debug(
+ f"Testing kubectl configuration Host: {self._kubectl.configuration.host}"
+ )
+ self.delete_configmap(name, namespace)
+
+ async def list_object(self, api_group, api_plural, api_version):
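+ """
+ List generic Kubernetes objects via the N2VC kubectl helper
+
+ :param: api_group: API group of the objects to list
+ :param: api_plural: Plural name of the resource
+ :param: api_version: API version of the resource
+
+ :return: Objects returned by the kubectl helper
+ """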
+ self.logger.info(
+ f"Api group: {api_group} Api plural: {api_plural} Api version: {api_version}"
+ )
+ generic_object = await self._kubectl.list_generic_object(
+ api_group=api_group,
+ api_plural=api_plural,
+ api_version=api_version,
+ namespace="",
+ )
+ return generic_object