Update cluster configuration to support feature 11055 28/15228/6
author rshri <shrinithi.r@tataelxsi.co.in>
Wed, 11 Jun 2025 18:19:07 +0000 (18:19 +0000)
committer garciadeblas <gerardo.garciadeblas@telefonica.com>
Tue, 29 Jul 2025 14:43:08 +0000 (16:43 +0200)
Change-Id: I350be30250ac87aebe83f3284e2dab859df254f8
Signed-off-by: rshri <shrinithi.r@tataelxsi.co.in>
osm_lcm/k8s.py
osm_lcm/n2vc/kubectl.py
osm_lcm/odu_libs/cluster_mgmt.py
osm_lcm/odu_libs/common.py
osm_lcm/odu_libs/templates/launcher-create-crossplane-cluster-and-bootstrap.j2
osm_lcm/odu_workflows.py

index 3a2d8b7..a273b42 100644 (file)
@@ -478,22 +478,43 @@ class ClusterLcm(GitOpsLcm):
         )
         db_cluster["current_operation"] = None
 
-        # Retrieve credentials
+        # Retrieve credentials and subnets, and register the cluster in the k8sclusters collection
         cluster_creds = None
+        db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
         if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
+            # Retrieve credentials
             result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
             # TODO: manage the case where the credentials are not available
             if result:
                 db_cluster["credentials"] = cluster_creds
 
-        # Update db_cluster
-        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
-        self.update_default_profile_agekeys(db_cluster_copy)
-        self.update_profile_state(db_cluster, workflow_status, resource_status)
-
-        # Register the cluster in k8sclusters collection
-        db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
-        if cluster_creds:
+            # Retrieve subnets
+            if db_cluster["vim_type"] == "aws":
+                generic_object = await self.odu.list_object(
+                    api_group="ec2.aws.upbound.io",
+                    api_plural="subnets",
+                    api_version="v1beta1",
+                )
+                private_subnet = []
+                public_subnet = []
+                for subnet in generic_object:
+                    labels = subnet.get("metadata", {}).get("labels", {})
+                    status = subnet.get("status", {}).get("atProvider", {})
+                    # Extract relevant label values
+                    cluster_label = labels.get("cluster")
+                    access_label = labels.get("access")
+                    subnet_id = status.get("id")
+                    # Apply filtering
+                    if cluster_label == db_cluster["name"] and subnet_id:
+                        if access_label == "private":
+                            private_subnet.append(subnet_id)
+                        elif access_label == "public":
+                            public_subnet.append(subnet_id)
+                # Update db_cluster
+                db_cluster["private_subnet"] = private_subnet
+                db_cluster["public_subnet"] = public_subnet
+
+            # Register the cluster in k8sclusters collection
             db_register["credentials"] = cluster_creds
             # To call the lcm.py for registering the cluster in k8scluster lcm.
             self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
@@ -501,11 +522,13 @@ class ClusterLcm(GitOpsLcm):
             self.logger.debug(f"Register is : {register}")
         else:
             db_register["_admin"]["operationalState"] = "ERROR"
-            result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
-            # To call the lcm.py for registering the cluster in k8scluster lcm.
-            db_register["credentials"] = cluster_creds
             self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
 
+        # Update db_cluster
+        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
+        self.update_default_profile_agekeys(db_cluster_copy)
+        self.update_profile_state(db_cluster, workflow_status, resource_status)
+
         return
 
     async def check_create_cluster(self, op_id, op_params, content):
@@ -577,20 +600,21 @@ class ClusterLcm(GitOpsLcm):
             ]
         else:
             return False, "Not suitable VIM account to check cluster status"
-        if nodegroup_name:
-            nodegroup_check = {
-                "item": f"nodegroup_{cloud_type}",
-                "name": nodegroup_name,
-                "namespace": "",
-                "condition": {
-                    "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
-                    "value": "True",
-                },
-                "timeout": self._checkloop_resource_timeout,
-                "enable": True,
-                "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEGROUP",
-            }
-            checkings_list.insert(3, nodegroup_check)
+        if cloud_type != "aws":
+            if nodegroup_name:
+                nodegroup_check = {
+                    "item": f"nodegroup_{cloud_type}",
+                    "name": nodegroup_name,
+                    "namespace": "",
+                    "condition": {
+                        "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
+                        "value": "True",
+                    },
+                    "timeout": self._checkloop_resource_timeout,
+                    "enable": True,
+                    "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEGROUP",
+                }
+                checkings_list.insert(3, nodegroup_check)
         return await self.common_check_list(
             op_id, checkings_list, "clusters", db_cluster
         )
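
For reference, the subnet-filtering step added above boils down to the following standalone sketch; the helper name and the sample objects are hypothetical, while the field paths (metadata.labels, status.atProvider.id) match the Crossplane ec2.aws.upbound.io Subnet shape used in the diff.

def split_subnets_by_access(subnet_objects, cluster_name):
    """Split Crossplane Subnet objects into private/public subnet IDs for one cluster."""
    private_ids, public_ids = [], []
    for subnet in subnet_objects:
        labels = subnet.get("metadata", {}).get("labels", {})
        subnet_id = subnet.get("status", {}).get("atProvider", {}).get("id")
        if labels.get("cluster") != cluster_name or not subnet_id:
            continue
        if labels.get("access") == "private":
            private_ids.append(subnet_id)
        elif labels.get("access") == "public":
            public_ids.append(subnet_id)
    return private_ids, public_ids


sample = [
    {"metadata": {"labels": {"cluster": "c1", "access": "private"}},
     "status": {"atProvider": {"id": "subnet-0aaa"}}},
    {"metadata": {"labels": {"cluster": "c1", "access": "public"}},
     "status": {"atProvider": {"id": "subnet-0bbb"}}},
]
print(split_subnets_by_access(sample, "c1"))  # (['subnet-0aaa'], ['subnet-0bbb'])
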
index 67a404c..2c1a5b0 100644 (file)
@@ -50,6 +50,7 @@ from kubernetes.client.models import (
     V1Volume,
     V1VolumeMount,
     V1Container,
+    V1ConfigMap,
 )
 from kubernetes.client.rest import ApiException
 from kubernetes.stream import stream
@@ -538,6 +539,42 @@ class Kubectl:
             self.logger.error(f"Failed to create namespaced secret: {e}")
             raise
 
+    async def create_configmap(self, name: str, data: dict, namespace: str):
+        """
+        Create a configmap with the given data
+
+        :param: name:        Name of the configmap
+        :param: data:        Dict with data content.
+        :param: namespace:   Name of the namespace where the configmap will be stored
+
+        :return: None
+        """
+        self.logger.debug(f"Kubectl cfg file: {self._config_file}")
+        self.logger.debug("Enter create_configmap function")
+        v1_core = self.clients[CORE_CLIENT]
+        self.logger.debug(f"v1_core: {v1_core}")
+        config_map = V1ConfigMap(
+            metadata=V1ObjectMeta(name=name, namespace=namespace),
+            data=data,
+        )
+        self.logger.debug(f"config_map: {config_map}")
+        try:
+            v1_core.create_namespaced_config_map(namespace, config_map)
+            self.logger.info("Namespaced configmap was created")
+        except ApiException as e:
+            self.logger.error(f"Failed to create namespaced configmap: {e}")
+            raise
+
+    def delete_configmap(self, name: str, namespace: str):
+        """
+        Delete a configmap
+
+        :param: name:       Name of the configmap
+        :param: namespace:  Kubernetes namespace
+        """
+        self.logger.debug(f"Kubectl cfg file: {self._config_file}")
+        self.clients[CORE_CLIENT].delete_namespaced_config_map(name, namespace)
+
     async def create_certificate(
         self,
         namespace: str,
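
A minimal usage sketch of the new Kubectl helpers, assuming a Kubectl instance configured elsewhere (its constructor is not part of this diff); note that create_configmap is async while delete_configmap is synchronous, and ConfigMap values must be plain strings.

import asyncio


async def demo(kubectl):
    # Create a ConfigMap in the managed-resources namespace (names are illustrative)
    await kubectl.create_configmap(
        name="mycluster-parameters",
        data={"private_subnets": '["subnet-0aaa"]', "public_subnets": '["subnet-0bbb"]'},
        namespace="managed-resources",
    )
    # Delete it again; delete_configmap is synchronous in this patch
    kubectl.delete_configmap(name="mycluster-parameters", namespace="managed-resources")


# asyncio.run(demo(kubectl)) once a configured Kubectl object is available
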
index c5ea6c0..60c0378 100644 (file)
@@ -18,6 +18,7 @@
 
 import yaml
 import base64
+import json
 
 
 def gather_age_key(cluster):
@@ -93,6 +94,33 @@ async def create_cluster(self, op_id, op_params, content):
     else:
         raise Exception("Not suitable VIM account to register cluster")
 
+    # Create configmap for subnet
+    configmap_name = None
+    data = {}
+    private_subnets = op_params.get("private_subnet")
+    public_subnets = op_params.get("public_subnet")
+    if private_subnets or public_subnets:
+        configmap_name = f"{cluster_name}-parameters"
+        configmap_namespace = "managed-resources"
+        data["private_subnets"] = f"{json.dumps(private_subnets)}"
+        data["public_subnets"] = f"{json.dumps(public_subnets)}"
+        try:
+            self.logger.debug(f"Testing kubectl: {self._kubectl}")
+            self.logger.debug(
+                f"Testing kubectl configuration: {self._kubectl.configuration}"
+            )
+            self.logger.debug(
+                f"Testing kubectl configuration Host: {self._kubectl.configuration.host}"
+            )
+            await self.create_configmap(
+                configmap_name,
+                configmap_namespace,
+                data,
+            )
+        except Exception as e:
+            self.logger.info(f"Cannot create configmap {configmap_name}: {e}")
+            return False, f"Cannot create configmap {configmap_name}: {e}"
+
     # Render workflow
     # workflow_kwargs = {
     #     "git_fleet_url": self._repo_fleet_url,
@@ -116,10 +144,14 @@ async def create_cluster(self, op_id, op_params, content):
         public_key_mgmt=self._pubkey,
         public_key_new_cluster=public_key_new_cluster,
         secret_name_private_key_new_cluster=secret_name,
-        vm_size=db_cluster["node_size"],
-        node_count=db_cluster["node_count"],
+        vm_size=db_cluster.get("node_size", "default"),
+        node_count=db_cluster.get("node_count", "default"),
         k8s_version=db_cluster["k8s_version"],
         cluster_location=db_cluster["region_name"],
+        configmap_name=configmap_name if configmap_name else "default",
+        cluster_iam_role=db_cluster.get("iam_role", "default"),
+        cluster_private_subnets_id=db_cluster.get("private_subnet", "default"),
+        cluster_public_subnets_id=db_cluster.get("public_subnet", "default"),
         osm_project_name=osm_project_name,
         rg_name=db_cluster.get("resource_group", "''"),
         preemptible_nodes=db_cluster.get("preemptible_nodes", "false"),
@@ -484,7 +516,13 @@ async def clean_items_cluster_create(self, op_id, op_params, content):
                 "name": f"secret-age-{content['cluster']['git_name'].lower()}",
                 "namespace": "osm-workflows",
             }
-        ]
+        ],
+        "configmaps": [
+            {
+                "name": f"{content['cluster']['name']}-parameters",
+                "namespace": "managed-resources",
+            }
+        ],
     }
     try:
         await self.clean_items(items)
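
The ConfigMap assembled above stores the subnet lists as JSON-encoded strings, since Kubernetes ConfigMap values must be strings (the f-string wrapper around json.dumps in the patch is equivalent to calling json.dumps directly). A minimal illustration with hypothetical subnet IDs:

import json

private_subnets = ["subnet-0aaa", "subnet-0bbb"]
public_subnets = ["subnet-0ccc"]

data = {
    "private_subnets": json.dumps(private_subnets),  # '["subnet-0aaa", "subnet-0bbb"]'
    "public_subnets": json.dumps(public_subnets),    # '["subnet-0ccc"]'
}
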
index 0debd7f..d33524e 100644 (file)
@@ -63,3 +63,30 @@ def delete_secret(self, secret_name, secret_namespace):
         self.logger.error(
             f"Could not delete secret {secret_name} in namespace {secret_namespace}: {e}"
         )
+
+
+async def create_configmap(self, configmap_name, configmap_namespace, data):
+    self.logger.info(f"Checking content of configmap {data} ...")
+    self.logger.info(
+        f"Calling N2VC kubectl to create configmap. Namespace: {configmap_namespace}. configmap name: {configmap_name}. data:{data}."
+    )
+    await self._kubectl.create_configmap(
+        name=configmap_name,
+        data=data,
+        namespace=configmap_namespace,
+    )
+    self.logger.info(f"configmap {configmap_name} CREATED")
+
+
+def delete_configmap(self, configmap_name, configmap_namespace):
+    try:
+        self._kubectl.delete_configmap(
+            name=configmap_name, namespace=configmap_namespace
+        )
+        self.logger.info(
+            f"Deleted configmap {configmap_name} in namespace {configmap_namespace}"
+        )
+    except Exception as e:
+        self.logger.error(
+            f"Could not delete configmap {configmap_name} in namespace {configmap_namespace}: {e}"
+        )
index 11f0fed..f6db45c 100644 (file)
@@ -89,6 +89,16 @@ spec:
     - name: cloned_sw_catalogs_folder_name
       value: "sw-catalogs-osm"
 
+    # Specific parameters - EKS only
+    - name: cluster_iam_role
+      value: {{ cluster_iam_role }}
+    - name: cluster_private_subnets_id
+      value: "{{ cluster_private_subnets_id }}"
+    - name: cluster_public_subnets_id
+      value: "{{ cluster_public_subnets_id }}"
+    - name: cluster_subnets_configmap_name
+      value: {{ configmap_name }}
+
     # Debug/dry run?
     - name: debug
       value: "{{ workflow_debug }}"
index 0244567..730acfe 100644 (file)
@@ -192,6 +192,8 @@ class OduWorkflow(LcmBase):
     render_yaml_template = odu_render.render_yaml_template
     create_secret = odu_common.create_secret
     delete_secret = odu_common.delete_secret
+    create_configmap = odu_common.create_configmap
+    delete_configmap = odu_common.delete_configmap
 
     async def launch_workflow(self, key, op_id, op_params, content):
         self.logger.info(
@@ -268,3 +270,28 @@ class OduWorkflow(LcmBase):
                 f"Testing kubectl configuration Host: {self._kubectl.configuration.host}"
             )
             await self._kubectl.delete_pvc(name, namespace)
+        # Delete configmaps
+        for configmap in items.get("configmaps", []):
+            name = configmap["name"]
+            namespace = configmap["namespace"]
+            self.logger.info(f"Deleting configmap {name} in namespace {namespace}")
+            self.logger.debug(f"Testing kubectl: {self._kubectl}")
+            self.logger.debug(
+                f"Testing kubectl configuration: {self._kubectl.configuration}"
+            )
+            self.logger.debug(
+                f"Testing kubectl configuration Host: {self._kubectl.configuration.host}"
+            )
+            self.delete_configmap(name, namespace)
+
+    async def list_object(self, api_group, api_plural, api_version):
+        self.logger.info(
+            f"Api group: {api_group} Api plural: {api_plural} Api version: {api_version}"
+        )
+        generic_object = await self._kubectl.list_generic_object(
+            api_group=api_group,
+            api_plural=api_plural,
+            api_version=api_version,
+            namespace="",
+        )
+        return generic_object
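
For context, list_generic_object itself is not part of this diff; listing the same Crossplane subnets can be approximated with the stock Kubernetes CustomObjectsApi, as in this hedged sketch (assumes kubeconfig access to the management cluster).

from kubernetes import client, config


def list_aws_subnets():
    config.load_kube_config()  # or config.load_incluster_config() when running in-cluster
    api = client.CustomObjectsApi()
    # Crossplane managed resources are cluster-scoped, so list at cluster scope
    result = api.list_cluster_custom_object(
        group="ec2.aws.upbound.io",
        version="v1beta1",
        plural="subnets",
    )
    return result.get("items", [])


for subnet in list_aws_subnets():
    labels = subnet.get("metadata", {}).get("labels", {})
    print(labels.get("cluster"), subnet.get("status", {}).get("atProvider", {}).get("id"))
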