Log operation id with every iteration in readiness_loop 11/14911/2
author garciadeblas <gerardo.garciadeblas@telefonica.com>
Wed, 5 Feb 2025 15:36:17 +0000 (16:36 +0100)
committer garciadeblas <gerardo.garciadeblas@telefonica.com>
Thu, 6 Feb 2025 08:58:05 +0000 (09:58 +0100)
Change-Id: I2117c207d7ff732af19810ee8ffdeeef5bead1c6
Signed-off-by: garciadeblas <gerardo.garciadeblas@telefonica.com>
osm_lcm/k8s.py
osm_lcm/odu_libs/workflows.py

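The change threads the operation id (op_id), which every LCM handler already receives, through check_workflow_status and into readiness_loop, so that each per-iteration log line can be traced back to the operation that triggered the polling. A minimal sketch of the resulting call pattern (a hedged illustration, not an excerpt: the argument names follow the updated signatures in the diffs below, while the concrete values are placeholders):

    # Inside an async LCM handler (osm_lcm/k8s.py); op_id is the operation id
    # the handler already receives. All values below are placeholders.
    workflow_status, workflow_msg = await self.odu.check_workflow_status(
        op_id, workflow_name
    )
    # readiness_loop gains op_id as its first parameter as well.
    status, message = await self.odu.readiness_loop(
        op_id=op_id,
        item="workflow",
        name=workflow_name,
        namespace="osm-workflows",
        # only the condition keys read inside the loop are shown; the full
        # schema is assumed, not taken from this diff
        condition={"jsonpath_filter": "$.status.phase", "value": "Succeeded"},
        deleted=False,
        timeout=300,
    )
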
diff --git a/osm_lcm/k8s.py b/osm_lcm/k8s.py
index 0e68bc3..67173e7 100644
--- a/osm_lcm/k8s.py
+++ b/osm_lcm/k8s.py
@@ -160,7 +160,7 @@ class GitOpsLcm(LcmBase):
 
     async def check_workflow_and_update_db(self, op_id, workflow_name, db_content):
         workflow_status, workflow_msg = await self.odu.check_workflow_status(
-            workflow_name
+            op_id, workflow_name
         )
         self.logger.info(
             "Workflow Status: {} Workflow Message: {}".format(
@@ -221,6 +221,7 @@ class GitOpsLcm(LcmBase):
             for checking in checkings_list:
                 if checking["enable"]:
                     status, message = await self.odu.readiness_loop(
+                        op_id=op_id,
                         item=checking["item"],
                         name=checking["name"],
                         namespace=checking["namespace"],
@@ -354,7 +355,7 @@ class ClusterLcm(GitOpsLcm):
 
         self.logger.info("workflow_name is: {}".format(workflow_name))
         workflow_status, workflow_msg = await self.odu.check_workflow_status(
-            workflow_name
+            op_id, workflow_name
         )
         self.logger.info(
             "workflow_status is: {} and workflow_msg is: {}".format(
@@ -595,7 +596,7 @@ class ClusterLcm(GitOpsLcm):
 
         self.logger.info("workflow_name is: {}".format(workflow_name))
         workflow_status, workflow_msg = await self.odu.check_workflow_status(
-            workflow_name
+            op_id, workflow_name
         )
         self.logger.info(
             "workflow_status is: {} and workflow_msg is: {}".format(
@@ -777,7 +778,7 @@ class ClusterLcm(GitOpsLcm):
 
         self.logger.info("workflow_name is: {}".format(workflow_name))
         workflow_status, workflow_msg = await self.odu.check_workflow_status(
-            workflow_name
+            op_id, workflow_name
         )
         self.logger.info(
             "workflow_status is: {} and workflow_msg is: {}".format(
@@ -864,7 +865,7 @@ class ClusterLcm(GitOpsLcm):
 
         self.logger.info("workflow_name is: {}".format(workflow_name))
         workflow_status, workflow_msg = await self.odu.check_workflow_status(
-            workflow_name
+            op_id, workflow_name
         )
         self.logger.info(
             "workflow_status is: {} and workflow_msg is: {}".format(
@@ -952,7 +953,7 @@ class ClusterLcm(GitOpsLcm):
 
         self.logger.info("workflow_name is: {}".format(workflow_name))
         workflow_status, workflow_msg = await self.odu.check_workflow_status(
-            workflow_name
+            op_id, workflow_name
         )
         self.logger.info(
             "workflow_status is: {} and workflow_msg is: {}".format(
@@ -1077,7 +1078,7 @@ class ClusterLcm(GitOpsLcm):
 
         self.logger.info("workflow_name is: {}".format(workflow_name))
         workflow_status, workflow_msg = await self.odu.check_workflow_status(
-            workflow_name
+            op_id, workflow_name
         )
         self.logger.info(
             "workflow_status is: {} and workflow_msg is: {}".format(
@@ -1182,7 +1183,7 @@ class ClusterLcm(GitOpsLcm):
             return
         self.logger.info("workflow_name is: {}".format(workflow_name))
         workflow_status, workflow_msg = await self.odu.check_workflow_status(
-            workflow_name
+            op_id, workflow_name
         )
         self.logger.info(
             "Workflow Status: {} Workflow Message: {}".format(
@@ -1342,7 +1343,7 @@ class CloudCredentialsLcm(GitOpsLcm):
         )
 
         workflow_status, workflow_msg = await self.odu.check_workflow_status(
-            workflow_name
+            op_id, workflow_name
         )
 
         self.logger.info(
@@ -1394,7 +1395,7 @@ class CloudCredentialsLcm(GitOpsLcm):
             "update_cloud_credentials", op_id, op_params, db_content
         )
         workflow_status, workflow_msg = await self.odu.check_workflow_status(
-            workflow_name
+            op_id, workflow_name
         )
         self.logger.info(
             "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
@@ -1430,7 +1431,7 @@ class CloudCredentialsLcm(GitOpsLcm):
             "delete_cloud_credentials", op_id, op_params, db_content
         )
         workflow_status, workflow_msg = await self.odu.check_workflow_status(
-            workflow_name
+            op_id, workflow_name
         )
         self.logger.info(
             "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
diff --git a/osm_lcm/odu_libs/workflows.py b/osm_lcm/odu_libs/workflows.py
index 84a2b9e..651d0d9 100644
--- a/osm_lcm/odu_libs/workflows.py
+++ b/osm_lcm/odu_libs/workflows.py
@@ -21,13 +21,14 @@ from math import ceil
 from jsonpath_ng.ext import parse
 
 
-async def check_workflow_status(self, workflow_name):
-    self.logger.info(f"check_workflow_status Enter: {workflow_name}")
+async def check_workflow_status(self, op_id, workflow_name):
+    self.logger.info(f"Op {op_id}, check_workflow_status Enter: {workflow_name}")
     if not workflow_name:
         return False, "Workflow was not launched"
     try:
         # First check if the workflow ends successfully
         completed, message = await self.readiness_loop(
+            op_id,
             item="workflow",
             name=workflow_name,
             namespace="osm-workflows",
@@ -41,6 +42,7 @@ async def check_workflow_status(self, workflow_name):
         if completed:
             # Then check if the workflow has a failed task
             return await self.readiness_loop(
+                op_id,
                 item="workflow",
                 name=workflow_name,
                 namespace="osm-workflows",
@@ -58,13 +60,13 @@ async def check_workflow_status(self, workflow_name):
 
 
 async def readiness_loop(
-    self, item, name, namespace, condition, deleted, timeout, kubectl=None
+    self, op_id, item, name, namespace, condition, deleted, timeout, kubectl=None
 ):
     if kubectl is None:
         kubectl = self._kubectl
-    self.logger.info("readiness_loop Enter")
+    self.logger.info("Op {op_id} readiness_loop Enter")
     self.logger.info(
-        f"{item} {name}. Namespace: '{namespace}'. Condition: {condition}. Deleted: {deleted}. Timeout: {timeout}"
+        f"Op {op_id}. {item} {name}. Namespace: '{namespace}'. Condition: {condition}. Deleted: {deleted}. Timeout: {timeout}"
     )
     item_api_map = {
         "workflow": {
@@ -113,8 +115,9 @@ async def readiness_loop(
     api_version = item_api_map[item]["api_version"]
 
     while counter <= max_iterations:
+        iteration_prefix = f"Op {op_id}. Iteration {counter}/{max_iterations}"
         try:
-            self.logger.info(f"Iteration {counter}/{max_iterations}")
+            self.logger.info(f"Op {op_id}. Iteration {counter}/{max_iterations}")
             generic_object = await kubectl.get_generic_object(
                 api_group=api_group,
                 api_plural=api_plural,
@@ -125,11 +128,11 @@ async def readiness_loop(
             if deleted:
                 if generic_object:
                     self.logger.info(
-                        f"Iteration {counter}/{max_iterations}: Found {api_plural}. Name: {name}. Namespace: '{namespace}'. API: {api_group}/{api_version}"
+                        f"{iteration_prefix}. Found {api_plural}. Name: {name}. Namespace: '{namespace}'. API: {api_group}/{api_version}"
                     )
                 else:
                     self.logger.info(
-                        f"{item} {name} deleted after {counter} iterations (aprox {counter*retry_time} seconds)"
+                        f"{iteration_prefix}. {item} {name} deleted after {counter} iterations (aprox {counter*retry_time} seconds)"
                     )
                     return True, "COMPLETED"
             else:
@@ -140,7 +143,7 @@ async def readiness_loop(
                     # self.logger.debug(f"{yaml.safe_dump(generic_object)}")
                     conditions = generic_object.get("status", {}).get("conditions", [])
                     self.logger.info(
-                        f"Iteration {counter}/{max_iterations}. Object found: {item} status conditions: {conditions}"
+                        f"{iteration_prefix}. Object found: {item} status conditions: {conditions}"
                     )
                     jsonpath_expr = parse(condition["jsonpath_filter"])
                     match = jsonpath_expr.find(generic_object)
@@ -151,20 +154,20 @@ async def readiness_loop(
                         )
                         if condition_function(condition["value"], value):
                             self.logger.info(
-                                f"{item} {name} met the condition {condition} with {value} in {counter} iterations (aprox {counter*retry_time} seconds)"
+                                f"{iteration_prefix}. {item} {name} met the condition {condition} with {value} in {counter} iterations (aprox {counter*retry_time} seconds)"
                             )
                             return True, "COMPLETED"
                         else:
                             self.logger.info(
-                                f"Iteration {counter}/{max_iterations}: {item} {name} did not meet the condition {condition} with value {value}"
+                                f"{iteration_prefix}. {item} {name} did not meet the condition {condition} with value {value}"
                             )
                     else:
                         self.logger.info(
-                            f"Iteration {counter}/{max_iterations}. No match for filter {condition.get('jsonpath_filter', '-')} in {item} {name}"
+                            f"{iteration_prefix}. No match for filter {condition.get('jsonpath_filter', '-')} in {item} {name}"
                         )
                 else:
                     self.logger.info(
-                        f"Iteration {counter}/{max_iterations}: Could not find {api_plural}. Name: {name}. Namespace: '{namespace}'. API: {api_group}/{api_version}"
+                        f"{iteration_prefix}. Could not find {api_plural}. Name: {name}. Namespace: '{namespace}'. API: {api_group}/{api_version}"
                     )
         except Exception as e:
             self.logger.error(f"Exception: {e}")
@@ -173,5 +176,5 @@ async def readiness_loop(
         counter += 1
     return (
         False,
-        f"{item} {name} was not ready after {max_iterations} iterations (aprox {timeout} seconds)",
+        f"Op {op_id}. {item} {name} was not ready after {max_iterations} iterations (aprox {timeout} seconds)",
     )
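With the prefix in place, a single operation can be followed end to end in a shared log stream. Illustrative output of the patched readiness_loop (operation id, resource name, iteration counts and timings are made up; the message format follows the f-strings above):

    Op 0d8c3f2e readiness_loop Enter
    Op 0d8c3f2e. workflow demo-wf. Namespace: 'osm-workflows'. Condition: {...}. Deleted: False. Timeout: 300
    Op 0d8c3f2e. Iteration 1/30
    Op 0d8c3f2e. Iteration 1/30. Object found: workflow status conditions: [...]
    Op 0d8c3f2e. Iteration 1/30. workflow demo-wf did not meet the condition {...} with value Running
    Op 0d8c3f2e. Iteration 2/30
    Op 0d8c3f2e. Iteration 2/30. Object found: workflow status conditions: [...]
    Op 0d8c3f2e. Iteration 2/30. workflow demo-wf met the condition {...} with Succeeded in 2 iterations (approx 20 seconds)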