blob: c9497931cd87c9187e8191028de73b14d0339db6 [file] [log] [blame]
rshri932105f2024-07-05 15:11:55 +00001# -*- coding: utf-8 -*-
2
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12# implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15
16__author__ = (
17 "Shrinithi R <shrinithi.r@tataelxsi.co.in>",
18 "Shahithya Y <shahithya.y@tataelxsi.co.in>",
19)
20
rshric3564942024-11-12 18:12:38 +000021import copy
rshri932105f2024-07-05 15:11:55 +000022import logging
yshahd940c652024-10-17 06:11:12 +000023from time import time
garciadeblas72412282024-11-07 12:41:54 +010024import traceback
rshri932105f2024-07-05 15:11:55 +000025from osm_lcm.lcm_utils import LcmBase
26from copy import deepcopy
27from osm_lcm import odu_workflows
28from osm_lcm import vim_sdn
rshri948f7de2024-12-02 03:42:35 +000029from osm_lcm.data_utils.list_utils import find_in_list
garciadeblasad6d1ba2025-01-22 16:02:18 +010030from osm_lcm.n2vc.kubectl import Kubectl
31import yaml
rshri932105f2024-07-05 15:11:55 +000032
# Maps profile kinds (as stored in DB documents) to the directory names used
# in the Git repo structure.
# NOTE(review): "managed_resources" uses an underscore here while
# GitOpsLcm.profile_type_mapping uses "managed-resources" and "applications"
# instead of "apps" — confirm both spellings are intentional.
MAP_PROFILE = {
    "infra_controller_profiles": "infra-controllers",
    "infra_config_profiles": "infra-configs",
    "resource_profiles": "managed_resources",
    "app_profiles": "apps",
}
39
rshri932105f2024-07-05 15:11:55 +000040
class GitOpsLcm(LcmBase):
    """Base LCM class for GitOps-managed items (clusters, nodegroups, profiles, KSUs).

    Provides common helpers to:
    - track an operation's life cycle inside the item's ``operationHistory``,
    - poll ODU workflow and resource status and persist the result in the DB,
    - encrypt/decrypt age keys stored in DB documents,
    - build a ``Kubectl`` client from stored cluster credentials.
    """

    db_collection = "gitops"
    # Class-level defaults; concrete operations overwrite these per item.
    workflow_status = None
    resource_status = None

    # Profile kind (key used in cluster documents) -> DB collection name.
    profile_collection_mapping = {
        "infra_controller_profiles": "k8sinfra_controller",
        "infra_config_profiles": "k8sinfra_config",
        "resource_profiles": "k8sresource",
        "app_profiles": "k8sapp",
    }

    # Git folder name -> profile kind (reverse direction of MAP_PROFILE).
    profile_type_mapping = {
        "infra-controllers": "infra_controller_profiles",
        "infra-configs": "infra_config_profiles",
        "managed-resources": "resource_profiles",
        "applications": "app_profiles",
    }

    def __init__(self, msg, lcm_tasks, config):
        """Init, connect to database, filesystem storage and messaging.

        :param msg: message bus handler
        :param lcm_tasks: shared LCM task registry
        :param config: two level dictionary with configuration
        """
        self.logger = logging.getLogger("lcm.gitops")
        self.lcm_tasks = lcm_tasks
        self.odu = odu_workflows.OduWorkflow(msg, self.lcm_tasks, config)
        # Timeouts (seconds) used by the readiness loops of subclasses.
        self._checkloop_kustomization_timeout = 900
        self._checkloop_resource_timeout = 900
        # Subclasses register {workflow_key: {"check_resource_function": coro}}.
        self._workflows = {}
        super().__init__(msg, self.logger)

    async def check_dummy_operation(self, op_id, op_params, content):
        """Fallback resource check used when no specific checker is registered."""
        self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
        return True, "OK"

    def initialize_operation(self, item_id, op_id):
        """Mark operation ``op_id`` of item ``item_id`` as in progress in the DB."""
        db_item = self.db.get_one(self.db_collection, {"_id": item_id})
        operation = next(
            (op for op in db_item.get("operationHistory", []) if op["op_id"] == op_id),
            None,
        )
        if operation is None:
            # Fail early with a clear message instead of the TypeError the
            # item assignments below would otherwise raise on None.
            raise ValueError(
                f"Operation {op_id} not found in operationHistory of item {item_id}"
            )
        operation["workflowState"] = "PROCESSING"
        operation["resourceState"] = "NOT_READY"
        operation["operationState"] = "IN_PROGRESS"
        operation["gitOperationInfo"] = None
        db_item["current_operation"] = operation["op_id"]
        self.db.set_one(self.db_collection, {"_id": item_id}, db_item)

    def get_operation_params(self, item, operation_id):
        """Return the operationParams of ``operation_id`` in the item's history."""
        operation_history = item.get("operationHistory", [])
        operation = find_in_list(
            operation_history, lambda op: op["op_id"] == operation_id
        )
        return operation.get("operationParams", {})

    def get_operation_type(self, item, operation_id):
        """Return the operationType of ``operation_id`` in the item's history."""
        operation_history = item.get("operationHistory", [])
        operation = find_in_list(
            operation_history, lambda op: op["op_id"] == operation_id
        )
        return operation.get("operationType", {})

    def update_state_operation_history(
        self, content, op_id, workflow_state=None, resource_state=None
    ):
        """Set intermediate workflow/resource state of op_id in operationHistory.

        Only states passed as non-None are updated. Returns the mutated content;
        persistence is left to the caller.
        """
        self.logger.info(
            f"Update state of operation {op_id} in Operation History in DB"
        )
        self.logger.info(
            f"Workflow state: {workflow_state}. Resource state: {resource_state}"
        )
        self.logger.debug(f"Content: {content}")

        op_num = 0
        for operation in content["operationHistory"]:
            self.logger.debug("Operations: {}".format(operation))
            if operation["op_id"] == op_id:
                self.logger.debug("Found operation number: {}".format(op_num))
                if workflow_state is not None:
                    operation["workflowState"] = workflow_state

                if resource_state is not None:
                    operation["resourceState"] = resource_state
                break
            op_num += 1
        self.logger.debug("content: {}".format(content))

        return content

    def update_operation_history(
        self, content, op_id, workflow_status=None, resource_status=None, op_end=True
    ):
        """Record final workflow/resource outcome of op_id in operationHistory.

        ``workflow_status``/``resource_status`` are booleans (or None to skip):
        they are translated to COMPLETED/ERROR/READY/... states. When ``op_end``
        is true the endDate timestamp is also stamped. Returns the mutated
        content; persistence is left to the caller.
        """
        self.logger.info(
            f"Update Operation History in DB. Workflow status: {workflow_status}. Resource status: {resource_status}"
        )
        self.logger.debug(f"Content: {content}")

        op_num = 0
        for operation in content["operationHistory"]:
            self.logger.debug("Operations: {}".format(operation))
            if operation["op_id"] == op_id:
                self.logger.debug("Found operation number: {}".format(op_num))
                if workflow_status is not None:
                    if workflow_status:
                        operation["workflowState"] = "COMPLETED"
                        operation["result"] = True
                    else:
                        operation["workflowState"] = "ERROR"
                        operation["operationState"] = "FAILED"
                        operation["result"] = False

                if resource_status is not None:
                    if resource_status:
                        operation["resourceState"] = "READY"
                        operation["operationState"] = "COMPLETED"
                        operation["result"] = True
                    else:
                        operation["resourceState"] = "NOT_READY"
                        operation["operationState"] = "FAILED"
                        operation["result"] = False

                if op_end:
                    now = time()
                    operation["endDate"] = now
                break
            op_num += 1
        self.logger.debug("content: {}".format(content))

        return content

    async def check_workflow_and_update_db(self, op_id, workflow_name, db_content):
        """Poll the ODU workflow and persist the resulting item state in the DB.

        Returns the boolean workflow status.
        """
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            op_id, workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )
        # Derive the item state from the operation type and the outcome.
        operation_type = self.get_operation_type(db_content, op_id)
        if operation_type == "create" and workflow_status:
            db_content["state"] = "CREATED"
        elif operation_type == "create" and not workflow_status:
            db_content["state"] = "FAILED_CREATION"
        elif operation_type == "delete" and workflow_status:
            db_content["state"] = "DELETED"
        elif operation_type == "delete" and not workflow_status:
            db_content["state"] = "FAILED_DELETION"

        if workflow_status:
            db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(
            db_content, op_id, workflow_status, None
        )
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        return workflow_status

    async def check_resource_and_update_db(
        self, resource_name, op_id, op_params, db_content
    ):
        """Check the resource readiness and update the item content accordingly.

        Only called after a successful workflow, hence workflow_status is True.
        Returns (resource_status, db_content); persistence is left to the caller.
        """
        workflow_status = True

        resource_status, resource_msg = await self.check_resource_status(
            resource_name, op_id, op_params, db_content
        )
        self.logger.info(
            "Resource Status: {} Resource Message: {}".format(
                resource_status, resource_msg
            )
        )

        if resource_status:
            db_content["resourceState"] = "READY"
        else:
            db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(
            db_content, op_id, workflow_status, resource_status
        )
        db_content["operatingState"] = "IDLE"
        db_content["current_operation"] = None
        return resource_status, db_content

    async def common_check_list(
        self, op_id, checkings_list, db_collection, db_item, kubectl_obj=None
    ):
        """Run a list of readiness checks, persisting progress after each one.

        Each checking dict drives one ``odu.readiness_loop`` call; on success
        the item's resourceState is advanced and saved. Returns (ok, message).
        """
        try:
            for checking in checkings_list:
                if checking["enable"]:
                    status, message = await self.odu.readiness_loop(
                        op_id=op_id,
                        item=checking["item"],
                        name=checking["name"],
                        namespace=checking["namespace"],
                        condition=checking.get("condition"),
                        deleted=checking.get("deleted", False),
                        timeout=checking["timeout"],
                        kubectl_obj=kubectl_obj,
                    )
                    if not status:
                        error_message = "Resources not ready: "
                        error_message += checking.get("error_message", "")
                        return status, f"{error_message}: {message}"
                    else:
                        db_item["resourceState"] = checking["resourceState"]
                        db_item = self.update_state_operation_history(
                            db_item, op_id, None, checking["resourceState"]
                        )
                        self.db.set_one(db_collection, {"_id": db_item["_id"]}, db_item)
        except Exception as e:
            # Boundary handler: never let a check explosion escape; report it.
            self.logger.debug(traceback.format_exc())
            self.logger.debug(f"Exception: {e}", exc_info=True)
            return False, f"Unexpected exception: {e}"
        return True, "OK"

    async def check_resource_status(self, key, op_id, op_params, content):
        """Dispatch to the workflow-specific resource checker (or the dummy one)."""
        self.logger.info(
            f"Check resource status. Key: {key}. Operation: {op_id}. Params: {op_params}."
        )
        self.logger.debug(f"Check resource status. Content: {content}")
        check_resource_function = self._workflows.get(key, {}).get(
            "check_resource_function"
        )
        self.logger.info("check_resource function : {}".format(check_resource_function))
        if check_resource_function:
            return await check_resource_function(op_id, op_params, content)
        else:
            return await self.check_dummy_operation(op_id, op_params, content)

    def check_force_delete_and_delete_from_db(
        self, _id, workflow_status, resource_status, force
    ):
        """Delete the item from the DB if a forced delete failed; return True if so."""
        self.logger.info(
            f" Force: {force} Workflow status: {workflow_status} Resource Status: {resource_status}"
        )
        if force and (not workflow_status or not resource_status):
            self.db.del_one(self.db_collection, {"_id": _id})
            return True
        return False

    def decrypt_age_keys(self, content, fields=None):
        """Decrypt the given fields of content in place (default: age key pair)."""
        # Avoid a shared mutable default argument.
        fields = ["age_pubkey", "age_privkey"] if fields is None else fields
        self.db.encrypt_decrypt_fields(
            content,
            "decrypt",
            fields,
            schema_version="1.11",
            salt=content["_id"],
        )

    def encrypt_age_keys(self, content, fields=None):
        """Encrypt the given fields of content in place (default: age key pair)."""
        # Avoid a shared mutable default argument.
        fields = ["age_pubkey", "age_privkey"] if fields is None else fields
        self.db.encrypt_decrypt_fields(
            content,
            "encrypt",
            fields,
            schema_version="1.11",
            salt=content["_id"],
        )

    def decrypted_copy(self, content, fields=None):
        """Return a deep copy of content with the given fields decrypted.

        The copy is intended to be passed to ODU workflows; the original
        document keeps its encrypted values.
        """
        # Avoid a shared mutable default argument.
        fields = ["age_pubkey", "age_privkey"] if fields is None else fields
        content_copy = copy.deepcopy(content)

        # decrypting the key
        self.db.encrypt_decrypt_fields(
            content_copy,
            "decrypt",
            fields,
            schema_version="1.11",
            salt=content_copy["_id"],
        )
        return content_copy

    def delete_ksu_dependency(self, _id, data):
        """Mark OKAs referenced only by KSU ``_id`` as NOT_IN_USE.

        ``data`` is the KSU document being deleted; any OKA it references that
        no other KSU references is released.
        """
        used_oka = [oka_data["_id"] for oka_data in data["oka"] if oka_data.get("_id")]

        # OKA ids referenced by every other KSU; a set gives O(1) membership
        # tests below instead of repeated list scans.
        existing_oka = set()
        for ksu_data in self.db.get_list("ksus", {}):
            if ksu_data["_id"] != _id:
                existing_oka.update(
                    oka_data["_id"]
                    for oka_data in ksu_data["oka"]
                    if oka_data.get("_id")
                )

        self.logger.info(f"Used OKA: {used_oka}")
        self.logger.info(f"Existing OKA: {existing_oka}")

        for oka_id in used_oka:
            if oka_id not in existing_oka:
                self.db.set_one(
                    "okas", {"_id": oka_id}, {"_admin.usageState": "NOT_IN_USE"}
                )

        return

    def delete_profile_ksu(self, _id, profile_type):
        """Delete every KSU attached to profile ``_id``, releasing their OKAs first."""
        filter_q = {"profile": {"_id": _id, "profile_type": profile_type}}
        ksu_list = self.db.get_list("ksus", filter_q)
        for ksu_data in ksu_list:
            self.delete_ksu_dependency(ksu_data["_id"], ksu_data)

        if ksu_list:
            self.db.del_list("ksus", filter_q)
        return

    def cluster_kubectl(self, db_cluster):
        """Build a Kubectl client from the cluster's stored kubeconfig.

        NOTE(review): the kubeconfig is dumped to a predictable path under
        /tmp and never removed — consider tempfile with restrictive
        permissions and explicit cleanup.
        """
        cluster_kubeconfig = db_cluster["credentials"]
        kubeconfig_path = f"/tmp/{db_cluster['_id']}_kubeconfig.yaml"
        with open(kubeconfig_path, "w") as kubeconfig_file:
            yaml.safe_dump(cluster_kubeconfig, kubeconfig_file)
        return Kubectl(config_file=kubeconfig_path)
356
garciadeblas72412282024-11-07 12:41:54 +0100357
class NodeGroupLcm(GitOpsLcm):
    """LCM for AWS nodegroups managed through GitOps workflows."""

    db_collection = "nodegroups"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)
        self._workflows = {
            "add_nodegroup": {
                "check_resource_function": self.check_add_nodegroup,
            },
            "scale_nodegroup": {
                "check_resource_function": self.check_scale_nodegroup,
            },
            "delete_nodegroup": {
                "check_resource_function": self.check_delete_nodegroup,
            },
        }

    async def create(self, params, order_id):
        """Launch the add_nodegroup workflow and track it to completion."""
        self.logger.info("Add NodeGroup Enter")
        # BUGFIX: initialize so the exit log below does not raise NameError
        # when the workflow fails (resource check is then skipped).
        resource_status = None

        # To get the nodegroup and op ids
        nodegroup_id = params["nodegroup_id"]
        op_id = params["operation_id"]

        # To initialize the operation states
        self.initialize_operation(nodegroup_id, op_id)

        # To get the nodegroup details and control plane from DB
        db_nodegroup = self.db.get_one(self.db_collection, {"_id": nodegroup_id})
        db_cluster = self.db.get_one("clusters", {"_id": db_nodegroup["cluster_id"]})

        # To get the operation params details
        op_params = self.get_operation_params(db_nodegroup, op_id)
        self.logger.info(f"Operations Params: {op_params}")

        db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})

        # To copy the cluster content and decrypting fields to use in workflows
        workflow_content = {
            "nodegroup": db_nodegroup,
            "cluster": db_cluster,
            "vim_account": db_vim,
        }
        self.logger.info(f"Workflow content: {workflow_content}")

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "add_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_nodegroup
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "add_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "add_nodegroup", op_id, op_params, db_nodegroup
            )
        self.db.set_one(self.db_collection, {"_id": db_nodegroup["_id"]}, db_nodegroup)
        self.logger.info(f"Add NodeGroup Exit with resource status: {resource_status}")
        return

    async def check_add_nodegroup(self, op_id, op_params, content):
        """Readiness checks for a newly added nodegroup (kustomization + AWS resource)."""
        self.logger.info(f"check_add_nodegroup Operation {op_id}. Params: {op_params}.")
        self.logger.info(f"Content: {content}")
        db_nodegroup = content
        nodegroup_name = db_nodegroup["git_name"].lower()
        nodegroup_kustomization_name = nodegroup_name
        checkings_list = [
            {
                "item": "kustomization",
                "name": nodegroup_kustomization_name,
                "namespace": "managed-resources",
                "condition": {
                    "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
                    "value": "True",
                },
                "timeout": self._checkloop_kustomization_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
            },
            {
                "item": "nodegroup_aws",
                "name": nodegroup_name,
                "namespace": "",
                "condition": {
                    "jsonpath_filter": "status.conditions[?(@.type=='Synced')].status",
                    "value": "True",
                },
                "timeout": self._checkloop_resource_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.NODEGROUP",
            },
            {
                "item": "nodegroup_aws",
                "name": nodegroup_name,
                "namespace": "",
                "condition": {
                    "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
                    "value": "True",
                },
                "timeout": self._checkloop_resource_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEGROUP",
            },
        ]
        self.logger.info(f"Checking list: {checkings_list}")
        # common_check_list already returns (True, "OK") on success, so no
        # post-processing is needed (same form as the scale/delete checkers).
        return await self.common_check_list(
            op_id, checkings_list, "nodegroups", db_nodegroup
        )

    async def scale(self, params, order_id):
        """Launch the scale_nodegroup workflow and track it to completion."""
        self.logger.info("Scale nodegroup Enter")
        # BUGFIX: initialize so the checks below do not raise NameError when
        # the workflow fails (resource check is then skipped).
        resource_status = None

        op_id = params["operation_id"]
        nodegroup_id = params["nodegroup_id"]

        # To initialize the operation states
        self.initialize_operation(nodegroup_id, op_id)

        db_nodegroup = self.db.get_one(self.db_collection, {"_id": nodegroup_id})
        db_cluster = self.db.get_one("clusters", {"_id": db_nodegroup["cluster_id"]})
        op_params = self.get_operation_params(db_nodegroup, op_id)
        db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})

        workflow_content = {
            "nodegroup": db_nodegroup,
            "cluster": db_cluster,
            "vim_account": db_vim,
        }
        self.logger.info(f"Workflow content: {workflow_content}")

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "scale_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_nodegroup
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "scale_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "scale_nodegroup", op_id, op_params, db_nodegroup
            )

        if resource_status:
            db_nodegroup["state"] = "READY"
        self.db.set_one(
            self.db_collection, {"_id": db_nodegroup["_id"]}, db_nodegroup
        )
        self.logger.info(
            f"Nodegroup Scale Exit with resource status: {resource_status}"
        )
        return

    async def check_scale_nodegroup(self, op_id, op_params, content):
        """Readiness checks after scaling: kustomization ready + desiredSize matches."""
        self.logger.info(
            f"check_scale_nodegroup Operation {op_id}. Params: {op_params}."
        )
        self.logger.debug(f"Content: {content}")
        db_nodegroup = content
        nodegroup_name = db_nodegroup["git_name"].lower()
        nodegroup_kustomization_name = nodegroup_name
        checkings_list = [
            {
                "item": "kustomization",
                "name": nodegroup_kustomization_name,
                "namespace": "managed-resources",
                "condition": {
                    "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
                    "value": "True",
                },
                "timeout": self._checkloop_kustomization_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
            },
            {
                "item": "nodegroup_aws",
                "name": nodegroup_name,
                "namespace": "",
                "condition": {
                    "jsonpath_filter": "status.atProvider.scalingConfig[0].desiredSize",
                    "value": f"{op_params['node_count']}",
                },
                "timeout": self._checkloop_resource_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.NODEGROUP",
            },
        ]
        self.logger.info(f"Checking list: {checkings_list}")
        return await self.common_check_list(
            op_id, checkings_list, "nodegroups", db_nodegroup
        )

    async def delete(self, params, order_id):
        """Launch the delete_nodegroup workflow; on success remove the DB record."""
        self.logger.info("Delete nodegroup Enter")
        # BUGFIX: initialize so the checks below do not raise NameError when
        # the workflow fails (resource check is then skipped).
        resource_status = None

        op_id = params["operation_id"]
        nodegroup_id = params["nodegroup_id"]

        # To initialize the operation states
        self.initialize_operation(nodegroup_id, op_id)

        db_nodegroup = self.db.get_one(self.db_collection, {"_id": nodegroup_id})
        db_cluster = self.db.get_one("clusters", {"_id": db_nodegroup["cluster_id"]})
        op_params = self.get_operation_params(db_nodegroup, op_id)

        workflow_content = {"nodegroup": db_nodegroup, "cluster": db_cluster}

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_nodegroup
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "delete_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_nodegroup", op_id, op_params, db_nodegroup
            )

        if resource_status:
            # Keep the cluster's node count in sync; guard against a
            # cluster document without the field (None - 1 would raise).
            node_count = db_cluster.get("node_count")
            if node_count is not None:
                new_node_count = node_count - 1
                self.logger.info(f"New Node count: {new_node_count}")
                db_cluster["node_count"] = new_node_count
                self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
            db_nodegroup["state"] = "DELETED"
            self.db.set_one(
                self.db_collection, {"_id": db_nodegroup["_id"]}, db_nodegroup
            )
            self.db.del_one(self.db_collection, {"_id": db_nodegroup["_id"]})
        self.logger.info(
            f"Nodegroup Delete Exit with resource status: {resource_status}"
        )
        return

    async def check_delete_nodegroup(self, op_id, op_params, content):
        """Readiness checks for deletion: kustomization and AWS resource gone."""
        self.logger.info(
            f"check_delete_nodegroup Operation {op_id}. Params: {op_params}."
        )
        db_nodegroup = content
        nodegroup_name = db_nodegroup["git_name"].lower()
        nodegroup_kustomization_name = nodegroup_name
        checkings_list = [
            {
                "item": "kustomization",
                "name": nodegroup_kustomization_name,
                "namespace": "managed-resources",
                "deleted": True,
                "timeout": self._checkloop_kustomization_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
            },
            {
                "item": "nodegroup_aws",
                "name": nodegroup_name,
                "namespace": "",
                "deleted": True,
                "timeout": self._checkloop_resource_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.RESOURCE_DELETED.NODEGROUP",
            },
        ]
        self.logger.info(f"Checking list: {checkings_list}")
        return await self.common_check_list(
            op_id, checkings_list, "nodegroups", db_nodegroup
        )
659 )
660
661
garciadeblas72412282024-11-07 12:41:54 +0100662class ClusterLcm(GitOpsLcm):
garciadeblas96b94f52024-07-08 16:18:21 +0200663 db_collection = "clusters"
rshri932105f2024-07-05 15:11:55 +0000664
665 def __init__(self, msg, lcm_tasks, config):
666 """
667 Init, Connect to database, filesystem storage, and messaging
668 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
669 :return: None
670 """
garciadeblas72412282024-11-07 12:41:54 +0100671 super().__init__(msg, lcm_tasks, config)
672 self._workflows = {
673 "create_cluster": {
674 "check_resource_function": self.check_create_cluster,
675 },
garciadeblasb0a42c22024-11-13 16:00:10 +0100676 "register_cluster": {
677 "check_resource_function": self.check_register_cluster,
678 },
679 "update_cluster": {
680 "check_resource_function": self.check_update_cluster,
681 },
garciadeblasad6d1ba2025-01-22 16:02:18 +0100682 "delete_cluster": {
683 "check_resource_function": self.check_delete_cluster,
684 },
garciadeblas72412282024-11-07 12:41:54 +0100685 }
rshri932105f2024-07-05 15:11:55 +0000686 self.regist = vim_sdn.K8sClusterLcm(msg, self.lcm_tasks, config)
687
rshri948f7de2024-12-02 03:42:35 +0000688 async def create(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000689 self.logger.info("cluster Create Enter")
garciadeblas8bdb3d42025-04-04 00:19:13 +0200690 workflow_status = None
691 resource_status = None
rshri932105f2024-07-05 15:11:55 +0000692
garciadeblas995cbf32024-12-18 12:54:00 +0100693 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000694 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000695 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000696
697 # To initialize the operation states
698 self.initialize_operation(cluster_id, op_id)
699
garciadeblas995cbf32024-12-18 12:54:00 +0100700 # To get the cluster
701 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
702
703 # To get the operation params details
704 op_params = self.get_operation_params(db_cluster, op_id)
705
706 # To copy the cluster content and decrypting fields to use in workflows
garciadeblasd41e9292025-03-11 15:44:25 +0100707 db_cluster_copy = self.decrypted_copy(db_cluster)
garciadeblas995cbf32024-12-18 12:54:00 +0100708 workflow_content = {
garciadeblasd41e9292025-03-11 15:44:25 +0100709 "cluster": db_cluster_copy,
garciadeblas995cbf32024-12-18 12:54:00 +0100710 }
rshric3564942024-11-12 18:12:38 +0000711
rshri948f7de2024-12-02 03:42:35 +0000712 # To get the vim account details
rshric3564942024-11-12 18:12:38 +0000713 db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
garciadeblas995cbf32024-12-18 12:54:00 +0100714 workflow_content["vim_account"] = db_vim
rshric3564942024-11-12 18:12:38 +0000715
garciadeblas41859ce2025-02-04 16:08:51 +0100716 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100717 "create_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200718 )
garciadeblas41859ce2025-02-04 16:08:51 +0100719 if not workflow_res:
720 self.logger.error(f"Failed to launch workflow: {workflow_name}")
721 db_cluster["state"] = "FAILED_CREATION"
722 db_cluster["resourceState"] = "ERROR"
723 db_cluster = self.update_operation_history(
724 db_cluster, op_id, workflow_status=False, resource_status=None
725 )
726 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
727 # Clean items used in the workflow, no matter if the workflow succeeded
728 clean_status, clean_msg = await self.odu.clean_items_workflow(
729 "create_cluster", op_id, op_params, workflow_content
730 )
731 self.logger.info(
732 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
733 )
734 return
rshri932105f2024-07-05 15:11:55 +0000735
garciadeblas26d733c2025-02-03 16:12:43 +0100736 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200737 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +0100738 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +0200739 )
rshri932105f2024-07-05 15:11:55 +0000740 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100741 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000742 workflow_status, workflow_msg
743 )
744 )
745 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200746 db_cluster["state"] = "CREATED"
747 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +0000748 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200749 db_cluster["state"] = "FAILED_CREATION"
750 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000751 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000752 db_cluster = self.update_operation_history(
753 db_cluster, op_id, workflow_status, None
754 )
garciadeblas96b94f52024-07-08 16:18:21 +0200755 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000756
garciadeblas28bff0f2024-09-16 12:53:07 +0200757 # Clean items used in the workflow, no matter if the workflow succeeded
758 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100759 "create_cluster", op_id, op_params, workflow_content
garciadeblas28bff0f2024-09-16 12:53:07 +0200760 )
761 self.logger.info(
762 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
763 )
764
rshri932105f2024-07-05 15:11:55 +0000765 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100766 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100767 "create_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000768 )
769 self.logger.info(
770 "resource_status is :{} and resource_msg is :{}".format(
771 resource_status, resource_msg
772 )
773 )
774 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200775 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +0000776 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200777 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000778
garciadeblas96b94f52024-07-08 16:18:21 +0200779 db_cluster["operatingState"] = "IDLE"
780 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000781 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000782 )
shahithya70a3fc92024-11-12 11:01:05 +0000783 db_cluster["current_operation"] = None
garciadeblas41a600e2025-01-21 11:49:38 +0100784
rshrif8911b92025-06-11 18:19:07 +0000785 # Retrieve credentials and subnets and register the cluster in k8sclusters collection
garciadeblas41a600e2025-01-21 11:49:38 +0100786 cluster_creds = None
rshrif8911b92025-06-11 18:19:07 +0000787 db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
garciadeblas41a600e2025-01-21 11:49:38 +0100788 if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
rshrif8911b92025-06-11 18:19:07 +0000789 # Retrieve credentials
garciadeblas41a600e2025-01-21 11:49:38 +0100790 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
791 # TODO: manage the case where the credentials are not available
792 if result:
793 db_cluster["credentials"] = cluster_creds
794
rshrif8911b92025-06-11 18:19:07 +0000795 # Retrieve subnets
yshah1f070ba2025-09-23 09:47:12 +0000796 if op_params.get("private_subnet") and op_params.get("public_subnet"):
797 db_cluster["private_subnet"] = op_params["private_subnet"]
798 db_cluster["public_subnet"] = op_params["public_subnet"]
799 else:
800 if db_vim["vim_type"] == "aws":
801 generic_object = await self.odu.list_object(
802 api_group="ec2.aws.upbound.io",
803 api_plural="subnets",
804 api_version="v1beta1",
805 )
806 private_subnet = []
807 public_subnet = []
808 for subnet in generic_object:
809 labels = subnet.get("metadata", {}).get("labels", {})
810 status = subnet.get("status", {}).get("atProvider", {})
811 # Extract relevant label values
812 cluster_label = labels.get("cluster")
813 access_label = labels.get("access")
814 subnet_id = status.get("id")
815 # Apply filtering
816 if cluster_label == db_cluster["name"] and subnet_id:
817 if access_label == "private":
818 private_subnet.append(subnet_id)
819 elif access_label == "public":
820 public_subnet.append(subnet_id)
821 # Update db_cluster
822 db_cluster["private_subnet"] = private_subnet
823 db_cluster["public_subnet"] = public_subnet
824 self.logger.info("DB cluster: {}".format(db_cluster))
rshri948f7de2024-12-02 03:42:35 +0000825
rshrif8911b92025-06-11 18:19:07 +0000826 # Register the cluster in k8sclusters collection
rshri948f7de2024-12-02 03:42:35 +0000827 db_register["credentials"] = cluster_creds
garciadeblas41a600e2025-01-21 11:49:38 +0100828 # To call the lcm.py for registering the cluster in k8scluster lcm.
rshri948f7de2024-12-02 03:42:35 +0000829 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
830 register = await self.regist.create(db_register, order_id)
831 self.logger.debug(f"Register is : {register}")
832 else:
833 db_register["_admin"]["operationalState"] = "ERROR"
rshri948f7de2024-12-02 03:42:35 +0000834 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
835
rshrif8911b92025-06-11 18:19:07 +0000836 # Update db_cluster
837 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
838 self.update_default_profile_agekeys(db_cluster_copy)
839 self.update_profile_state(db_cluster, workflow_status, resource_status)
840
rshri932105f2024-07-05 15:11:55 +0000841 return
842
garciadeblas72412282024-11-07 12:41:54 +0100843 async def check_create_cluster(self, op_id, op_params, content):
garciadeblas7eae6f42024-11-08 10:41:38 +0100844 self.logger.info(
845 f"check_create_cluster Operation {op_id}. Params: {op_params}."
846 )
garciadeblas72412282024-11-07 12:41:54 +0100847 db_cluster = content["cluster"]
848 cluster_name = db_cluster["git_name"].lower()
849 cluster_kustomization_name = cluster_name
850 db_vim_account = content["vim_account"]
851 cloud_type = db_vim_account["vim_type"]
garciadeblas1ca09852025-05-30 11:19:06 +0200852 nodegroup_name = ""
garciadeblas72412282024-11-07 12:41:54 +0100853 if cloud_type == "aws":
garciadeblas1ca09852025-05-30 11:19:06 +0200854 nodegroup_name = f"{cluster_name}-nodegroup"
garciadeblas72412282024-11-07 12:41:54 +0100855 cluster_name = f"{cluster_name}-cluster"
856 elif cloud_type == "gcp":
garciadeblas1ca09852025-05-30 11:19:06 +0200857 nodegroup_name = f"nodepool-{cluster_name}"
garciadeblas72412282024-11-07 12:41:54 +0100858 bootstrap = op_params.get("bootstrap", True)
859 if cloud_type in ("azure", "gcp", "aws"):
860 checkings_list = [
861 {
862 "item": "kustomization",
863 "name": cluster_kustomization_name,
864 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +0100865 "condition": {
866 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
867 "value": "True",
868 },
yshahcb9075f2024-11-22 12:08:57 +0000869 "timeout": 1500,
garciadeblas72412282024-11-07 12:41:54 +0100870 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100871 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
garciadeblas72412282024-11-07 12:41:54 +0100872 },
873 {
874 "item": f"cluster_{cloud_type}",
875 "name": cluster_name,
876 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100877 "condition": {
878 "jsonpath_filter": "status.conditions[?(@.type=='Synced')].status",
879 "value": "True",
880 },
garciadeblas72412282024-11-07 12:41:54 +0100881 "timeout": self._checkloop_resource_timeout,
882 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100883 "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.CLUSTER",
garciadeblas72412282024-11-07 12:41:54 +0100884 },
885 {
886 "item": f"cluster_{cloud_type}",
887 "name": cluster_name,
888 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100889 "condition": {
890 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
891 "value": "True",
892 },
garciadeblas72412282024-11-07 12:41:54 +0100893 "timeout": self._checkloop_resource_timeout,
894 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100895 "resourceState": "IN_PROGRESS.RESOURCE_READY.CLUSTER",
garciadeblas72412282024-11-07 12:41:54 +0100896 },
897 {
898 "item": "kustomization",
899 "name": f"{cluster_kustomization_name}-bstrp-fluxctrl",
900 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +0100901 "condition": {
902 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
903 "value": "True",
904 },
yshahcb9075f2024-11-22 12:08:57 +0000905 "timeout": self._checkloop_resource_timeout,
garciadeblas72412282024-11-07 12:41:54 +0100906 "enable": bootstrap,
garciadeblas7eae6f42024-11-08 10:41:38 +0100907 "resourceState": "IN_PROGRESS.BOOTSTRAP_OK",
garciadeblas72412282024-11-07 12:41:54 +0100908 },
909 ]
910 else:
911 return False, "Not suitable VIM account to check cluster status"
rshrif8911b92025-06-11 18:19:07 +0000912 if cloud_type != "aws":
913 if nodegroup_name:
914 nodegroup_check = {
915 "item": f"nodegroup_{cloud_type}",
916 "name": nodegroup_name,
917 "namespace": "",
918 "condition": {
919 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
920 "value": "True",
921 },
922 "timeout": self._checkloop_resource_timeout,
923 "enable": True,
924 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEGROUP",
925 }
926 checkings_list.insert(3, nodegroup_check)
yshahcb9075f2024-11-22 12:08:57 +0000927 return await self.common_check_list(
928 op_id, checkings_list, "clusters", db_cluster
929 )
garciadeblas72412282024-11-07 12:41:54 +0100930
garciadeblasd41e9292025-03-11 15:44:25 +0100931 def update_default_profile_agekeys(self, db_cluster):
932 profiles = [
933 "infra_controller_profiles",
934 "infra_config_profiles",
935 "app_profiles",
936 "resource_profiles",
937 ]
938 self.logger.debug("the db_cluster is :{}".format(db_cluster))
939 for profile_type in profiles:
940 profile_id = db_cluster[profile_type]
941 db_collection = self.profile_collection_mapping[profile_type]
942 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
943 db_profile["age_pubkey"] = db_cluster["age_pubkey"]
944 db_profile["age_privkey"] = db_cluster["age_privkey"]
945 self.encrypt_age_keys(db_profile)
946 self.db.set_one(db_collection, {"_id": db_profile["_id"]}, db_profile)
947
garciadeblas96b94f52024-07-08 16:18:21 +0200948 def update_profile_state(self, db_cluster, workflow_status, resource_status):
rshri932105f2024-07-05 15:11:55 +0000949 profiles = [
950 "infra_controller_profiles",
951 "infra_config_profiles",
952 "app_profiles",
953 "resource_profiles",
954 ]
garciadeblasd41e9292025-03-11 15:44:25 +0100955 self.logger.debug("the db_cluster is :{}".format(db_cluster))
rshri932105f2024-07-05 15:11:55 +0000956 for profile_type in profiles:
garciadeblas96b94f52024-07-08 16:18:21 +0200957 profile_id = db_cluster[profile_type]
rshri948f7de2024-12-02 03:42:35 +0000958 db_collection = self.profile_collection_mapping[profile_type]
rshri932105f2024-07-05 15:11:55 +0000959 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
yshahcb9075f2024-11-22 12:08:57 +0000960 op_id = db_profile["operationHistory"][-1].get("op_id")
garciadeblas96b94f52024-07-08 16:18:21 +0200961 db_profile["state"] = db_cluster["state"]
962 db_profile["resourceState"] = db_cluster["resourceState"]
963 db_profile["operatingState"] = db_cluster["operatingState"]
rshri932105f2024-07-05 15:11:55 +0000964 db_profile = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000965 db_profile, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000966 )
rshri932105f2024-07-05 15:11:55 +0000967 self.db.set_one(db_collection, {"_id": db_profile["_id"]}, db_profile)
968
rshri948f7de2024-12-02 03:42:35 +0000969 async def delete(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000970 self.logger.info("cluster delete Enter")
rshri948f7de2024-12-02 03:42:35 +0000971
garciadeblas926ffac2025-02-12 16:45:40 +0100972 try:
973 # To get the cluster and op ids
974 cluster_id = params["cluster_id"]
975 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000976
garciadeblas926ffac2025-02-12 16:45:40 +0100977 # To initialize the operation states
978 self.initialize_operation(cluster_id, op_id)
rshri948f7de2024-12-02 03:42:35 +0000979
garciadeblas926ffac2025-02-12 16:45:40 +0100980 # To get the cluster
981 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
garciadeblas995cbf32024-12-18 12:54:00 +0100982
garciadeblas926ffac2025-02-12 16:45:40 +0100983 # To get the operation params details
984 op_params = self.get_operation_params(db_cluster, op_id)
garciadeblas995cbf32024-12-18 12:54:00 +0100985
garciadeblas926ffac2025-02-12 16:45:40 +0100986 # To copy the cluster content and decrypting fields to use in workflows
987 workflow_content = {
988 "cluster": self.decrypted_copy(db_cluster),
989 }
rshri948f7de2024-12-02 03:42:35 +0000990
garciadeblas926ffac2025-02-12 16:45:40 +0100991 # To get the vim account details
992 db_vim = self.db.get_one(
993 "vim_accounts", {"name": db_cluster["vim_account"]}
994 )
995 workflow_content["vim_account"] = db_vim
996 except Exception as e:
997 self.logger.debug(traceback.format_exc())
998 self.logger.debug(f"Exception: {e}", exc_info=True)
999 raise e
garciadeblasad6d1ba2025-01-22 16:02:18 +01001000
garciadeblas41859ce2025-02-04 16:08:51 +01001001 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001002 "delete_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001003 )
garciadeblas41859ce2025-02-04 16:08:51 +01001004 if not workflow_res:
1005 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1006 db_cluster["state"] = "FAILED_DELETION"
1007 db_cluster["resourceState"] = "ERROR"
1008 db_cluster = self.update_operation_history(
1009 db_cluster, op_id, workflow_status=False, resource_status=None
1010 )
1011 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1012 # Clean items used in the workflow, no matter if the workflow succeeded
1013 clean_status, clean_msg = await self.odu.clean_items_workflow(
1014 "delete_cluster", op_id, op_params, workflow_content
1015 )
1016 self.logger.info(
1017 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1018 )
1019 return
rshri932105f2024-07-05 15:11:55 +00001020
garciadeblas26d733c2025-02-03 16:12:43 +01001021 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001022 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001023 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001024 )
rshri932105f2024-07-05 15:11:55 +00001025 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001026 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001027 workflow_status, workflow_msg
1028 )
1029 )
1030 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001031 db_cluster["state"] = "DELETED"
1032 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +00001033 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001034 db_cluster["state"] = "FAILED_DELETION"
1035 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001036 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001037 db_cluster = self.update_operation_history(
1038 db_cluster, op_id, workflow_status, None
1039 )
garciadeblas96b94f52024-07-08 16:18:21 +02001040 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001041
garciadeblas98f9a3d2024-12-10 13:42:47 +01001042 # Clean items used in the workflow or in the cluster, no matter if the workflow succeeded
1043 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001044 "delete_cluster", op_id, op_params, workflow_content
garciadeblas98f9a3d2024-12-10 13:42:47 +01001045 )
1046 self.logger.info(
1047 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1048 )
1049
rshri932105f2024-07-05 15:11:55 +00001050 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001051 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001052 "delete_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001053 )
1054 self.logger.info(
1055 "resource_status is :{} and resource_msg is :{}".format(
1056 resource_status, resource_msg
1057 )
1058 )
1059 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001060 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +00001061 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001062 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001063
garciadeblas96b94f52024-07-08 16:18:21 +02001064 db_cluster["operatingState"] = "IDLE"
1065 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001066 db_cluster, op_id, workflow_status, resource_status
garciadeblas96b94f52024-07-08 16:18:21 +02001067 )
shahithya70a3fc92024-11-12 11:01:05 +00001068 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +02001069 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001070
yshahb36649f2025-02-28 09:01:51 +00001071 force = params.get("force", False)
1072 if force:
1073 force_delete_status = self.check_force_delete_and_delete_from_db(
1074 cluster_id, workflow_status, resource_status, force
1075 )
1076 if force_delete_status:
1077 return
1078
garciadeblas96b94f52024-07-08 16:18:21 +02001079 # To delete it from DB
1080 if db_cluster["state"] == "DELETED":
1081 self.delete_cluster(db_cluster)
rshri948f7de2024-12-02 03:42:35 +00001082
1083 # To delete it from k8scluster collection
1084 self.db.del_one("k8sclusters", {"name": db_cluster["name"]})
1085
rshri932105f2024-07-05 15:11:55 +00001086 return
1087
garciadeblasad6d1ba2025-01-22 16:02:18 +01001088 async def check_delete_cluster(self, op_id, op_params, content):
1089 self.logger.info(
1090 f"check_delete_cluster Operation {op_id}. Params: {op_params}."
1091 )
1092 self.logger.debug(f"Content: {content}")
1093 db_cluster = content["cluster"]
1094 cluster_name = db_cluster["git_name"].lower()
1095 cluster_kustomization_name = cluster_name
1096 db_vim_account = content["vim_account"]
1097 cloud_type = db_vim_account["vim_type"]
1098 if cloud_type == "aws":
1099 cluster_name = f"{cluster_name}-cluster"
1100 if cloud_type in ("azure", "gcp", "aws"):
1101 checkings_list = [
1102 {
1103 "item": "kustomization",
1104 "name": cluster_kustomization_name,
1105 "namespace": "managed-resources",
1106 "deleted": True,
1107 "timeout": self._checkloop_kustomization_timeout,
1108 "enable": True,
1109 "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
1110 },
1111 {
1112 "item": f"cluster_{cloud_type}",
1113 "name": cluster_name,
1114 "namespace": "",
1115 "deleted": True,
1116 "timeout": self._checkloop_resource_timeout,
1117 "enable": True,
1118 "resourceState": "IN_PROGRESS.RESOURCE_DELETED.CLUSTER",
1119 },
1120 ]
1121 else:
1122 return False, "Not suitable VIM account to check cluster status"
1123 return await self.common_check_list(
1124 op_id, checkings_list, "clusters", db_cluster
1125 )
1126
garciadeblas96b94f52024-07-08 16:18:21 +02001127 def delete_cluster(self, db_cluster):
1128 # Actually, item_content is equal to db_cluster
rshri932105f2024-07-05 15:11:55 +00001129 # detach profiles
1130 update_dict = None
1131 profiles_to_detach = [
1132 "infra_controller_profiles",
1133 "infra_config_profiles",
1134 "app_profiles",
1135 "resource_profiles",
1136 ]
rshri948f7de2024-12-02 03:42:35 +00001137 """
rshri932105f2024-07-05 15:11:55 +00001138 profiles_collection = {
1139 "infra_controller_profiles": "k8sinfra_controller",
1140 "infra_config_profiles": "k8sinfra_config",
1141 "app_profiles": "k8sapp",
1142 "resource_profiles": "k8sresource",
1143 }
rshri948f7de2024-12-02 03:42:35 +00001144 """
rshri932105f2024-07-05 15:11:55 +00001145 for profile_type in profiles_to_detach:
garciadeblas96b94f52024-07-08 16:18:21 +02001146 if db_cluster.get(profile_type):
garciadeblas96b94f52024-07-08 16:18:21 +02001147 profile_ids = db_cluster[profile_type]
rshri932105f2024-07-05 15:11:55 +00001148 profile_ids_copy = deepcopy(profile_ids)
rshri932105f2024-07-05 15:11:55 +00001149 for profile_id in profile_ids_copy:
rshri948f7de2024-12-02 03:42:35 +00001150 db_collection = self.profile_collection_mapping[profile_type]
rshri932105f2024-07-05 15:11:55 +00001151 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
garciadeblasc2552852024-10-22 12:39:32 +02001152 self.logger.debug("the db_profile is :{}".format(db_profile))
1153 self.logger.debug(
garciadeblas96b94f52024-07-08 16:18:21 +02001154 "the item_content name is :{}".format(db_cluster["name"])
rshri932105f2024-07-05 15:11:55 +00001155 )
garciadeblasc2552852024-10-22 12:39:32 +02001156 self.logger.debug(
rshri932105f2024-07-05 15:11:55 +00001157 "the db_profile name is :{}".format(db_profile["name"])
1158 )
garciadeblas96b94f52024-07-08 16:18:21 +02001159 if db_cluster["name"] == db_profile["name"]:
yshah6bad8892025-02-11 12:37:04 +00001160 self.delete_profile_ksu(profile_id, profile_type)
rshri932105f2024-07-05 15:11:55 +00001161 self.db.del_one(db_collection, {"_id": profile_id})
1162 else:
rshri932105f2024-07-05 15:11:55 +00001163 profile_ids.remove(profile_id)
1164 update_dict = {profile_type: profile_ids}
rshri932105f2024-07-05 15:11:55 +00001165 self.db.set_one(
garciadeblas96b94f52024-07-08 16:18:21 +02001166 "clusters", {"_id": db_cluster["_id"]}, update_dict
rshri932105f2024-07-05 15:11:55 +00001167 )
garciadeblas96b94f52024-07-08 16:18:21 +02001168 self.db.del_one("clusters", {"_id": db_cluster["_id"]})
rshri932105f2024-07-05 15:11:55 +00001169
rshri948f7de2024-12-02 03:42:35 +00001170 async def attach_profile(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001171 self.logger.info("profile attach Enter")
rshri948f7de2024-12-02 03:42:35 +00001172
garciadeblas995cbf32024-12-18 12:54:00 +01001173 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +00001174 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +00001175 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +00001176
1177 # To initialize the operation states
1178 self.initialize_operation(cluster_id, op_id)
1179
garciadeblas995cbf32024-12-18 12:54:00 +01001180 # To get the cluster
1181 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1182
1183 # To get the operation params details
1184 op_params = self.get_operation_params(db_cluster, op_id)
1185
1186 # To copy the cluster content and decrypting fields to use in workflows
1187 workflow_content = {
1188 "cluster": self.decrypted_copy(db_cluster),
1189 }
rshri948f7de2024-12-02 03:42:35 +00001190
1191 # To get the profile details
1192 profile_id = params["profile_id"]
1193 profile_type = params["profile_type"]
1194 profile_collection = self.profile_collection_mapping[profile_type]
1195 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
1196 db_profile["profile_type"] = profile_type
1197 # content["profile"] = db_profile
garciadeblas995cbf32024-12-18 12:54:00 +01001198 workflow_content["profile"] = db_profile
rshri932105f2024-07-05 15:11:55 +00001199
garciadeblas41859ce2025-02-04 16:08:51 +01001200 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001201 "attach_profile_to_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001202 )
garciadeblas41859ce2025-02-04 16:08:51 +01001203 if not workflow_res:
1204 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1205 db_cluster["resourceState"] = "ERROR"
1206 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1207 db_cluster = self.update_operation_history(
1208 db_cluster, op_id, workflow_status=False, resource_status=None
1209 )
1210 return
rshri932105f2024-07-05 15:11:55 +00001211
garciadeblas26d733c2025-02-03 16:12:43 +01001212 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001213 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001214 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001215 )
rshri932105f2024-07-05 15:11:55 +00001216 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001217 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001218 workflow_status, workflow_msg
1219 )
1220 )
1221 if workflow_status:
1222 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
1223 else:
1224 db_cluster["resourceState"] = "ERROR"
1225 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001226 db_cluster = self.update_operation_history(
1227 db_cluster, op_id, workflow_status, None
1228 )
rshri932105f2024-07-05 15:11:55 +00001229 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1230
1231 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001232 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001233 "attach_profile_to_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001234 )
1235 self.logger.info(
1236 "resource_status is :{} and resource_msg is :{}".format(
1237 resource_status, resource_msg
1238 )
1239 )
1240 if resource_status:
1241 db_cluster["resourceState"] = "READY"
1242 else:
1243 db_cluster["resourceState"] = "ERROR"
1244
1245 db_cluster["operatingState"] = "IDLE"
1246 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001247 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +00001248 )
rshri932105f2024-07-05 15:11:55 +00001249 profile_list = db_cluster[profile_type]
rshri932105f2024-07-05 15:11:55 +00001250 if resource_status:
rshri932105f2024-07-05 15:11:55 +00001251 profile_list.append(profile_id)
rshri932105f2024-07-05 15:11:55 +00001252 db_cluster[profile_type] = profile_list
shahithya70a3fc92024-11-12 11:01:05 +00001253 db_cluster["current_operation"] = None
rshri932105f2024-07-05 15:11:55 +00001254 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1255
1256 return
1257
rshri948f7de2024-12-02 03:42:35 +00001258 async def detach_profile(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001259 self.logger.info("profile dettach Enter")
rshri948f7de2024-12-02 03:42:35 +00001260
garciadeblas995cbf32024-12-18 12:54:00 +01001261 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +00001262 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +00001263 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +00001264
1265 # To initialize the operation states
1266 self.initialize_operation(cluster_id, op_id)
1267
garciadeblas995cbf32024-12-18 12:54:00 +01001268 # To get the cluster
1269 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1270
1271 # To get the operation params details
1272 op_params = self.get_operation_params(db_cluster, op_id)
1273
1274 # To copy the cluster content and decrypting fields to use in workflows
1275 workflow_content = {
1276 "cluster": self.decrypted_copy(db_cluster),
1277 }
rshri948f7de2024-12-02 03:42:35 +00001278
1279 # To get the profile details
1280 profile_id = params["profile_id"]
1281 profile_type = params["profile_type"]
1282 profile_collection = self.profile_collection_mapping[profile_type]
1283 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
1284 db_profile["profile_type"] = profile_type
garciadeblas995cbf32024-12-18 12:54:00 +01001285 workflow_content["profile"] = db_profile
rshri932105f2024-07-05 15:11:55 +00001286
garciadeblas41859ce2025-02-04 16:08:51 +01001287 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001288 "detach_profile_from_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001289 )
garciadeblas41859ce2025-02-04 16:08:51 +01001290 if not workflow_res:
1291 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1292 db_cluster["resourceState"] = "ERROR"
1293 db_cluster = self.update_operation_history(
1294 db_cluster, op_id, workflow_status=False, resource_status=None
1295 )
1296 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1297 return
rshri932105f2024-07-05 15:11:55 +00001298
garciadeblas26d733c2025-02-03 16:12:43 +01001299 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001300 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001301 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001302 )
rshri932105f2024-07-05 15:11:55 +00001303 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001304 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001305 workflow_status, workflow_msg
1306 )
1307 )
1308 if workflow_status:
1309 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
1310 else:
1311 db_cluster["resourceState"] = "ERROR"
1312 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001313 db_cluster = self.update_operation_history(
1314 db_cluster, op_id, workflow_status, None
1315 )
rshri932105f2024-07-05 15:11:55 +00001316 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1317
1318 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001319 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001320 "detach_profile_from_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001321 )
1322 self.logger.info(
1323 "resource_status is :{} and resource_msg is :{}".format(
1324 resource_status, resource_msg
1325 )
1326 )
1327 if resource_status:
1328 db_cluster["resourceState"] = "READY"
1329 else:
1330 db_cluster["resourceState"] = "ERROR"
1331
1332 db_cluster["operatingState"] = "IDLE"
1333 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001334 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +00001335 )
rshri932105f2024-07-05 15:11:55 +00001336 profile_list = db_cluster[profile_type]
1337 self.logger.info("profile list is : {}".format(profile_list))
1338 if resource_status:
rshri932105f2024-07-05 15:11:55 +00001339 profile_list.remove(profile_id)
rshri932105f2024-07-05 15:11:55 +00001340 db_cluster[profile_type] = profile_list
shahithya70a3fc92024-11-12 11:01:05 +00001341 db_cluster["current_operation"] = None
rshri932105f2024-07-05 15:11:55 +00001342 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1343
1344 return
1345
rshri948f7de2024-12-02 03:42:35 +00001346 async def register(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001347 self.logger.info("cluster register enter")
garciadeblas8bdb3d42025-04-04 00:19:13 +02001348 workflow_status = None
1349 resource_status = None
rshri932105f2024-07-05 15:11:55 +00001350
garciadeblas995cbf32024-12-18 12:54:00 +01001351 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +00001352 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +00001353 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +00001354
1355 # To initialize the operation states
1356 self.initialize_operation(cluster_id, op_id)
1357
garciadeblas995cbf32024-12-18 12:54:00 +01001358 # To get the cluster
1359 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1360
1361 # To get the operation params details
1362 op_params = self.get_operation_params(db_cluster, op_id)
1363
1364 # To copy the cluster content and decrypting fields to use in workflows
garciadeblas8bdb3d42025-04-04 00:19:13 +02001365 db_cluster_copy = self.decrypted_copy(db_cluster)
garciadeblas995cbf32024-12-18 12:54:00 +01001366 workflow_content = {
garciadeblas8bdb3d42025-04-04 00:19:13 +02001367 "cluster": db_cluster_copy,
garciadeblas995cbf32024-12-18 12:54:00 +01001368 }
rshric3564942024-11-12 18:12:38 +00001369
garciadeblas41859ce2025-02-04 16:08:51 +01001370 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001371 "register_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001372 )
garciadeblas41859ce2025-02-04 16:08:51 +01001373 if not workflow_res:
1374 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1375 db_cluster["state"] = "FAILED_CREATION"
1376 db_cluster["resourceState"] = "ERROR"
1377 db_cluster = self.update_operation_history(
1378 db_cluster, op_id, workflow_status=False, resource_status=None
1379 )
1380 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1381 # Clean items used in the workflow, no matter if the workflow succeeded
1382 clean_status, clean_msg = await self.odu.clean_items_workflow(
1383 "register_cluster", op_id, op_params, workflow_content
1384 )
1385 self.logger.info(
1386 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1387 )
1388 return
rshri932105f2024-07-05 15:11:55 +00001389
garciadeblas26d733c2025-02-03 16:12:43 +01001390 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001391 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001392 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001393 )
rshri932105f2024-07-05 15:11:55 +00001394 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001395 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001396 workflow_status, workflow_msg
1397 )
1398 )
1399 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001400 db_cluster["state"] = "CREATED"
1401 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +00001402 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001403 db_cluster["state"] = "FAILED_CREATION"
1404 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001405 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001406 db_cluster = self.update_operation_history(
1407 db_cluster, op_id, workflow_status, None
1408 )
garciadeblas96b94f52024-07-08 16:18:21 +02001409 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001410
garciadeblasdde3a312024-09-17 13:25:06 +02001411 # Clean items used in the workflow, no matter if the workflow succeeded
1412 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001413 "register_cluster", op_id, op_params, workflow_content
garciadeblasdde3a312024-09-17 13:25:06 +02001414 )
1415 self.logger.info(
1416 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1417 )
1418
rshri932105f2024-07-05 15:11:55 +00001419 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001420 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001421 "register_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001422 )
1423 self.logger.info(
1424 "resource_status is :{} and resource_msg is :{}".format(
1425 resource_status, resource_msg
1426 )
1427 )
1428 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001429 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +00001430 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001431 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001432
garciadeblas96b94f52024-07-08 16:18:21 +02001433 db_cluster["operatingState"] = "IDLE"
1434 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001435 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +00001436 )
shahithya70a3fc92024-11-12 11:01:05 +00001437 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +02001438 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri948f7de2024-12-02 03:42:35 +00001439
garciadeblas8bdb3d42025-04-04 00:19:13 +02001440 # Update default profile agekeys and state
1441 self.update_default_profile_agekeys(db_cluster_copy)
1442 self.update_profile_state(db_cluster, workflow_status, resource_status)
1443
rshri948f7de2024-12-02 03:42:35 +00001444 db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
1445 db_register["credentials"] = db_cluster["credentials"]
1446 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
1447
1448 if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
1449 # To call the lcm.py for registering the cluster in k8scluster lcm.
1450 register = await self.regist.create(db_register, order_id)
1451 self.logger.debug(f"Register is : {register}")
1452 else:
1453 db_register["_admin"]["operationalState"] = "ERROR"
1454 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
1455
rshri932105f2024-07-05 15:11:55 +00001456 return
1457
garciadeblasad6d1ba2025-01-22 16:02:18 +01001458 async def check_register_cluster(self, op_id, op_params, content):
1459 self.logger.info(
1460 f"check_register_cluster Operation {op_id}. Params: {op_params}."
1461 )
1462 # self.logger.debug(f"Content: {content}")
1463 db_cluster = content["cluster"]
1464 cluster_name = db_cluster["git_name"].lower()
1465 cluster_kustomization_name = cluster_name
1466 bootstrap = op_params.get("bootstrap", True)
1467 checkings_list = [
1468 {
1469 "item": "kustomization",
1470 "name": f"{cluster_kustomization_name}-bstrp-fluxctrl",
1471 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +01001472 "condition": {
1473 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
1474 "value": "True",
1475 },
garciadeblasad6d1ba2025-01-22 16:02:18 +01001476 "timeout": self._checkloop_kustomization_timeout,
1477 "enable": bootstrap,
1478 "resourceState": "IN_PROGRESS.BOOTSTRAP_OK",
1479 },
1480 ]
1481 return await self.common_check_list(
1482 op_id, checkings_list, "clusters", db_cluster
1483 )
1484
    async def deregister(self, params, order_id):
        """Deregister a cluster from GitOps management.

        Launches the "deregister_cluster" workflow, records the workflow and
        resource outcomes in the cluster record, removes the cluster through
        self.delete() and finally cleans the temporal items created for the
        workflow.

        :param params: dict with at least "cluster_id" and "operation_id"
        :param order_id: identifier of the order that triggered this task
        :return: None (all results are persisted in the "clusters" collection)
        """
        self.logger.info("cluster deregister enter")

        # To get the cluster and op ids
        cluster_id = params["cluster_id"]
        op_id = params["operation_id"]

        # To initialize the operation states
        self.initialize_operation(cluster_id, op_id)

        # To get the cluster
        db_cluster = self.db.get_one("clusters", {"_id": cluster_id})

        # To get the operation params details
        op_params = self.get_operation_params(db_cluster, op_id)

        # To copy the cluster content and decrypting fields to use in workflows
        workflow_content = {
            "cluster": self.decrypted_copy(db_cluster),
        }

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "deregister_cluster", op_id, op_params, workflow_content
        )
        if not workflow_res:
            # Workflow could not even be launched: mark the operation as
            # failed and bail out early.
            self.logger.error(f"Failed to launch workflow: {workflow_name}")
            db_cluster["state"] = "FAILED_DELETION"
            db_cluster["resourceState"] = "ERROR"
            db_cluster = self.update_operation_history(
                db_cluster, op_id, workflow_status=False, resource_status=None
            )
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
            return

        self.logger.info("workflow_name is: {}".format(workflow_name))
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            op_id, workflow_name
        )
        self.logger.info(
            "workflow_status is: {} and workflow_msg is: {}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_cluster["state"] = "FAILED_DELETION"
            db_cluster["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        db_cluster = self.update_operation_history(
            db_cluster, op_id, workflow_status, None
        )
        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        if workflow_status:
            # Workflow finished OK: now wait for the actual resources to be
            # deregistered and reflect the result in the record.
            resource_status, resource_msg = await self.check_resource_status(
                "deregister_cluster", op_id, op_params, workflow_content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                db_cluster["resourceState"] = "READY"
            else:
                db_cluster["resourceState"] = "ERROR"

            db_cluster = self.update_operation_history(
                db_cluster, op_id, workflow_status, resource_status
            )
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        # NOTE(review): delete() is invoked even when the workflow failed —
        # confirm a failed deregistration should still remove the record.
        await self.delete(params, order_id)
        # Clean items used in the workflow or in the cluster, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "deregister_cluster", op_id, op_params, workflow_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )
        return
rshri932105f2024-07-05 15:11:55 +00001567
rshri948f7de2024-12-02 03:42:35 +00001568 async def get_creds(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001569 self.logger.info("Cluster get creds Enter")
rshri948f7de2024-12-02 03:42:35 +00001570 cluster_id = params["cluster_id"]
1571 op_id = params["operation_id"]
1572 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
garciadeblas96b94f52024-07-08 16:18:21 +02001573 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
1574 if result:
1575 db_cluster["credentials"] = cluster_creds
yshahd940c652024-10-17 06:11:12 +00001576 op_len = 0
1577 for operations in db_cluster["operationHistory"]:
1578 if operations["op_id"] == op_id:
1579 db_cluster["operationHistory"][op_len]["result"] = result
1580 db_cluster["operationHistory"][op_len]["endDate"] = time()
1581 op_len += 1
shahithya70a3fc92024-11-12 11:01:05 +00001582 db_cluster["current_operation"] = None
yshahd940c652024-10-17 06:11:12 +00001583 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri948f7de2024-12-02 03:42:35 +00001584 self.logger.info("Cluster Get Creds Exit")
yshah771dea82024-07-05 15:11:49 +00001585 return
1586
rshri948f7de2024-12-02 03:42:35 +00001587 async def update(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001588 self.logger.info("Cluster update Enter")
rshri948f7de2024-12-02 03:42:35 +00001589 # To get the cluster details
1590 cluster_id = params["cluster_id"]
1591 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1592
1593 # To get the operation params details
1594 op_id = params["operation_id"]
1595 op_params = self.get_operation_params(db_cluster, op_id)
yshah771dea82024-07-05 15:11:49 +00001596
garciadeblas995cbf32024-12-18 12:54:00 +01001597 # To copy the cluster content and decrypting fields to use in workflows
1598 workflow_content = {
1599 "cluster": self.decrypted_copy(db_cluster),
1600 }
rshric3564942024-11-12 18:12:38 +00001601
1602 # vim account details
1603 db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
garciadeblas995cbf32024-12-18 12:54:00 +01001604 workflow_content["vim_account"] = db_vim
rshric3564942024-11-12 18:12:38 +00001605
garciadeblas41859ce2025-02-04 16:08:51 +01001606 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001607 "update_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001608 )
garciadeblas41859ce2025-02-04 16:08:51 +01001609 if not workflow_res:
1610 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1611 db_cluster["resourceState"] = "ERROR"
1612 db_cluster = self.update_operation_history(
1613 db_cluster, op_id, workflow_status=False, resource_status=None
1614 )
1615 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1616 # Clean items used in the workflow, no matter if the workflow succeeded
1617 clean_status, clean_msg = await self.odu.clean_items_workflow(
1618 "update_cluster", op_id, op_params, workflow_content
1619 )
1620 self.logger.info(
1621 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1622 )
1623 return
garciadeblas26d733c2025-02-03 16:12:43 +01001624 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001625 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001626 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001627 )
1628 self.logger.info(
1629 "Workflow Status: {} Workflow Message: {}".format(
1630 workflow_status, workflow_msg
yshah771dea82024-07-05 15:11:49 +00001631 )
garciadeblas96b94f52024-07-08 16:18:21 +02001632 )
1633
1634 if workflow_status:
1635 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
1636 else:
1637 db_cluster["resourceState"] = "ERROR"
1638
yshahcb9075f2024-11-22 12:08:57 +00001639 db_cluster = self.update_operation_history(
1640 db_cluster, op_id, workflow_status, None
1641 )
garciadeblas96b94f52024-07-08 16:18:21 +02001642 # self.logger.info("Db content: {}".format(db_content))
1643 # self.db.set_one(self.db_collection, {"_id": _id}, db_cluster)
1644 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1645
garciadeblas28bff0f2024-09-16 12:53:07 +02001646 # Clean items used in the workflow, no matter if the workflow succeeded
1647 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001648 "update_cluster", op_id, op_params, workflow_content
garciadeblas28bff0f2024-09-16 12:53:07 +02001649 )
1650 self.logger.info(
1651 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1652 )
garciadeblas96b94f52024-07-08 16:18:21 +02001653 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001654 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001655 "update_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001656 )
1657 self.logger.info(
1658 "Resource Status: {} Resource Message: {}".format(
1659 resource_status, resource_msg
1660 )
1661 )
yshah771dea82024-07-05 15:11:49 +00001662
1663 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001664 db_cluster["resourceState"] = "READY"
yshah771dea82024-07-05 15:11:49 +00001665 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001666 db_cluster["resourceState"] = "ERROR"
yshah771dea82024-07-05 15:11:49 +00001667
yshah0defcd52024-11-18 07:41:35 +00001668 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001669 db_cluster, op_id, workflow_status, resource_status
yshah0defcd52024-11-18 07:41:35 +00001670 )
1671
garciadeblas96b94f52024-07-08 16:18:21 +02001672 db_cluster["operatingState"] = "IDLE"
garciadeblas96b94f52024-07-08 16:18:21 +02001673 # self.logger.info("db_cluster: {}".format(db_cluster))
garciadeblas7cf480d2025-01-27 16:53:45 +01001674 # TODO: verify condition
garciadeblas96b94f52024-07-08 16:18:21 +02001675 # For the moment, if the workflow completed successfully, then we update the db accordingly.
1676 if workflow_status:
1677 if "k8s_version" in op_params:
1678 db_cluster["k8s_version"] = op_params["k8s_version"]
yshah0defcd52024-11-18 07:41:35 +00001679 if "node_count" in op_params:
garciadeblas96b94f52024-07-08 16:18:21 +02001680 db_cluster["node_count"] = op_params["node_count"]
yshah0defcd52024-11-18 07:41:35 +00001681 if "node_size" in op_params:
1682 db_cluster["node_count"] = op_params["node_size"]
garciadeblas96b94f52024-07-08 16:18:21 +02001683 # self.db.set_one(self.db_collection, {"_id": _id}, db_content)
shahithya70a3fc92024-11-12 11:01:05 +00001684 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +02001685 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
yshah771dea82024-07-05 15:11:49 +00001686 return
1687
garciadeblasad6d1ba2025-01-22 16:02:18 +01001688 async def check_update_cluster(self, op_id, op_params, content):
1689 self.logger.info(
1690 f"check_update_cluster Operation {op_id}. Params: {op_params}."
1691 )
1692 self.logger.debug(f"Content: {content}")
garciadeblasd7d8bde2025-01-27 18:31:06 +01001693 # return await self.check_dummy_operation(op_id, op_params, content)
1694 db_cluster = content["cluster"]
1695 cluster_name = db_cluster["git_name"].lower()
1696 cluster_kustomization_name = cluster_name
1697 db_vim_account = content["vim_account"]
1698 cloud_type = db_vim_account["vim_type"]
1699 if cloud_type == "aws":
1700 cluster_name = f"{cluster_name}-cluster"
1701 if cloud_type in ("azure", "gcp", "aws"):
1702 checkings_list = [
1703 {
1704 "item": "kustomization",
1705 "name": cluster_kustomization_name,
1706 "namespace": "managed-resources",
1707 "condition": {
1708 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
1709 "value": "True",
1710 },
1711 "timeout": self._checkloop_kustomization_timeout,
1712 "enable": True,
1713 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
1714 },
1715 ]
1716 else:
1717 return False, "Not suitable VIM account to check cluster status"
1718 # Scale operation
1719 if "node_count" in op_params:
garciadeblas1ca09852025-05-30 11:19:06 +02001720 if cloud_type in ("azure", "gcp"):
1721 checkings_list.append(
1722 {
1723 "item": f"cluster_{cloud_type}",
1724 "name": cluster_name,
1725 "namespace": "",
1726 "condition": {
1727 "jsonpath_filter": "status.atProvider.defaultNodePool[0].nodeCount",
1728 "value": f"{op_params['node_count']}",
1729 },
1730 "timeout": self._checkloop_resource_timeout * 3,
1731 "enable": True,
1732 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODE_COUNT.CLUSTER",
1733 }
1734 )
1735 elif cloud_type == "aws":
1736 checkings_list.append(
1737 {
1738 "item": f"nodegroup_{cloud_type}",
1739 "name": f"{cluster_name}-nodegroup",
1740 "namespace": "",
1741 "condition": {
1742 "jsonpath_filter": "status.atProvider.scalingConfig[0].desiredSize",
1743 "value": f"{op_params['node_count']}",
1744 },
1745 "timeout": self._checkloop_resource_timeout * 3,
1746 "enable": True,
1747 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODE_COUNT.CLUSTER",
1748 }
1749 )
1750
garciadeblasd7d8bde2025-01-27 18:31:06 +01001751 # Upgrade operation
1752 if "k8s_version" in op_params:
1753 checkings_list.append(
1754 {
1755 "item": f"cluster_{cloud_type}",
1756 "name": cluster_name,
1757 "namespace": "",
1758 "condition": {
1759 "jsonpath_filter": "status.atProvider.defaultNodePool[0].orchestratorVersion",
1760 "value": op_params["k8s_version"],
1761 },
1762 "timeout": self._checkloop_resource_timeout * 2,
1763 "enable": True,
1764 "resourceState": "IN_PROGRESS.RESOURCE_READY.K8S_VERSION.CLUSTER",
1765 }
1766 )
1767 return await self.common_check_list(
1768 op_id, checkings_list, "clusters", db_cluster
1769 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001770
yshah771dea82024-07-05 15:11:49 +00001771
class CloudCredentialsLcm(GitOpsLcm):
    """LCM handler for cloud credentials; records live in "vim_accounts"."""

    db_collection = "vim_accounts"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def add(self, params, order_id):
        """Create cloud credentials via the "create_cloud_credentials" workflow.

        :param params: VIM account content; "_id" doubles as the operation id
        :param order_id: identifier of the order that triggered this task
        :return: None (results are persisted in "vim_accounts")
        """
        self.logger.info("Cloud Credentials create")
        vim_id = params["_id"]
        # The VIM account id is reused as the operation id
        op_id = vim_id
        op_params = params
        db_content = self.db.get_one(self.db_collection, {"_id": vim_id})
        vim_config = db_content.get("config", {})
        # Decrypt credentials in memory so the workflow receives plain text;
        # the stored record is not re-written with the decrypted values here.
        self.db.encrypt_decrypt_fields(
            vim_config.get("credentials"),
            "decrypt",
            ["password", "secret"],
            schema_version=db_content["schema_version"],
            salt=vim_id,
        )

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_cloud_credentials", op_id, op_params, db_content
        )

        # NOTE(review): workflow_res is not checked before polling the
        # workflow status, unlike the cluster operations — confirm intent.
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            op_id, workflow_name
        )

        self.logger.info(
            "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "create_cloud_credentials", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, resource_msg = await self.check_resource_status(
                "create_cloud_credentials", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )

        # NOTE(review): the account is marked ENABLED even when the workflow
        # or resource check did not succeed — confirm this is intentional.
        db_content["_admin"]["operationalState"] = "ENABLED"
        for operation in db_content["_admin"]["operations"]:
            if operation["lcmOperationType"] == "create":
                operation["operationState"] = "ENABLED"
        self.logger.info("Content : {}".format(db_content))
        self.db.set_one("vim_accounts", {"_id": db_content["_id"]}, db_content)
        return

    async def edit(self, params, order_id):
        """Update cloud credentials via the "update_cloud_credentials" workflow.

        :param params: VIM account content; "_id" doubles as the operation id
        :param order_id: identifier of the order that triggered this task
        :return: None
        """
        self.logger.info("Cloud Credentials Update")
        vim_id = params["_id"]
        op_id = vim_id
        op_params = params
        db_content = self.db.get_one("vim_accounts", {"_id": vim_id})
        vim_config = db_content.get("config", {})
        # Decrypt credentials in memory for the workflow (see add())
        self.db.encrypt_decrypt_fields(
            vim_config.get("credentials"),
            "decrypt",
            ["password", "secret"],
            schema_version=db_content["schema_version"],
            salt=vim_id,
        )

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "update_cloud_credentials", op_id, op_params, db_content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            op_id, workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "update_cloud_credentials", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, resource_msg = await self.check_resource_status(
                "update_cloud_credentials", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )
        return

    async def remove(self, params, order_id):
        """Delete cloud credentials via the "delete_cloud_credentials" workflow.

        :param params: VIM account content; "_id" doubles as the operation id
        :param order_id: identifier of the order that triggered this task
        :return: None (the record is removed from "vim_accounts")
        """
        self.logger.info("Cloud Credentials remove")
        vim_id = params["_id"]
        op_id = vim_id
        op_params = params
        db_content = self.db.get_one("vim_accounts", {"_id": vim_id})

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_cloud_credentials", op_id, op_params, db_content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            op_id, workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
        )

        if workflow_status:
            resource_status, resource_msg = await self.check_resource_status(
                "delete_cloud_credentials", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )
        # NOTE(review): the record is deleted regardless of workflow/resource
        # outcome — confirm failed deletions should still drop the DB entry.
        self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
        return
1908
rshri932105f2024-07-05 15:11:55 +00001909
class K8sAppLcm(GitOpsLcm):
    """LCM handler for Kubernetes application profiles ("k8sapp" collection)."""

    db_collection = "k8sapp"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an app profile by launching the "create_profile" workflow.

        :param params: dict with at least "operation_id" and "profile_id"
        :param order_id: identifier of the order that triggered this task
        :return: None (results are persisted in the "k8sapp" collection)
        """
        self.logger.info("App Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sapp", {"_id": profile_id})
        content["profile_type"] = "applications"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one("k8sapp", {"_id": content["_id"]}, content)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Fix: default resource_status so the exit log below cannot raise
        # NameError when the workflow did not complete successfully.
        resource_status = None
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
        self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(f"App Create Exit with resource status: {resource_status}")
        return

    async def delete(self, params, order_id):
        """Delete an app profile by launching the "delete_profile" workflow.

        Honours the "force" flag in params: when set, the profile can be
        dropped from the DB even if the workflow/resource checks failed.

        :param params: dict with at least "operation_id" and "profile_id";
            optionally "force" (default False)
        :param order_id: identifier of the order that triggered this task
        :return: None (the record is removed from "k8sapp" on success)
        """
        self.logger.info("App delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sapp", {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Fix: default resource_status so the force-delete call and the logs
        # below cannot raise NameError when the workflow failed — which is
        # precisely the situation force deletion is meant to handle.
        resource_status = None
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        force = params.get("force", False)
        if force:
            force_delete_status = self.check_force_delete_and_delete_from_db(
                profile_id, workflow_status, resource_status, force
            )
            if force_delete_status:
                return

        self.logger.info(f"Resource status: {resource_status}")
        if resource_status:
            content["state"] = "DELETED"
            profile_type = self.profile_type_mapping[content["profile_type"]]
            self.delete_profile_ksu(profile_id, profile_type)
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(f"App Delete Exit with resource status: {resource_status}")
        return
1995
1996
garciadeblas72412282024-11-07 12:41:54 +01001997class K8sResourceLcm(GitOpsLcm):
rshri948f7de2024-12-02 03:42:35 +00001998 db_collection = "k8sresource"
1999
    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging.

        All initialization is delegated to the GitOpsLcm base class.

        :param msg: message bus handler passed through to the base class
        :param lcm_tasks: LCM task register passed through to the base class
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage'
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)
rshri932105f2024-07-05 15:11:55 +00002007
rshri948f7de2024-12-02 03:42:35 +00002008 async def create(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00002009 self.logger.info("Resource Create Enter")
2010
rshri948f7de2024-12-02 03:42:35 +00002011 op_id = params["operation_id"]
2012 profile_id = params["profile_id"]
2013
2014 # To initialize the operation states
2015 self.initialize_operation(profile_id, op_id)
2016
2017 content = self.db.get_one("k8sresource", {"_id": profile_id})
2018 content["profile_type"] = "managed-resources"
2019 op_params = self.get_operation_params(content, op_id)
2020 self.db.set_one("k8sresource", {"_id": content["_id"]}, content)
2021
garciadeblas41859ce2025-02-04 16:08:51 +01002022 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002023 "create_profile", op_id, op_params, content
2024 )
garciadeblas26d733c2025-02-03 16:12:43 +01002025 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00002026
garciadeblas33b36e72025-01-17 12:49:19 +01002027 workflow_status = await self.check_workflow_and_update_db(
2028 op_id, workflow_name, content
2029 )
rshri932105f2024-07-05 15:11:55 +00002030
2031 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002032 resource_status, content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002033 "create_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00002034 )
yshah564ec9c2024-11-29 07:33:32 +00002035 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
2036 self.logger.info(
2037 f"Resource Create Exit with resource status: {resource_status}"
rshri932105f2024-07-05 15:11:55 +00002038 )
rshri932105f2024-07-05 15:11:55 +00002039 return
2040
rshri948f7de2024-12-02 03:42:35 +00002041 async def delete(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00002042 self.logger.info("Resource delete Enter")
rshri948f7de2024-12-02 03:42:35 +00002043
2044 op_id = params["operation_id"]
2045 profile_id = params["profile_id"]
2046
2047 # To initialize the operation states
2048 self.initialize_operation(profile_id, op_id)
2049
2050 content = self.db.get_one("k8sresource", {"_id": profile_id})
2051 op_params = self.get_operation_params(content, op_id)
rshri932105f2024-07-05 15:11:55 +00002052
garciadeblas41859ce2025-02-04 16:08:51 +01002053 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002054 "delete_profile", op_id, op_params, content
2055 )
garciadeblas26d733c2025-02-03 16:12:43 +01002056 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00002057
garciadeblas33b36e72025-01-17 12:49:19 +01002058 workflow_status = await self.check_workflow_and_update_db(
2059 op_id, workflow_name, content
2060 )
rshri932105f2024-07-05 15:11:55 +00002061
2062 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002063 resource_status, content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002064 "delete_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00002065 )
rshri932105f2024-07-05 15:11:55 +00002066
yshahb36649f2025-02-28 09:01:51 +00002067 force = params.get("force", False)
2068 if force:
2069 force_delete_status = self.check_force_delete_and_delete_from_db(
2070 profile_id, workflow_status, resource_status, force
2071 )
2072 if force_delete_status:
2073 return
2074
yshah564ec9c2024-11-29 07:33:32 +00002075 if resource_status:
2076 content["state"] = "DELETED"
yshah6bad8892025-02-11 12:37:04 +00002077 profile_type = self.profile_type_mapping[content["profile_type"]]
2078 self.delete_profile_ksu(profile_id, profile_type)
yshah564ec9c2024-11-29 07:33:32 +00002079 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
2080 self.db.del_one(self.db_collection, {"_id": content["_id"]})
2081 self.logger.info(
2082 f"Resource Delete Exit with resource status: {resource_status}"
garciadeblas96b94f52024-07-08 16:18:21 +02002083 )
rshri932105f2024-07-05 15:11:55 +00002084 return
2085
2086
class K8sInfraControllerLcm(GitOpsLcm):
    """LCM handler for infra-controller profiles (GitOps)."""

    db_collection = "k8sinfra_controller"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an infra-controllers profile by launching the ODU workflow.

        :param params: dict with "operation_id" and "profile_id"
        :param order_id: order identifier from the message bus (kept for interface)
        :return: None
        """
        self.logger.info("Infra controller Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_controller", {"_id": profile_id})
        content["profile_type"] = "infra-controllers"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, content)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False so the exit log below cannot reference an
        # undefined name when the workflow check fails.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
        self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(
            f"Infra Controller Create Exit with resource status: {resource_status}"
        )
        return

    async def delete(self, params, order_id):
        """Delete an infra-controllers profile via the ODU workflow.

        Honours params["force"] to remove the DB record even when the
        workflow or resource checks failed.

        :param params: dict with "operation_id", "profile_id" and optional "force"
        :param order_id: order identifier from the message bus (kept for interface)
        :return: None
        """
        self.logger.info("Infra controller delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_controller", {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False: the force-delete path and the exit log read this
        # name even when the workflow failed (original code raised NameError).
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        force = params.get("force", False)
        if force:
            force_delete_status = self.check_force_delete_and_delete_from_db(
                profile_id, workflow_status, resource_status, force
            )
            if force_delete_status:
                return

        if resource_status:
            content["state"] = "DELETED"
            profile_type = self.profile_type_mapping[content["profile_type"]]
            self.delete_profile_ksu(profile_id, profile_type)
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(
            f"Infra Controller Delete Exit with resource status: {resource_status}"
        )
        return
2175
2176
class K8sInfraConfigLcm(GitOpsLcm):
    """LCM handler for infra-config profiles (GitOps)."""

    db_collection = "k8sinfra_config"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an infra-configs profile by launching the ODU workflow.

        :param params: dict with "operation_id" and "profile_id"
        :param order_id: order identifier from the message bus (kept for interface)
        :return: None
        """
        self.logger.info("Infra config Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_config", {"_id": profile_id})
        content["profile_type"] = "infra-configs"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, content)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False so the exit log below cannot reference an
        # undefined name when the workflow check fails.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
        self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(
            f"Infra Config Create Exit with resource status: {resource_status}"
        )
        return

    async def delete(self, params, order_id):
        """Delete an infra-configs profile via the ODU workflow.

        Honours params["force"] to remove the DB record even when the
        workflow or resource checks failed.

        :param params: dict with "operation_id", "profile_id" and optional "force"
        :param order_id: order identifier from the message bus (kept for interface)
        :return: None
        """
        self.logger.info("Infra config delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_config", {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False: the force-delete path and the exit log read this
        # name even when the workflow failed (original code raised NameError).
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        force = params.get("force", False)
        if force:
            force_delete_status = self.check_force_delete_and_delete_from_db(
                profile_id, workflow_status, resource_status, force
            )
            if force_delete_status:
                return

        if resource_status:
            content["state"] = "DELETED"
            profile_type = self.profile_type_mapping[content["profile_type"]]
            self.delete_profile_ksu(profile_id, profile_type)
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(
            f"Infra Config Delete Exit with resource status: {resource_status}"
        )

        return
yshah771dea82024-07-05 15:11:49 +00002266
2267
class OkaLcm(GitOpsLcm):
    """LCM handler for OSM Kubernetes Application (OKA) packages."""

    db_collection = "okas"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an OKA by launching the "create_oka" ODU workflow.

        :param params: dict with "operation_id" and "oka_id"
        :param order_id: order identifier from the message bus (kept for interface)
        :return: None
        """
        self.logger.info("OKA Create Enter")
        op_id = params["operation_id"]
        oka_id = params["oka_id"]
        self.initialize_operation(oka_id, op_id)
        db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
        op_params = self.get_operation_params(db_content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_oka", op_id, op_params, db_content
        )

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_content
        )

        # Default to False so the exit log below cannot reference an
        # undefined name when the workflow check fails.
        resource_status = False
        if workflow_status:
            resource_status, db_content = await self.check_resource_and_update_db(
                "create_oka", op_id, op_params, db_content
            )
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "create_oka", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )
        self.logger.info(f"OKA Create Exit with resource status: {resource_status}")
        return

    async def edit(self, params, order_id):
        """Update an OKA via the "update_oka" ODU workflow.

        :param params: dict with "operation_id" and "oka_id"
        :param order_id: order identifier from the message bus (kept for interface)
        :return: None
        """
        self.logger.info("OKA Edit Enter")
        op_id = params["operation_id"]
        oka_id = params["oka_id"]
        self.initialize_operation(oka_id, op_id)
        db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
        op_params = self.get_operation_params(db_content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "update_oka", op_id, op_params, db_content
        )
        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_content
        )

        # Default to False so the exit log below cannot reference an
        # undefined name when the workflow check fails.
        resource_status = False
        if workflow_status:
            resource_status, db_content = await self.check_resource_and_update_db(
                "update_oka", op_id, op_params, db_content
            )
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "update_oka", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )
        self.logger.info(f"OKA Update Exit with resource status: {resource_status}")
        return

    async def delete(self, params, order_id):
        """Delete an OKA via the "delete_oka" ODU workflow.

        Honours params["force"] to remove the DB record even when the
        workflow or resource checks failed.

        :param params: dict with "operation_id", "oka_id" and optional "force"
        :param order_id: order identifier from the message bus (kept for interface)
        :return: None
        """
        self.logger.info("OKA delete Enter")
        op_id = params["operation_id"]
        oka_id = params["oka_id"]
        self.initialize_operation(oka_id, op_id)
        db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
        op_params = self.get_operation_params(db_content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_oka", op_id, op_params, db_content
        )
        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_content
        )

        # Default to False: the force-delete path and the exit log read this
        # name even when the workflow failed (original code raised NameError).
        resource_status = False
        if workflow_status:
            resource_status, db_content = await self.check_resource_and_update_db(
                "delete_oka", op_id, op_params, db_content
            )

        force = params.get("force", False)
        if force:
            force_delete_status = self.check_force_delete_and_delete_from_db(
                oka_id, workflow_status, resource_status, force
            )
            if force_delete_status:
                return

        if resource_status:
            # Bug fix: the original used "==", a no-op comparison; the state
            # must be assigned before the record is persisted and removed.
            db_content["state"] = "DELETED"
            self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
            self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "delete_oka", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )
        self.logger.info(f"OKA Delete Exit with resource status: {resource_status}")
        return
2382
2383
garciadeblas72412282024-11-07 12:41:54 +01002384class KsuLcm(GitOpsLcm):
yshah771dea82024-07-05 15:11:49 +00002385 db_collection = "ksus"
2386
2387 def __init__(self, msg, lcm_tasks, config):
2388 """
2389 Init, Connect to database, filesystem storage, and messaging
2390 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
2391 :return: None
2392 """
garciadeblas72412282024-11-07 12:41:54 +01002393 super().__init__(msg, lcm_tasks, config)
garciadeblasad6d1ba2025-01-22 16:02:18 +01002394 self._workflows = {
2395 "create_ksus": {
2396 "check_resource_function": self.check_create_ksus,
2397 },
2398 "delete_ksus": {
2399 "check_resource_function": self.check_delete_ksus,
2400 },
2401 }
2402
2403 def get_dbclusters_from_profile(self, profile_id, profile_type):
2404 cluster_list = []
2405 db_clusters = self.db.get_list("clusters")
2406 self.logger.info(f"Getting list of clusters for {profile_type} {profile_id}")
2407 for db_cluster in db_clusters:
2408 if profile_id in db_cluster.get(profile_type, []):
2409 self.logger.info(
2410 f"Profile {profile_id} found in cluster {db_cluster['name']}"
2411 )
2412 cluster_list.append(db_cluster)
2413 return cluster_list
yshah771dea82024-07-05 15:11:49 +00002414
    async def create(self, params, order_id):
        """Create a batch of KSUs through a single ODU workflow.

        Enriches each KSU's operation params with profile data and OKA
        catalog paths, launches one "create_ksus" workflow for the whole
        batch, then updates workflow/resource status per KSU in the DB.

        :param params: dict with "operation_id" and "ksus_list" (KSU _ids)
        :param order_id: order identifier from the message bus (unused here)
        :return: None
        """
        self.logger.info("ksu Create Enter")
        db_content = []  # KSU documents, in the same order as params["ksus_list"]
        op_params = []  # enriched per-KSU operation params, parallel to db_content
        op_id = params["operation_id"]
        for ksu_id in params["ksus_list"]:
            self.logger.info("Ksu ID: {}".format(ksu_id))
            self.initialize_operation(ksu_id, op_id)
            db_ksu = self.db.get_one(self.db_collection, {"_id": ksu_id})
            self.logger.info("Db KSU: {}".format(db_ksu))
            db_content.append(db_ksu)
            ksu_params = {}
            ksu_params = self.get_operation_params(db_ksu, op_id)
            self.logger.info("Operation Params: {}".format(ksu_params))
            # Update ksu_params["profile"] with profile name and age-pubkey
            profile_type = ksu_params["profile"]["profile_type"]
            profile_id = ksu_params["profile"]["_id"]
            profile_collection = self.profile_collection_mapping[profile_type]
            db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
            # db_profile is decrypted inline
            # No need to use decrypted_copy because db_profile won't be updated.
            self.decrypt_age_keys(db_profile)
            ksu_params["profile"]["name"] = db_profile["name"]
            ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
            # Update ksu_params["oka"] with sw_catalog_path (when missing)
            # TODO: remove this in favor of doing it in ODU workflow
            for oka in ksu_params["oka"]:
                if "sw_catalog_path" not in oka:
                    oka_id = oka["_id"]
                    db_oka = self.db.get_one("okas", {"_id": oka_id})
                    # Map the OKA's profile type to its catalog folder name
                    oka_type = MAP_PROFILE[
                        db_oka.get("profile_type", "infra_controller_profiles")
                    ]
                    oka[
                        "sw_catalog_path"
                    ] = f"{oka_type}/{db_oka['git_name'].lower()}/templates"
            op_params.append(ksu_params)

        # A single workflow is launched for all KSUs
        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_ksus", op_id, op_params, db_content
        )
        # Update workflow status in all KSUs
        wf_status_list = []
        for db_ksu, ksu_params in zip(db_content, op_params):
            workflow_status = await self.check_workflow_and_update_db(
                op_id, workflow_name, db_ksu
            )
            wf_status_list.append(workflow_status)
        # Update resource status in all KSUs
        # TODO: Is an operation correct if n KSUs are right and 1 is not OK?
        res_status_list = []
        for db_ksu, ksu_params, wf_status in zip(db_content, op_params, wf_status_list):
            if wf_status:
                resource_status, db_ksu = await self.check_resource_and_update_db(
                    "create_ksus", op_id, ksu_params, db_ksu
                )
            else:
                # Workflow failed for this KSU: its resources are not OK
                resource_status = False
            res_status_list.append(resource_status)
            self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "create_ksus", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )
        self.logger.info(f"KSU Create EXIT with Resource Status {res_status_list}")
        return
2486
yshah564ec9c2024-11-29 07:33:32 +00002487 async def edit(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02002488 self.logger.info("ksu edit Enter")
yshah564ec9c2024-11-29 07:33:32 +00002489 db_content = []
2490 op_params = []
2491 op_id = params["operation_id"]
2492 for ksu_id in params["ksus_list"]:
2493 self.initialize_operation(ksu_id, op_id)
2494 db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
2495 db_content.append(db_ksu)
2496 ksu_params = {}
2497 ksu_params = self.get_operation_params(db_ksu, op_id)
2498 # Update ksu_params["profile"] with profile name and age-pubkey
2499 profile_type = ksu_params["profile"]["profile_type"]
2500 profile_id = ksu_params["profile"]["_id"]
2501 profile_collection = self.profile_collection_mapping[profile_type]
2502 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
garciadeblasd41e9292025-03-11 15:44:25 +01002503 # db_profile is decrypted inline
2504 # No need to use decrypted_copy because db_profile won't be updated.
2505 self.decrypt_age_keys(db_profile)
yshah564ec9c2024-11-29 07:33:32 +00002506 ksu_params["profile"]["name"] = db_profile["name"]
2507 ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
2508 # Update ksu_params["oka"] with sw_catalog_path (when missing)
garciadeblas9f7c9c52025-01-17 01:06:05 +01002509 # TODO: remove this in favor of doing it in ODU workflow
yshah564ec9c2024-11-29 07:33:32 +00002510 for oka in ksu_params["oka"]:
2511 if "sw_catalog_path" not in oka:
2512 oka_id = oka["_id"]
2513 db_oka = self.db.get_one("okas", {"_id": oka_id})
yshah2f39b8a2024-12-19 11:06:24 +00002514 oka_type = MAP_PROFILE[
2515 db_oka.get("profile_type", "infra_controller_profiles")
2516 ]
garciadeblas9f7c9c52025-01-17 01:06:05 +01002517 oka[
2518 "sw_catalog_path"
2519 ] = f"{oka_type}/{db_oka['git_name']}/templates"
yshah564ec9c2024-11-29 07:33:32 +00002520 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00002521
garciadeblas41859ce2025-02-04 16:08:51 +01002522 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002523 "update_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002524 )
yshah771dea82024-07-05 15:11:49 +00002525
yshah564ec9c2024-11-29 07:33:32 +00002526 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01002527 workflow_status = await self.check_workflow_and_update_db(
2528 op_id, workflow_name, db_ksu
2529 )
yshah564ec9c2024-11-29 07:33:32 +00002530
garciadeblas96b94f52024-07-08 16:18:21 +02002531 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002532 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00002533 "update_ksus", op_id, ksu_params, db_ksu
garciadeblas96b94f52024-07-08 16:18:21 +02002534 )
garciadeblas96b94f52024-07-08 16:18:21 +02002535 db_ksu["name"] = ksu_params["name"]
2536 db_ksu["description"] = ksu_params["description"]
2537 db_ksu["profile"]["profile_type"] = ksu_params["profile"][
2538 "profile_type"
2539 ]
2540 db_ksu["profile"]["_id"] = ksu_params["profile"]["_id"]
2541 db_ksu["oka"] = ksu_params["oka"]
2542 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
2543
yshah564ec9c2024-11-29 07:33:32 +00002544 # Clean items used in the workflow, no matter if the workflow succeeded
2545 clean_status, clean_msg = await self.odu.clean_items_workflow(
2546 "create_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002547 )
2548 self.logger.info(
yshah564ec9c2024-11-29 07:33:32 +00002549 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
garciadeblas96b94f52024-07-08 16:18:21 +02002550 )
yshah564ec9c2024-11-29 07:33:32 +00002551 self.logger.info(f"KSU Update EXIT with Resource Status {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002552 return
2553
yshah564ec9c2024-11-29 07:33:32 +00002554 async def delete(self, params, order_id):
2555 self.logger.info("ksu delete Enter")
2556 db_content = []
2557 op_params = []
2558 op_id = params["operation_id"]
2559 for ksu_id in params["ksus_list"]:
2560 self.initialize_operation(ksu_id, op_id)
2561 db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
2562 db_content.append(db_ksu)
2563 ksu_params = {}
2564 ksu_params["profile"] = {}
2565 ksu_params["profile"]["profile_type"] = db_ksu["profile"]["profile_type"]
2566 ksu_params["profile"]["_id"] = db_ksu["profile"]["_id"]
garciadeblasd41e9292025-03-11 15:44:25 +01002567 # Update ksu_params["profile"] with profile name
yshah564ec9c2024-11-29 07:33:32 +00002568 profile_type = ksu_params["profile"]["profile_type"]
2569 profile_id = ksu_params["profile"]["_id"]
2570 profile_collection = self.profile_collection_mapping[profile_type]
2571 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
2572 ksu_params["profile"]["name"] = db_profile["name"]
yshah564ec9c2024-11-29 07:33:32 +00002573 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00002574
garciadeblas41859ce2025-02-04 16:08:51 +01002575 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002576 "delete_ksus", op_id, op_params, db_content
2577 )
2578
2579 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01002580 workflow_status = await self.check_workflow_and_update_db(
2581 op_id, workflow_name, db_ksu
2582 )
yshah564ec9c2024-11-29 07:33:32 +00002583
2584 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002585 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00002586 "delete_ksus", op_id, ksu_params, db_ksu
2587 )
2588
yshahb36649f2025-02-28 09:01:51 +00002589 force = params.get("force", False)
2590 if force:
2591 force_delete_status = self.check_force_delete_and_delete_from_db(
2592 db_ksu["_id"], workflow_status, resource_status, force
2593 )
2594 if force_delete_status:
2595 return
2596
yshah564ec9c2024-11-29 07:33:32 +00002597 if resource_status:
2598 db_ksu["state"] == "DELETED"
yshah5e109152025-05-19 12:29:01 +00002599 self.delete_ksu_dependency(db_ksu["_id"], db_ksu)
yshah564ec9c2024-11-29 07:33:32 +00002600 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
2601 self.db.del_one(self.db_collection, {"_id": db_ksu["_id"]})
2602
2603 self.logger.info(f"KSU Delete Exit with resource status: {resource_status}")
2604 return
2605
2606 async def clone(self, params, order_id):
2607 self.logger.info("ksu clone Enter")
2608 op_id = params["operation_id"]
2609 ksus_id = params["ksus_list"][0]
2610 self.initialize_operation(ksus_id, op_id)
2611 db_content = self.db.get_one(self.db_collection, {"_id": ksus_id})
2612 op_params = self.get_operation_params(db_content, op_id)
garciadeblas41859ce2025-02-04 16:08:51 +01002613 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002614 "clone_ksus", op_id, op_params, db_content
2615 )
yshah564ec9c2024-11-29 07:33:32 +00002616
garciadeblas33b36e72025-01-17 12:49:19 +01002617 workflow_status = await self.check_workflow_and_update_db(
2618 op_id, workflow_name, db_content
2619 )
yshah771dea82024-07-05 15:11:49 +00002620
2621 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002622 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002623 "clone_ksus", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002624 )
garciadeblas96b94f52024-07-08 16:18:21 +02002625 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
yshah564ec9c2024-11-29 07:33:32 +00002626
2627 self.logger.info(f"KSU Clone Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002628 return
2629
yshah564ec9c2024-11-29 07:33:32 +00002630 async def move(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02002631 self.logger.info("ksu move Enter")
yshah564ec9c2024-11-29 07:33:32 +00002632 op_id = params["operation_id"]
2633 ksus_id = params["ksus_list"][0]
2634 self.initialize_operation(ksus_id, op_id)
2635 db_content = self.db.get_one(self.db_collection, {"_id": ksus_id})
2636 op_params = self.get_operation_params(db_content, op_id)
garciadeblas41859ce2025-02-04 16:08:51 +01002637 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002638 "move_ksus", op_id, op_params, db_content
2639 )
yshah564ec9c2024-11-29 07:33:32 +00002640
garciadeblas33b36e72025-01-17 12:49:19 +01002641 workflow_status = await self.check_workflow_and_update_db(
2642 op_id, workflow_name, db_content
2643 )
yshah771dea82024-07-05 15:11:49 +00002644
2645 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002646 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002647 "move_ksus", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002648 )
garciadeblas96b94f52024-07-08 16:18:21 +02002649 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
yshah564ec9c2024-11-29 07:33:32 +00002650
2651 self.logger.info(f"KSU Move Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002652 return
garciadeblasad6d1ba2025-01-22 16:02:18 +01002653
2654 async def check_create_ksus(self, op_id, op_params, content):
2655 self.logger.info(f"check_create_ksus Operation {op_id}. Params: {op_params}.")
2656 self.logger.debug(f"Content: {content}")
2657 db_ksu = content
2658 kustomization_name = db_ksu["git_name"].lower()
2659 oka_list = op_params["oka"]
2660 oka_item = oka_list[0]
2661 oka_params = oka_item.get("transformation", {})
garciadeblas167dde32025-02-14 00:44:58 +01002662 kustomization_ns = oka_params.get("kustomization_namespace", "flux-system")
garciadeblasad6d1ba2025-01-22 16:02:18 +01002663 profile_id = op_params.get("profile", {}).get("_id")
2664 profile_type = op_params.get("profile", {}).get("profile_type")
2665 self.logger.info(
2666 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2667 )
2668 dbcluster_list = self.get_dbclusters_from_profile(profile_id, profile_type)
2669 if not dbcluster_list:
2670 self.logger.info(f"No clusters found for profile {profile_id}.")
2671 for db_cluster in dbcluster_list:
2672 try:
2673 self.logger.info(
garciadeblasae238482025-02-03 08:44:19 +01002674 f"Checking status of KSU {db_ksu['name']} in cluster {db_cluster['name']}."
garciadeblasad6d1ba2025-01-22 16:02:18 +01002675 )
2676 cluster_kubectl = self.cluster_kubectl(db_cluster)
2677 checkings_list = [
2678 {
2679 "item": "kustomization",
2680 "name": kustomization_name,
garciadeblas167dde32025-02-14 00:44:58 +01002681 "namespace": kustomization_ns,
garciadeblas7cf480d2025-01-27 16:53:45 +01002682 "condition": {
2683 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
2684 "value": "True",
2685 },
garciadeblasad6d1ba2025-01-22 16:02:18 +01002686 "timeout": self._checkloop_kustomization_timeout,
2687 "enable": True,
2688 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
2689 },
2690 ]
2691 self.logger.info(
2692 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2693 )
2694 result, message = await self.common_check_list(
garciadeblas6d8acf32025-02-06 13:34:37 +01002695 op_id, checkings_list, "ksus", db_ksu, kubectl_obj=cluster_kubectl
garciadeblasad6d1ba2025-01-22 16:02:18 +01002696 )
2697 if not result:
2698 return False, message
2699 except Exception as e:
2700 self.logger.error(
2701 f"Error checking KSU in cluster {db_cluster['name']}."
2702 )
2703 self.logger.error(e)
2704 return False, f"Error checking KSU in cluster {db_cluster['name']}."
2705 return True, "OK"
2706
2707 async def check_delete_ksus(self, op_id, op_params, content):
2708 self.logger.info(f"check_delete_ksus Operation {op_id}. Params: {op_params}.")
2709 self.logger.debug(f"Content: {content}")
2710 db_ksu = content
2711 kustomization_name = db_ksu["git_name"].lower()
2712 oka_list = db_ksu["oka"]
2713 oka_item = oka_list[0]
2714 oka_params = oka_item.get("transformation", {})
garciadeblas167dde32025-02-14 00:44:58 +01002715 kustomization_ns = oka_params.get("kustomization_namespace", "flux-system")
garciadeblasad6d1ba2025-01-22 16:02:18 +01002716 profile_id = op_params.get("profile", {}).get("_id")
2717 profile_type = op_params.get("profile", {}).get("profile_type")
2718 self.logger.info(
2719 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2720 )
2721 dbcluster_list = self.get_dbclusters_from_profile(profile_id, profile_type)
2722 if not dbcluster_list:
2723 self.logger.info(f"No clusters found for profile {profile_id}.")
2724 for db_cluster in dbcluster_list:
2725 try:
2726 self.logger.info(
2727 f"Checking status of KSU in cluster {db_cluster['name']}."
2728 )
2729 cluster_kubectl = self.cluster_kubectl(db_cluster)
2730 checkings_list = [
2731 {
2732 "item": "kustomization",
2733 "name": kustomization_name,
garciadeblas167dde32025-02-14 00:44:58 +01002734 "namespace": kustomization_ns,
garciadeblasad6d1ba2025-01-22 16:02:18 +01002735 "deleted": True,
2736 "timeout": self._checkloop_kustomization_timeout,
2737 "enable": True,
2738 "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
2739 },
2740 ]
2741 self.logger.info(
2742 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2743 )
2744 result, message = await self.common_check_list(
garciadeblas6d8acf32025-02-06 13:34:37 +01002745 op_id, checkings_list, "ksus", db_ksu, kubectl_obj=cluster_kubectl
garciadeblasad6d1ba2025-01-22 16:02:18 +01002746 )
2747 if not result:
2748 return False, message
2749 except Exception as e:
2750 self.logger.error(
2751 f"Error checking KSU in cluster {db_cluster['name']}."
2752 )
2753 self.logger.error(e)
2754 return False, f"Error checking KSU in cluster {db_cluster['name']}."
2755 return True, "OK"