blob: 28641986470e87b6c9e1dcd74730ef3d4aecd2f2 [file] [log] [blame]
rshri932105f2024-07-05 15:11:55 +00001# -*- coding: utf-8 -*-
2
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12# implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15
16__author__ = (
17 "Shrinithi R <shrinithi.r@tataelxsi.co.in>",
18 "Shahithya Y <shahithya.y@tataelxsi.co.in>",
19)
20
rshric3564942024-11-12 18:12:38 +000021import copy
rshri932105f2024-07-05 15:11:55 +000022import logging
yshahd940c652024-10-17 06:11:12 +000023from time import time
garciadeblas72412282024-11-07 12:41:54 +010024import traceback
rshri932105f2024-07-05 15:11:55 +000025from osm_lcm.lcm_utils import LcmBase
26from copy import deepcopy
27from osm_lcm import odu_workflows
28from osm_lcm import vim_sdn
rshri948f7de2024-12-02 03:42:35 +000029from osm_lcm.data_utils.list_utils import find_in_list
garciadeblasad6d1ba2025-01-22 16:02:18 +010030from osm_lcm.n2vc.kubectl import Kubectl
31import yaml
rshri932105f2024-07-05 15:11:55 +000032
# Maps profile fields (as stored in DB documents) to folder/collection names
# used on the GitOps side.
# NOTE(review): "managed_resources" uses an underscore while the other values
# are hyphenated ("infra-controllers", "infra-configs") — looks inconsistent;
# confirm against the actual Git repository layout before changing.
MAP_PROFILE = {
    "infra_controller_profiles": "infra-controllers",
    "infra_config_profiles": "infra-configs",
    "resource_profiles": "managed_resources",
    "app_profiles": "apps",
}
39
rshri932105f2024-07-05 15:11:55 +000040
class GitOpsLcm(LcmBase):
    """Common base for GitOps-driven LCM classes (clusters, nodegroups, ...).

    Centralizes the machinery shared by the concrete subclasses:
    per-operation bookkeeping in each item's "operationHistory", polling of
    ODU workflow and resource status, readiness-check loops, and helpers to
    encrypt/decrypt the age key pair stored with each item.
    """

    # Default DB collection; concrete subclasses override this.
    db_collection = "gitops"
    # Class-level placeholders; operations compute the real values per item.
    workflow_status = None
    resource_status = None

    # Profile field (as stored in DB items) -> DB collection holding it.
    profile_collection_mapping = {
        "infra_controller_profiles": "k8sinfra_controller",
        "infra_config_profiles": "k8sinfra_config",
        "resource_profiles": "k8sresource",
        "app_profiles": "k8sapp",
    }

    # Git folder / profile type name -> profile field in DB items.
    profile_type_mapping = {
        "infra-controllers": "infra_controller_profiles",
        "infra-configs": "infra_config_profiles",
        "managed-resources": "resource_profiles",
        "applications": "app_profiles",
    }

    def __init__(self, msg, lcm_tasks, config):
        """Initialize common GitOps LCM state.

        :param msg: message bus handler
        :param lcm_tasks: LCM task registry
        :param config: two-level configuration dictionary
        """
        self.logger = logging.getLogger("lcm.gitops")
        self.lcm_tasks = lcm_tasks
        self.odu = odu_workflows.OduWorkflow(msg, self.lcm_tasks, config)
        # Readiness-loop timeouts (seconds) for kustomizations and resources.
        self._checkloop_kustomization_timeout = 900
        self._checkloop_resource_timeout = 900
        # Subclasses register {workflow_name: {"check_resource_function": fn}}.
        self._workflows = {}
        super().__init__(msg, self.logger)

    async def check_dummy_operation(self, op_id, op_params, content):
        """Fallback resource check used when a workflow registers no checker.

        Always reports success with a (True, "OK") tuple.
        """
        self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
        return True, "OK"

    def initialize_operation(self, item_id, op_id):
        """Mark operation ``op_id`` of item ``item_id`` as started in the DB.

        Resets the operation's workflow/resource/operation states and
        records it as the item's current operation.
        """
        db_item = self.db.get_one(self.db_collection, {"_id": item_id})
        operation = next(
            (op for op in db_item.get("operationHistory", []) if op["op_id"] == op_id),
            None,
        )
        # NOTE(review): if op_id is not present in operationHistory, `operation`
        # is None and the assignments below raise TypeError — confirm callers
        # always pass a previously recorded operation id.
        operation["workflowState"] = "PROCESSING"
        operation["resourceState"] = "NOT_READY"
        operation["operationState"] = "IN_PROGRESS"
        operation["gitOperationInfo"] = None
        db_item["current_operation"] = operation["op_id"]
        self.db.set_one(self.db_collection, {"_id": item_id}, db_item)

    def get_operation_params(self, item, operation_id):
        """Return the "operationParams" of ``operation_id`` in ``item`` ({} if unset)."""
        operation_history = item.get("operationHistory", [])
        operation = find_in_list(
            operation_history, lambda op: op["op_id"] == operation_id
        )
        return operation.get("operationParams", {})

    def get_operation_type(self, item, operation_id):
        """Return the "operationType" of ``operation_id`` in ``item`` ({} if unset)."""
        operation_history = item.get("operationHistory", [])
        operation = find_in_list(
            operation_history, lambda op: op["op_id"] == operation_id
        )
        return operation.get("operationType", {})

    def update_state_operation_history(
        self, content, op_id, workflow_state=None, resource_state=None
    ):
        """Set intermediate workflow/resource states of one operation.

        Updates (in place) the entry of ``content["operationHistory"]`` whose
        op_id matches; states that are None are left untouched. Returns the
        modified content (not persisted here).
        """
        self.logger.info(
            f"Update state of operation {op_id} in Operation History in DB"
        )
        self.logger.info(
            f"Workflow state: {workflow_state}. Resource state: {resource_state}"
        )
        self.logger.debug(f"Content: {content}")

        for op_num, operation in enumerate(content["operationHistory"]):
            self.logger.debug("Operations: {}".format(operation))
            if operation["op_id"] == op_id:
                self.logger.debug("Found operation number: {}".format(op_num))
                if workflow_state is not None:
                    operation["workflowState"] = workflow_state

                if resource_state is not None:
                    operation["resourceState"] = resource_state
                break
        self.logger.debug("content: {}".format(content))

        return content

    def update_operation_history(
        self, content, op_id, workflow_status=None, resource_status=None, op_end=True
    ):
        """Record the final workflow/resource outcome of one operation.

        Translates the boolean statuses into the corresponding state strings,
        sets the operation result, and stamps "endDate" when ``op_end`` is
        True. Returns the modified content (not persisted here).
        """
        self.logger.info(
            f"Update Operation History in DB. Workflow status: {workflow_status}. Resource status: {resource_status}"
        )
        self.logger.debug(f"Content: {content}")

        for op_num, operation in enumerate(content["operationHistory"]):
            self.logger.debug("Operations: {}".format(operation))
            if operation["op_id"] == op_id:
                self.logger.debug("Found operation number: {}".format(op_num))
                if workflow_status is not None:
                    if workflow_status:
                        operation["workflowState"] = "COMPLETED"
                        operation["result"] = True
                    else:
                        operation["workflowState"] = "ERROR"
                        operation["operationState"] = "FAILED"
                        operation["result"] = False

                if resource_status is not None:
                    if resource_status:
                        operation["resourceState"] = "READY"
                        operation["operationState"] = "COMPLETED"
                        operation["result"] = True
                    else:
                        operation["resourceState"] = "NOT_READY"
                        operation["operationState"] = "FAILED"
                        operation["result"] = False

                if op_end:
                    now = time()
                    operation["endDate"] = now
                break
        self.logger.debug("content: {}".format(content))

        return content

    async def check_workflow_and_update_db(self, op_id, workflow_name, db_content):
        """Wait for a workflow, persist the item state, return the status.

        Maps the (operation type, workflow status) pair to the item "state"
        and "resourceState", records the outcome in the operation history,
        and writes db_content back to the collection.
        """
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            op_id, workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )
        operation_type = self.get_operation_type(db_content, op_id)
        if operation_type == "create" and workflow_status:
            db_content["state"] = "CREATED"
        elif operation_type == "create" and not workflow_status:
            db_content["state"] = "FAILED_CREATION"
        elif operation_type == "delete" and workflow_status:
            db_content["state"] = "DELETED"
        elif operation_type == "delete" and not workflow_status:
            db_content["state"] = "FAILED_DELETION"

        if workflow_status:
            db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(
            db_content, op_id, workflow_status, None
        )
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        return workflow_status

    async def check_resource_and_update_db(
        self, resource_name, op_id, op_params, db_content
    ):
        """Check resource readiness and close the operation in db_content.

        Only meant to be called after a successful workflow, hence the
        workflow status passed to the history update is fixed to True.
        Returns (resource_status, db_content); the caller is responsible for
        persisting db_content.
        """
        workflow_status = True

        resource_status, resource_msg = await self.check_resource_status(
            resource_name, op_id, op_params, db_content
        )
        self.logger.info(
            "Resource Status: {} Resource Message: {}".format(
                resource_status, resource_msg
            )
        )

        if resource_status:
            db_content["resourceState"] = "READY"
        else:
            db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(
            db_content, op_id, workflow_status, resource_status
        )
        db_content["operatingState"] = "IDLE"
        db_content["current_operation"] = None
        return resource_status, db_content

    async def common_check_list(
        self, op_id, checkings_list, db_collection, db_item, kubectl_obj=None
    ):
        """Run a list of readiness checks, persisting progress after each one.

        Each enabled checking runs through the ODU readiness loop; on success
        the item's "resourceState" is advanced and stored, on failure a
        (False, message) tuple is returned immediately. Unexpected exceptions
        are caught and reported as a failure instead of propagating.
        """
        try:
            for checking in checkings_list:
                if checking["enable"]:
                    status, message = await self.odu.readiness_loop(
                        op_id=op_id,
                        item=checking["item"],
                        name=checking["name"],
                        namespace=checking["namespace"],
                        condition=checking.get("condition"),
                        deleted=checking.get("deleted", False),
                        timeout=checking["timeout"],
                        kubectl_obj=kubectl_obj,
                    )
                    if not status:
                        error_message = "Resources not ready: "
                        error_message += checking.get("error_message", "")
                        return status, f"{error_message}: {message}"
                    else:
                        db_item["resourceState"] = checking["resourceState"]
                        db_item = self.update_state_operation_history(
                            db_item, op_id, None, checking["resourceState"]
                        )
                        self.db.set_one(db_collection, {"_id": db_item["_id"]}, db_item)
        except Exception as e:
            self.logger.debug(traceback.format_exc())
            self.logger.debug(f"Exception: {e}", exc_info=True)
            return False, f"Unexpected exception: {e}"
        return True, "OK"

    async def check_resource_status(self, key, op_id, op_params, content):
        """Dispatch to the workflow-specific resource checker for ``key``.

        Falls back to :meth:`check_dummy_operation` when the workflow did not
        register a "check_resource_function".
        """
        self.logger.info(
            f"Check resource status. Key: {key}. Operation: {op_id}. Params: {op_params}."
        )
        self.logger.debug(f"Check resource status. Content: {content}")
        check_resource_function = self._workflows.get(key, {}).get(
            "check_resource_function"
        )
        self.logger.info("check_resource function : {}".format(check_resource_function))
        if check_resource_function:
            return await check_resource_function(op_id, op_params, content)
        else:
            return await self.check_dummy_operation(op_id, op_params, content)

    def check_force_delete_and_delete_from_db(
        self, _id, workflow_status, resource_status, force
    ):
        """Remove the item from the DB when a forced delete failed midway.

        Returns True if the item was deleted, False otherwise.
        """
        self.logger.info(
            f" Force: {force} Workflow status: {workflow_status} Resource Status: {resource_status}"
        )
        if force and (not workflow_status or not resource_status):
            self.db.del_one(self.db_collection, {"_id": _id})
            return True
        return False

    def decrypt_age_keys(self, content, fields=None):
        """Decrypt ``fields`` of ``content`` in place (age key pair by default)."""
        # Avoid a mutable default argument; build the default list per call.
        if fields is None:
            fields = ["age_pubkey", "age_privkey"]
        self.db.encrypt_decrypt_fields(
            content,
            "decrypt",
            fields,
            schema_version="1.11",
            salt=content["_id"],
        )

    def encrypt_age_keys(self, content, fields=None):
        """Encrypt ``fields`` of ``content`` in place (age key pair by default)."""
        # Avoid a mutable default argument; build the default list per call.
        if fields is None:
            fields = ["age_pubkey", "age_privkey"]
        self.db.encrypt_decrypt_fields(
            content,
            "encrypt",
            fields,
            schema_version="1.11",
            salt=content["_id"],
        )

    def decrypted_copy(self, content, fields=None):
        """Return a deep copy of ``content`` with ``fields`` decrypted.

        The copy is intended to be passed to ODU workflows; the original
        document keeps its encrypted values.
        """
        # Avoid a mutable default argument; build the default list per call.
        if fields is None:
            fields = ["age_pubkey", "age_privkey"]
        # This deep copy is intended to be passed to ODU workflows.
        content_copy = copy.deepcopy(content)

        # decrypting the key
        self.db.encrypt_decrypt_fields(
            content_copy,
            "decrypt",
            fields,
            schema_version="1.11",
            salt=content_copy["_id"],
        )
        return content_copy

    def delete_ksu_dependency(self, _id, data):
        """Release OKAs used only by the KSU being deleted.

        Collects the OKA ids referenced by ``data`` and, for each one not
        referenced by any other KSU, marks it as NOT_IN_USE in the "okas"
        collection.
        """
        used_oka = []
        existing_oka = []

        for oka_data in data["oka"]:
            if oka_data.get("_id"):
                used_oka.append(oka_data["_id"])

        all_ksu_data = self.db.get_list("ksus", {})
        for ksu_data in all_ksu_data:
            if ksu_data["_id"] != _id:
                for oka_data in ksu_data["oka"]:
                    if oka_data.get("_id"):
                        if oka_data["_id"] not in existing_oka:
                            existing_oka.append(oka_data["_id"])

        self.logger.info(f"Used OKA: {used_oka}")
        self.logger.info(f"Existing OKA: {existing_oka}")

        for oka_id in used_oka:
            if oka_id not in existing_oka:
                self.db.set_one(
                    "okas", {"_id": oka_id}, {"_admin.usageState": "NOT_IN_USE"}
                )

        return

    def delete_profile_ksu(self, _id, profile_type):
        """Delete every KSU attached to profile ``_id`` of ``profile_type``.

        OKA usage is released first so orphaned OKAs end up NOT_IN_USE.
        """
        filter_q = {"profile": {"_id": _id, "profile_type": profile_type}}
        ksu_list = self.db.get_list("ksus", filter_q)
        for ksu_data in ksu_list:
            self.delete_ksu_dependency(ksu_data["_id"], ksu_data)

        if ksu_list:
            self.db.del_list("ksus", filter_q)
        return

    def cluster_kubectl(self, db_cluster):
        """Build a Kubectl client from the cluster's stored kubeconfig.

        NOTE(review): the kubeconfig (cluster credentials) is written in
        clear text to a predictable path under /tmp and never removed —
        consider tempfile with restricted permissions plus cleanup.
        """
        cluster_kubeconfig = db_cluster["credentials"]
        kubeconfig_path = f"/tmp/{db_cluster['_id']}_kubeconfig.yaml"
        with open(kubeconfig_path, "w") as kubeconfig_file:
            yaml.safe_dump(cluster_kubeconfig, kubeconfig_file)
        return Kubectl(config_file=kubeconfig_path)
garciadeblas72412282024-11-07 12:41:54 +0100357
class NodeGroupLcm(GitOpsLcm):
    """LCM for cluster nodegroups managed through GitOps workflows."""

    db_collection = "nodegroups"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)
        # Per-workflow resource checkers dispatched by check_resource_status().
        self._workflows = {
            "add_nodegroup": {
                "check_resource_function": self.check_add_nodegroup,
            },
            "scale_nodegroup": {
                "check_resource_function": self.check_scale_nodegroup,
            },
            "delete_nodegroup": {
                "check_resource_function": self.check_delete_nodegroup,
            },
        }

    async def create(self, params, order_id):
        """Launch the add_nodegroup workflow and track it to completion."""
        self.logger.info("Add NodeGroup Enter")
        # Pre-initialize so the exit log below never hits a NameError when
        # the workflow fails (same pattern as ClusterLcm.create).
        workflow_status = None
        resource_status = None

        # To get the nodegroup and op ids
        nodegroup_id = params["nodegroup_id"]
        op_id = params["operation_id"]

        # To initialize the operation states
        self.initialize_operation(nodegroup_id, op_id)

        # To get the nodegroup details and control plane from DB
        db_nodegroup = self.db.get_one(self.db_collection, {"_id": nodegroup_id})
        db_cluster = self.db.get_one("clusters", {"_id": db_nodegroup["cluster_id"]})

        # To get the operation params details
        op_params = self.get_operation_params(db_nodegroup, op_id)
        self.logger.info(f"Operations Params: {op_params}")

        db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})

        # Content passed to the ODU workflow
        workflow_content = {
            "nodegroup": db_nodegroup,
            "cluster": db_cluster,
            "vim_account": db_vim,
        }
        self.logger.info(f"Workflow content: {workflow_content}")

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "add_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_nodegroup
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "add_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "add_nodegroup", op_id, op_params, db_nodegroup
            )
        self.db.set_one(self.db_collection, {"_id": db_nodegroup["_id"]}, db_nodegroup)
        self.logger.info(f"Add NodeGroup Exit with resource status: {resource_status}")
        return

    async def check_add_nodegroup(self, op_id, op_params, content):
        """Readiness checks for a newly added nodegroup.

        Waits for the kustomization, then for the AWS nodepool to be synced
        and ready. Returns (status, message).
        """
        self.logger.info(f"check_add_nodegroup Operation {op_id}. Params: {op_params}.")
        self.logger.info(f"Content: {content}")
        db_nodegroup = content
        nodegroup_name = db_nodegroup["git_name"].lower()
        nodegroup_kustomization_name = nodegroup_name
        checkings_list = [
            {
                "item": "kustomization",
                "name": nodegroup_kustomization_name,
                "namespace": "managed-resources",
                "condition": {
                    "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
                    "value": "True",
                },
                "timeout": self._checkloop_kustomization_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
            },
            {
                "item": "nodepool_aws",
                "name": nodegroup_name,
                "namespace": "",
                "condition": {
                    "jsonpath_filter": "status.conditions[?(@.type=='Synced')].status",
                    "value": "True",
                },
                "timeout": self._checkloop_resource_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.NODEGROUP",
            },
            {
                "item": "nodepool_aws",
                "name": nodegroup_name,
                "namespace": "",
                "condition": {
                    "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
                    "value": "True",
                },
                "timeout": self._checkloop_resource_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEGROUP",
            },
        ]
        self.logger.info(f"Checking list: {checkings_list}")
        # common_check_list already returns (True, "OK") on success, so its
        # result can be forwarded directly (same as the other check_* methods).
        return await self.common_check_list(
            op_id, checkings_list, "nodegroups", db_nodegroup
        )

    async def scale(self, params, order_id):
        """Launch the scale_nodegroup workflow and track it to completion."""
        self.logger.info("Scale nodegroup Enter")
        # Pre-initialize so the checks/log below never hit a NameError when
        # the workflow fails (same pattern as ClusterLcm.create).
        workflow_status = None
        resource_status = None

        op_id = params["operation_id"]
        nodegroup_id = params["nodegroup_id"]

        # To initialize the operation states
        self.initialize_operation(nodegroup_id, op_id)

        db_nodegroup = self.db.get_one(self.db_collection, {"_id": nodegroup_id})
        db_cluster = self.db.get_one("clusters", {"_id": db_nodegroup["cluster_id"]})
        op_params = self.get_operation_params(db_nodegroup, op_id)
        db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})

        workflow_content = {
            "nodegroup": db_nodegroup,
            "cluster": db_cluster,
            "vim_account": db_vim,
        }
        self.logger.info(f"Workflow content: {workflow_content}")

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "scale_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_nodegroup
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "scale_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "scale_nodegroup", op_id, op_params, db_nodegroup
            )

        if resource_status:
            db_nodegroup["state"] = "READY"
        self.db.set_one(
            self.db_collection, {"_id": db_nodegroup["_id"]}, db_nodegroup
        )
        self.logger.info(
            f"Nodegroup Scale Exit with resource status: {resource_status}"
        )
        return

    async def check_scale_nodegroup(self, op_id, op_params, content):
        """Readiness checks after scaling a nodegroup.

        Waits for the kustomization, then verifies the nodepool's desired
        size matches op_params["node_count"]. Returns (status, message).
        """
        self.logger.info(
            f"check_scale_nodegroup Operation {op_id}. Params: {op_params}."
        )
        self.logger.debug(f"Content: {content}")
        db_nodegroup = content
        nodegroup_name = db_nodegroup["git_name"].lower()
        nodegroup_kustomization_name = nodegroup_name
        checkings_list = [
            {
                "item": "kustomization",
                "name": nodegroup_kustomization_name,
                "namespace": "managed-resources",
                "condition": {
                    "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
                    "value": "True",
                },
                "timeout": self._checkloop_kustomization_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
            },
            {
                "item": "nodepool_aws",
                "name": nodegroup_name,
                "namespace": "",
                "condition": {
                    "jsonpath_filter": "status.atProvider.scalingConfig[0].desiredSize",
                    "value": f"{op_params['node_count']}",
                },
                "timeout": self._checkloop_resource_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.NODEGROUP",
            },
        ]
        self.logger.info(f"Checking list: {checkings_list}")
        return await self.common_check_list(
            op_id, checkings_list, "nodegroups", db_nodegroup
        )

    async def delete(self, params, order_id):
        """Launch the delete_nodegroup workflow and remove the item on success."""
        self.logger.info("Delete nodegroup Enter")
        # Pre-initialize so the checks/log below never hit a NameError when
        # the workflow fails (same pattern as ClusterLcm.create).
        workflow_status = None
        resource_status = None

        op_id = params["operation_id"]
        nodegroup_id = params["nodegroup_id"]

        # To initialize the operation states
        self.initialize_operation(nodegroup_id, op_id)

        db_nodegroup = self.db.get_one(self.db_collection, {"_id": nodegroup_id})
        db_cluster = self.db.get_one("clusters", {"_id": db_nodegroup["cluster_id"]})
        op_params = self.get_operation_params(db_nodegroup, op_id)

        workflow_content = {"nodegroup": db_nodegroup, "cluster": db_cluster}

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_nodegroup
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "delete_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_nodegroup", op_id, op_params, db_nodegroup
            )

        if resource_status:
            # NOTE(review): assumes the cluster document carries an integer
            # "node_count" — a missing key would make this subtraction fail.
            node_count = db_cluster.get("node_count")
            new_node_count = node_count - 1
            self.logger.info(f"New Node count: {new_node_count}")
            db_cluster["node_count"] = new_node_count
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
            db_nodegroup["state"] = "DELETED"
            self.db.set_one(
                self.db_collection, {"_id": db_nodegroup["_id"]}, db_nodegroup
            )
            self.db.del_one(self.db_collection, {"_id": db_nodegroup["_id"]})
        self.logger.info(
            f"Nodegroup Delete Exit with resource status: {resource_status}"
        )
        return

    async def check_delete_nodegroup(self, op_id, op_params, content):
        """Readiness checks for nodegroup deletion.

        Waits until the kustomization and the AWS nodepool are gone.
        Returns (status, message).
        """
        self.logger.info(
            f"check_delete_nodegroup Operation {op_id}. Params: {op_params}."
        )
        db_nodegroup = content
        nodegroup_name = db_nodegroup["git_name"].lower()
        nodegroup_kustomization_name = nodegroup_name
        checkings_list = [
            {
                "item": "kustomization",
                "name": nodegroup_kustomization_name,
                "namespace": "managed-resources",
                "deleted": True,
                "timeout": self._checkloop_kustomization_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
            },
            {
                "item": "nodepool_aws",
                "name": nodegroup_name,
                "namespace": "",
                "deleted": True,
                "timeout": self._checkloop_resource_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.RESOURCE_DELETED.NODEGROUP",
            },
        ]
        self.logger.info(f"Checking list: {checkings_list}")
        return await self.common_check_list(
            op_id, checkings_list, "nodegroups", db_nodegroup
        )
661
garciadeblas72412282024-11-07 12:41:54 +0100662class ClusterLcm(GitOpsLcm):
garciadeblas96b94f52024-07-08 16:18:21 +0200663 db_collection = "clusters"
rshri932105f2024-07-05 15:11:55 +0000664
665 def __init__(self, msg, lcm_tasks, config):
666 """
667 Init, Connect to database, filesystem storage, and messaging
668 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
669 :return: None
670 """
garciadeblas72412282024-11-07 12:41:54 +0100671 super().__init__(msg, lcm_tasks, config)
672 self._workflows = {
673 "create_cluster": {
674 "check_resource_function": self.check_create_cluster,
675 },
garciadeblasb0a42c22024-11-13 16:00:10 +0100676 "register_cluster": {
677 "check_resource_function": self.check_register_cluster,
678 },
679 "update_cluster": {
680 "check_resource_function": self.check_update_cluster,
681 },
garciadeblasad6d1ba2025-01-22 16:02:18 +0100682 "delete_cluster": {
683 "check_resource_function": self.check_delete_cluster,
684 },
garciadeblas72412282024-11-07 12:41:54 +0100685 }
rshri932105f2024-07-05 15:11:55 +0000686 self.regist = vim_sdn.K8sClusterLcm(msg, self.lcm_tasks, config)
687
rshri948f7de2024-12-02 03:42:35 +0000688 async def create(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000689 self.logger.info("cluster Create Enter")
garciadeblas8bdb3d42025-04-04 00:19:13 +0200690 workflow_status = None
691 resource_status = None
rshri932105f2024-07-05 15:11:55 +0000692
garciadeblas995cbf32024-12-18 12:54:00 +0100693 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000694 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000695 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000696
697 # To initialize the operation states
698 self.initialize_operation(cluster_id, op_id)
699
garciadeblas995cbf32024-12-18 12:54:00 +0100700 # To get the cluster
701 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
702
703 # To get the operation params details
704 op_params = self.get_operation_params(db_cluster, op_id)
705
706 # To copy the cluster content and decrypting fields to use in workflows
garciadeblasd41e9292025-03-11 15:44:25 +0100707 db_cluster_copy = self.decrypted_copy(db_cluster)
garciadeblas995cbf32024-12-18 12:54:00 +0100708 workflow_content = {
garciadeblasd41e9292025-03-11 15:44:25 +0100709 "cluster": db_cluster_copy,
garciadeblas995cbf32024-12-18 12:54:00 +0100710 }
rshric3564942024-11-12 18:12:38 +0000711
rshri948f7de2024-12-02 03:42:35 +0000712 # To get the vim account details
rshric3564942024-11-12 18:12:38 +0000713 db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
garciadeblas995cbf32024-12-18 12:54:00 +0100714 workflow_content["vim_account"] = db_vim
rshric3564942024-11-12 18:12:38 +0000715
garciadeblas41859ce2025-02-04 16:08:51 +0100716 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100717 "create_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200718 )
garciadeblas41859ce2025-02-04 16:08:51 +0100719 if not workflow_res:
720 self.logger.error(f"Failed to launch workflow: {workflow_name}")
721 db_cluster["state"] = "FAILED_CREATION"
722 db_cluster["resourceState"] = "ERROR"
723 db_cluster = self.update_operation_history(
724 db_cluster, op_id, workflow_status=False, resource_status=None
725 )
726 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
727 # Clean items used in the workflow, no matter if the workflow succeeded
728 clean_status, clean_msg = await self.odu.clean_items_workflow(
729 "create_cluster", op_id, op_params, workflow_content
730 )
731 self.logger.info(
732 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
733 )
734 return
rshri932105f2024-07-05 15:11:55 +0000735
garciadeblas26d733c2025-02-03 16:12:43 +0100736 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200737 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +0100738 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +0200739 )
rshri932105f2024-07-05 15:11:55 +0000740 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100741 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000742 workflow_status, workflow_msg
743 )
744 )
745 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200746 db_cluster["state"] = "CREATED"
747 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +0000748 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200749 db_cluster["state"] = "FAILED_CREATION"
750 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000751 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000752 db_cluster = self.update_operation_history(
753 db_cluster, op_id, workflow_status, None
754 )
garciadeblas96b94f52024-07-08 16:18:21 +0200755 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000756
garciadeblas28bff0f2024-09-16 12:53:07 +0200757 # Clean items used in the workflow, no matter if the workflow succeeded
758 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100759 "create_cluster", op_id, op_params, workflow_content
garciadeblas28bff0f2024-09-16 12:53:07 +0200760 )
761 self.logger.info(
762 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
763 )
764
rshri932105f2024-07-05 15:11:55 +0000765 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100766 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100767 "create_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000768 )
769 self.logger.info(
770 "resource_status is :{} and resource_msg is :{}".format(
771 resource_status, resource_msg
772 )
773 )
774 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200775 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +0000776 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200777 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000778
garciadeblas96b94f52024-07-08 16:18:21 +0200779 db_cluster["operatingState"] = "IDLE"
780 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000781 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000782 )
shahithya70a3fc92024-11-12 11:01:05 +0000783 db_cluster["current_operation"] = None
garciadeblas41a600e2025-01-21 11:49:38 +0100784
rshrif8911b92025-06-11 18:19:07 +0000785 # Retrieve credentials and subnets and register the cluster in k8sclusters collection
garciadeblas41a600e2025-01-21 11:49:38 +0100786 cluster_creds = None
rshrif8911b92025-06-11 18:19:07 +0000787 db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
garciadeblas41a600e2025-01-21 11:49:38 +0100788 if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
rshrif8911b92025-06-11 18:19:07 +0000789 # Retrieve credentials
garciadeblas41a600e2025-01-21 11:49:38 +0100790 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
791 # TODO: manage the case where the credentials are not available
792 if result:
793 db_cluster["credentials"] = cluster_creds
794
rshrif8911b92025-06-11 18:19:07 +0000795 # Retrieve subnets
796 if db_cluster["vim_type"] == "aws":
797 generic_object = await self.odu.list_object(
798 api_group="ec2.aws.upbound.io",
799 api_plural="subnets",
800 api_version="v1beta1",
801 )
802 private_subnet = []
803 public_subnet = []
804 for subnet in generic_object:
805 labels = subnet.get("metadata", {}).get("labels", {})
806 status = subnet.get("status", {}).get("atProvider", {})
807 # Extract relevant label values
808 cluster_label = labels.get("cluster")
809 access_label = labels.get("access")
810 subnet_id = status.get("id")
811 # Apply filtering
812 if cluster_label == db_cluster["name"] and subnet_id:
813 if access_label == "private":
814 private_subnet.append(subnet_id)
815 elif access_label == "public":
816 public_subnet.append(subnet_id)
817 # Update db_cluster
818 db_cluster["private_subnet"] = private_subnet
819 db_cluster["public_subnet"] = public_subnet
rshri948f7de2024-12-02 03:42:35 +0000820
rshrif8911b92025-06-11 18:19:07 +0000821 # Register the cluster in k8sclusters collection
rshri948f7de2024-12-02 03:42:35 +0000822 db_register["credentials"] = cluster_creds
garciadeblas41a600e2025-01-21 11:49:38 +0100823 # To call the lcm.py for registering the cluster in k8scluster lcm.
rshri948f7de2024-12-02 03:42:35 +0000824 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
825 register = await self.regist.create(db_register, order_id)
826 self.logger.debug(f"Register is : {register}")
827 else:
828 db_register["_admin"]["operationalState"] = "ERROR"
rshri948f7de2024-12-02 03:42:35 +0000829 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
830
rshrif8911b92025-06-11 18:19:07 +0000831 # Update db_cluster
832 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
833 self.update_default_profile_agekeys(db_cluster_copy)
834 self.update_profile_state(db_cluster, workflow_status, resource_status)
835
rshri932105f2024-07-05 15:11:55 +0000836 return
837
garciadeblas72412282024-11-07 12:41:54 +0100838 async def check_create_cluster(self, op_id, op_params, content):
garciadeblas7eae6f42024-11-08 10:41:38 +0100839 self.logger.info(
840 f"check_create_cluster Operation {op_id}. Params: {op_params}."
841 )
garciadeblas72412282024-11-07 12:41:54 +0100842 db_cluster = content["cluster"]
843 cluster_name = db_cluster["git_name"].lower()
844 cluster_kustomization_name = cluster_name
845 db_vim_account = content["vim_account"]
846 cloud_type = db_vim_account["vim_type"]
garciadeblas1ca09852025-05-30 11:19:06 +0200847 nodegroup_name = ""
garciadeblas72412282024-11-07 12:41:54 +0100848 if cloud_type == "aws":
garciadeblas1ca09852025-05-30 11:19:06 +0200849 nodegroup_name = f"{cluster_name}-nodegroup"
garciadeblas72412282024-11-07 12:41:54 +0100850 cluster_name = f"{cluster_name}-cluster"
851 elif cloud_type == "gcp":
garciadeblas1ca09852025-05-30 11:19:06 +0200852 nodegroup_name = f"nodepool-{cluster_name}"
garciadeblas72412282024-11-07 12:41:54 +0100853 bootstrap = op_params.get("bootstrap", True)
854 if cloud_type in ("azure", "gcp", "aws"):
855 checkings_list = [
856 {
857 "item": "kustomization",
858 "name": cluster_kustomization_name,
859 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +0100860 "condition": {
861 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
862 "value": "True",
863 },
yshahcb9075f2024-11-22 12:08:57 +0000864 "timeout": 1500,
garciadeblas72412282024-11-07 12:41:54 +0100865 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100866 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
garciadeblas72412282024-11-07 12:41:54 +0100867 },
868 {
869 "item": f"cluster_{cloud_type}",
870 "name": cluster_name,
871 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100872 "condition": {
873 "jsonpath_filter": "status.conditions[?(@.type=='Synced')].status",
874 "value": "True",
875 },
garciadeblas72412282024-11-07 12:41:54 +0100876 "timeout": self._checkloop_resource_timeout,
877 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100878 "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.CLUSTER",
garciadeblas72412282024-11-07 12:41:54 +0100879 },
880 {
881 "item": f"cluster_{cloud_type}",
882 "name": cluster_name,
883 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100884 "condition": {
885 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
886 "value": "True",
887 },
garciadeblas72412282024-11-07 12:41:54 +0100888 "timeout": self._checkloop_resource_timeout,
889 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100890 "resourceState": "IN_PROGRESS.RESOURCE_READY.CLUSTER",
garciadeblas72412282024-11-07 12:41:54 +0100891 },
892 {
893 "item": "kustomization",
894 "name": f"{cluster_kustomization_name}-bstrp-fluxctrl",
895 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +0100896 "condition": {
897 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
898 "value": "True",
899 },
yshahcb9075f2024-11-22 12:08:57 +0000900 "timeout": self._checkloop_resource_timeout,
garciadeblas72412282024-11-07 12:41:54 +0100901 "enable": bootstrap,
garciadeblas7eae6f42024-11-08 10:41:38 +0100902 "resourceState": "IN_PROGRESS.BOOTSTRAP_OK",
garciadeblas72412282024-11-07 12:41:54 +0100903 },
904 ]
905 else:
906 return False, "Not suitable VIM account to check cluster status"
rshrif8911b92025-06-11 18:19:07 +0000907 if cloud_type != "aws":
908 if nodegroup_name:
909 nodegroup_check = {
910 "item": f"nodegroup_{cloud_type}",
911 "name": nodegroup_name,
912 "namespace": "",
913 "condition": {
914 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
915 "value": "True",
916 },
917 "timeout": self._checkloop_resource_timeout,
918 "enable": True,
919 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEGROUP",
920 }
921 checkings_list.insert(3, nodegroup_check)
yshahcb9075f2024-11-22 12:08:57 +0000922 return await self.common_check_list(
923 op_id, checkings_list, "clusters", db_cluster
924 )
garciadeblas72412282024-11-07 12:41:54 +0100925
garciadeblasd41e9292025-03-11 15:44:25 +0100926 def update_default_profile_agekeys(self, db_cluster):
927 profiles = [
928 "infra_controller_profiles",
929 "infra_config_profiles",
930 "app_profiles",
931 "resource_profiles",
932 ]
933 self.logger.debug("the db_cluster is :{}".format(db_cluster))
934 for profile_type in profiles:
935 profile_id = db_cluster[profile_type]
936 db_collection = self.profile_collection_mapping[profile_type]
937 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
938 db_profile["age_pubkey"] = db_cluster["age_pubkey"]
939 db_profile["age_privkey"] = db_cluster["age_privkey"]
940 self.encrypt_age_keys(db_profile)
941 self.db.set_one(db_collection, {"_id": db_profile["_id"]}, db_profile)
942
garciadeblas96b94f52024-07-08 16:18:21 +0200943 def update_profile_state(self, db_cluster, workflow_status, resource_status):
rshri932105f2024-07-05 15:11:55 +0000944 profiles = [
945 "infra_controller_profiles",
946 "infra_config_profiles",
947 "app_profiles",
948 "resource_profiles",
949 ]
garciadeblasd41e9292025-03-11 15:44:25 +0100950 self.logger.debug("the db_cluster is :{}".format(db_cluster))
rshri932105f2024-07-05 15:11:55 +0000951 for profile_type in profiles:
garciadeblas96b94f52024-07-08 16:18:21 +0200952 profile_id = db_cluster[profile_type]
rshri948f7de2024-12-02 03:42:35 +0000953 db_collection = self.profile_collection_mapping[profile_type]
rshri932105f2024-07-05 15:11:55 +0000954 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
yshahcb9075f2024-11-22 12:08:57 +0000955 op_id = db_profile["operationHistory"][-1].get("op_id")
garciadeblas96b94f52024-07-08 16:18:21 +0200956 db_profile["state"] = db_cluster["state"]
957 db_profile["resourceState"] = db_cluster["resourceState"]
958 db_profile["operatingState"] = db_cluster["operatingState"]
rshri932105f2024-07-05 15:11:55 +0000959 db_profile = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000960 db_profile, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000961 )
rshri932105f2024-07-05 15:11:55 +0000962 self.db.set_one(db_collection, {"_id": db_profile["_id"]}, db_profile)
963
rshri948f7de2024-12-02 03:42:35 +0000964 async def delete(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000965 self.logger.info("cluster delete Enter")
rshri948f7de2024-12-02 03:42:35 +0000966
garciadeblas926ffac2025-02-12 16:45:40 +0100967 try:
968 # To get the cluster and op ids
969 cluster_id = params["cluster_id"]
970 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000971
garciadeblas926ffac2025-02-12 16:45:40 +0100972 # To initialize the operation states
973 self.initialize_operation(cluster_id, op_id)
rshri948f7de2024-12-02 03:42:35 +0000974
garciadeblas926ffac2025-02-12 16:45:40 +0100975 # To get the cluster
976 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
garciadeblas995cbf32024-12-18 12:54:00 +0100977
garciadeblas926ffac2025-02-12 16:45:40 +0100978 # To get the operation params details
979 op_params = self.get_operation_params(db_cluster, op_id)
garciadeblas995cbf32024-12-18 12:54:00 +0100980
garciadeblas926ffac2025-02-12 16:45:40 +0100981 # To copy the cluster content and decrypting fields to use in workflows
982 workflow_content = {
983 "cluster": self.decrypted_copy(db_cluster),
984 }
rshri948f7de2024-12-02 03:42:35 +0000985
garciadeblas926ffac2025-02-12 16:45:40 +0100986 # To get the vim account details
987 db_vim = self.db.get_one(
988 "vim_accounts", {"name": db_cluster["vim_account"]}
989 )
990 workflow_content["vim_account"] = db_vim
991 except Exception as e:
992 self.logger.debug(traceback.format_exc())
993 self.logger.debug(f"Exception: {e}", exc_info=True)
994 raise e
garciadeblasad6d1ba2025-01-22 16:02:18 +0100995
garciadeblas41859ce2025-02-04 16:08:51 +0100996 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100997 "delete_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200998 )
garciadeblas41859ce2025-02-04 16:08:51 +0100999 if not workflow_res:
1000 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1001 db_cluster["state"] = "FAILED_DELETION"
1002 db_cluster["resourceState"] = "ERROR"
1003 db_cluster = self.update_operation_history(
1004 db_cluster, op_id, workflow_status=False, resource_status=None
1005 )
1006 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1007 # Clean items used in the workflow, no matter if the workflow succeeded
1008 clean_status, clean_msg = await self.odu.clean_items_workflow(
1009 "delete_cluster", op_id, op_params, workflow_content
1010 )
1011 self.logger.info(
1012 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1013 )
1014 return
rshri932105f2024-07-05 15:11:55 +00001015
garciadeblas26d733c2025-02-03 16:12:43 +01001016 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001017 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001018 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001019 )
rshri932105f2024-07-05 15:11:55 +00001020 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001021 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001022 workflow_status, workflow_msg
1023 )
1024 )
1025 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001026 db_cluster["state"] = "DELETED"
1027 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +00001028 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001029 db_cluster["state"] = "FAILED_DELETION"
1030 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001031 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001032 db_cluster = self.update_operation_history(
1033 db_cluster, op_id, workflow_status, None
1034 )
garciadeblas96b94f52024-07-08 16:18:21 +02001035 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001036
garciadeblas98f9a3d2024-12-10 13:42:47 +01001037 # Clean items used in the workflow or in the cluster, no matter if the workflow succeeded
1038 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001039 "delete_cluster", op_id, op_params, workflow_content
garciadeblas98f9a3d2024-12-10 13:42:47 +01001040 )
1041 self.logger.info(
1042 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1043 )
1044
rshri932105f2024-07-05 15:11:55 +00001045 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001046 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001047 "delete_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001048 )
1049 self.logger.info(
1050 "resource_status is :{} and resource_msg is :{}".format(
1051 resource_status, resource_msg
1052 )
1053 )
1054 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001055 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +00001056 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001057 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001058
garciadeblas96b94f52024-07-08 16:18:21 +02001059 db_cluster["operatingState"] = "IDLE"
1060 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001061 db_cluster, op_id, workflow_status, resource_status
garciadeblas96b94f52024-07-08 16:18:21 +02001062 )
shahithya70a3fc92024-11-12 11:01:05 +00001063 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +02001064 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001065
yshahb36649f2025-02-28 09:01:51 +00001066 force = params.get("force", False)
1067 if force:
1068 force_delete_status = self.check_force_delete_and_delete_from_db(
1069 cluster_id, workflow_status, resource_status, force
1070 )
1071 if force_delete_status:
1072 return
1073
garciadeblas96b94f52024-07-08 16:18:21 +02001074 # To delete it from DB
1075 if db_cluster["state"] == "DELETED":
1076 self.delete_cluster(db_cluster)
rshri948f7de2024-12-02 03:42:35 +00001077
1078 # To delete it from k8scluster collection
1079 self.db.del_one("k8sclusters", {"name": db_cluster["name"]})
1080
rshri932105f2024-07-05 15:11:55 +00001081 return
1082
garciadeblasad6d1ba2025-01-22 16:02:18 +01001083 async def check_delete_cluster(self, op_id, op_params, content):
1084 self.logger.info(
1085 f"check_delete_cluster Operation {op_id}. Params: {op_params}."
1086 )
1087 self.logger.debug(f"Content: {content}")
1088 db_cluster = content["cluster"]
1089 cluster_name = db_cluster["git_name"].lower()
1090 cluster_kustomization_name = cluster_name
1091 db_vim_account = content["vim_account"]
1092 cloud_type = db_vim_account["vim_type"]
1093 if cloud_type == "aws":
1094 cluster_name = f"{cluster_name}-cluster"
1095 if cloud_type in ("azure", "gcp", "aws"):
1096 checkings_list = [
1097 {
1098 "item": "kustomization",
1099 "name": cluster_kustomization_name,
1100 "namespace": "managed-resources",
1101 "deleted": True,
1102 "timeout": self._checkloop_kustomization_timeout,
1103 "enable": True,
1104 "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
1105 },
1106 {
1107 "item": f"cluster_{cloud_type}",
1108 "name": cluster_name,
1109 "namespace": "",
1110 "deleted": True,
1111 "timeout": self._checkloop_resource_timeout,
1112 "enable": True,
1113 "resourceState": "IN_PROGRESS.RESOURCE_DELETED.CLUSTER",
1114 },
1115 ]
1116 else:
1117 return False, "Not suitable VIM account to check cluster status"
1118 return await self.common_check_list(
1119 op_id, checkings_list, "clusters", db_cluster
1120 )
1121
garciadeblas96b94f52024-07-08 16:18:21 +02001122 def delete_cluster(self, db_cluster):
1123 # Actually, item_content is equal to db_cluster
rshri932105f2024-07-05 15:11:55 +00001124 # detach profiles
1125 update_dict = None
1126 profiles_to_detach = [
1127 "infra_controller_profiles",
1128 "infra_config_profiles",
1129 "app_profiles",
1130 "resource_profiles",
1131 ]
rshri948f7de2024-12-02 03:42:35 +00001132 """
rshri932105f2024-07-05 15:11:55 +00001133 profiles_collection = {
1134 "infra_controller_profiles": "k8sinfra_controller",
1135 "infra_config_profiles": "k8sinfra_config",
1136 "app_profiles": "k8sapp",
1137 "resource_profiles": "k8sresource",
1138 }
rshri948f7de2024-12-02 03:42:35 +00001139 """
rshri932105f2024-07-05 15:11:55 +00001140 for profile_type in profiles_to_detach:
garciadeblas96b94f52024-07-08 16:18:21 +02001141 if db_cluster.get(profile_type):
garciadeblas96b94f52024-07-08 16:18:21 +02001142 profile_ids = db_cluster[profile_type]
rshri932105f2024-07-05 15:11:55 +00001143 profile_ids_copy = deepcopy(profile_ids)
rshri932105f2024-07-05 15:11:55 +00001144 for profile_id in profile_ids_copy:
rshri948f7de2024-12-02 03:42:35 +00001145 db_collection = self.profile_collection_mapping[profile_type]
rshri932105f2024-07-05 15:11:55 +00001146 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
garciadeblasc2552852024-10-22 12:39:32 +02001147 self.logger.debug("the db_profile is :{}".format(db_profile))
1148 self.logger.debug(
garciadeblas96b94f52024-07-08 16:18:21 +02001149 "the item_content name is :{}".format(db_cluster["name"])
rshri932105f2024-07-05 15:11:55 +00001150 )
garciadeblasc2552852024-10-22 12:39:32 +02001151 self.logger.debug(
rshri932105f2024-07-05 15:11:55 +00001152 "the db_profile name is :{}".format(db_profile["name"])
1153 )
garciadeblas96b94f52024-07-08 16:18:21 +02001154 if db_cluster["name"] == db_profile["name"]:
yshah6bad8892025-02-11 12:37:04 +00001155 self.delete_profile_ksu(profile_id, profile_type)
rshri932105f2024-07-05 15:11:55 +00001156 self.db.del_one(db_collection, {"_id": profile_id})
1157 else:
rshri932105f2024-07-05 15:11:55 +00001158 profile_ids.remove(profile_id)
1159 update_dict = {profile_type: profile_ids}
rshri932105f2024-07-05 15:11:55 +00001160 self.db.set_one(
garciadeblas96b94f52024-07-08 16:18:21 +02001161 "clusters", {"_id": db_cluster["_id"]}, update_dict
rshri932105f2024-07-05 15:11:55 +00001162 )
garciadeblas96b94f52024-07-08 16:18:21 +02001163 self.db.del_one("clusters", {"_id": db_cluster["_id"]})
rshri932105f2024-07-05 15:11:55 +00001164
rshri948f7de2024-12-02 03:42:35 +00001165 async def attach_profile(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001166 self.logger.info("profile attach Enter")
rshri948f7de2024-12-02 03:42:35 +00001167
garciadeblas995cbf32024-12-18 12:54:00 +01001168 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +00001169 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +00001170 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +00001171
1172 # To initialize the operation states
1173 self.initialize_operation(cluster_id, op_id)
1174
garciadeblas995cbf32024-12-18 12:54:00 +01001175 # To get the cluster
1176 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1177
1178 # To get the operation params details
1179 op_params = self.get_operation_params(db_cluster, op_id)
1180
1181 # To copy the cluster content and decrypting fields to use in workflows
1182 workflow_content = {
1183 "cluster": self.decrypted_copy(db_cluster),
1184 }
rshri948f7de2024-12-02 03:42:35 +00001185
1186 # To get the profile details
1187 profile_id = params["profile_id"]
1188 profile_type = params["profile_type"]
1189 profile_collection = self.profile_collection_mapping[profile_type]
1190 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
1191 db_profile["profile_type"] = profile_type
1192 # content["profile"] = db_profile
garciadeblas995cbf32024-12-18 12:54:00 +01001193 workflow_content["profile"] = db_profile
rshri932105f2024-07-05 15:11:55 +00001194
garciadeblas41859ce2025-02-04 16:08:51 +01001195 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001196 "attach_profile_to_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001197 )
garciadeblas41859ce2025-02-04 16:08:51 +01001198 if not workflow_res:
1199 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1200 db_cluster["resourceState"] = "ERROR"
1201 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1202 db_cluster = self.update_operation_history(
1203 db_cluster, op_id, workflow_status=False, resource_status=None
1204 )
1205 return
rshri932105f2024-07-05 15:11:55 +00001206
garciadeblas26d733c2025-02-03 16:12:43 +01001207 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001208 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001209 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001210 )
rshri932105f2024-07-05 15:11:55 +00001211 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001212 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001213 workflow_status, workflow_msg
1214 )
1215 )
1216 if workflow_status:
1217 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
1218 else:
1219 db_cluster["resourceState"] = "ERROR"
1220 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001221 db_cluster = self.update_operation_history(
1222 db_cluster, op_id, workflow_status, None
1223 )
rshri932105f2024-07-05 15:11:55 +00001224 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1225
1226 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001227 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001228 "attach_profile_to_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001229 )
1230 self.logger.info(
1231 "resource_status is :{} and resource_msg is :{}".format(
1232 resource_status, resource_msg
1233 )
1234 )
1235 if resource_status:
1236 db_cluster["resourceState"] = "READY"
1237 else:
1238 db_cluster["resourceState"] = "ERROR"
1239
1240 db_cluster["operatingState"] = "IDLE"
1241 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001242 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +00001243 )
rshri932105f2024-07-05 15:11:55 +00001244 profile_list = db_cluster[profile_type]
rshri932105f2024-07-05 15:11:55 +00001245 if resource_status:
rshri932105f2024-07-05 15:11:55 +00001246 profile_list.append(profile_id)
rshri932105f2024-07-05 15:11:55 +00001247 db_cluster[profile_type] = profile_list
shahithya70a3fc92024-11-12 11:01:05 +00001248 db_cluster["current_operation"] = None
rshri932105f2024-07-05 15:11:55 +00001249 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1250
1251 return
1252
rshri948f7de2024-12-02 03:42:35 +00001253 async def detach_profile(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001254 self.logger.info("profile dettach Enter")
rshri948f7de2024-12-02 03:42:35 +00001255
garciadeblas995cbf32024-12-18 12:54:00 +01001256 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +00001257 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +00001258 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +00001259
1260 # To initialize the operation states
1261 self.initialize_operation(cluster_id, op_id)
1262
garciadeblas995cbf32024-12-18 12:54:00 +01001263 # To get the cluster
1264 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1265
1266 # To get the operation params details
1267 op_params = self.get_operation_params(db_cluster, op_id)
1268
1269 # To copy the cluster content and decrypting fields to use in workflows
1270 workflow_content = {
1271 "cluster": self.decrypted_copy(db_cluster),
1272 }
rshri948f7de2024-12-02 03:42:35 +00001273
1274 # To get the profile details
1275 profile_id = params["profile_id"]
1276 profile_type = params["profile_type"]
1277 profile_collection = self.profile_collection_mapping[profile_type]
1278 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
1279 db_profile["profile_type"] = profile_type
garciadeblas995cbf32024-12-18 12:54:00 +01001280 workflow_content["profile"] = db_profile
rshri932105f2024-07-05 15:11:55 +00001281
garciadeblas41859ce2025-02-04 16:08:51 +01001282 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001283 "detach_profile_from_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001284 )
garciadeblas41859ce2025-02-04 16:08:51 +01001285 if not workflow_res:
1286 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1287 db_cluster["resourceState"] = "ERROR"
1288 db_cluster = self.update_operation_history(
1289 db_cluster, op_id, workflow_status=False, resource_status=None
1290 )
1291 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1292 return
rshri932105f2024-07-05 15:11:55 +00001293
garciadeblas26d733c2025-02-03 16:12:43 +01001294 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001295 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001296 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001297 )
rshri932105f2024-07-05 15:11:55 +00001298 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001299 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001300 workflow_status, workflow_msg
1301 )
1302 )
1303 if workflow_status:
1304 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
1305 else:
1306 db_cluster["resourceState"] = "ERROR"
1307 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001308 db_cluster = self.update_operation_history(
1309 db_cluster, op_id, workflow_status, None
1310 )
rshri932105f2024-07-05 15:11:55 +00001311 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1312
1313 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001314 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001315 "detach_profile_from_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001316 )
1317 self.logger.info(
1318 "resource_status is :{} and resource_msg is :{}".format(
1319 resource_status, resource_msg
1320 )
1321 )
1322 if resource_status:
1323 db_cluster["resourceState"] = "READY"
1324 else:
1325 db_cluster["resourceState"] = "ERROR"
1326
1327 db_cluster["operatingState"] = "IDLE"
1328 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001329 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +00001330 )
rshri932105f2024-07-05 15:11:55 +00001331 profile_list = db_cluster[profile_type]
1332 self.logger.info("profile list is : {}".format(profile_list))
1333 if resource_status:
rshri932105f2024-07-05 15:11:55 +00001334 profile_list.remove(profile_id)
rshri932105f2024-07-05 15:11:55 +00001335 db_cluster[profile_type] = profile_list
shahithya70a3fc92024-11-12 11:01:05 +00001336 db_cluster["current_operation"] = None
rshri932105f2024-07-05 15:11:55 +00001337 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1338
1339 return
1340
rshri948f7de2024-12-02 03:42:35 +00001341 async def register(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001342 self.logger.info("cluster register enter")
garciadeblas8bdb3d42025-04-04 00:19:13 +02001343 workflow_status = None
1344 resource_status = None
rshri932105f2024-07-05 15:11:55 +00001345
garciadeblas995cbf32024-12-18 12:54:00 +01001346 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +00001347 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +00001348 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +00001349
1350 # To initialize the operation states
1351 self.initialize_operation(cluster_id, op_id)
1352
garciadeblas995cbf32024-12-18 12:54:00 +01001353 # To get the cluster
1354 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1355
1356 # To get the operation params details
1357 op_params = self.get_operation_params(db_cluster, op_id)
1358
1359 # To copy the cluster content and decrypting fields to use in workflows
garciadeblas8bdb3d42025-04-04 00:19:13 +02001360 db_cluster_copy = self.decrypted_copy(db_cluster)
garciadeblas995cbf32024-12-18 12:54:00 +01001361 workflow_content = {
garciadeblas8bdb3d42025-04-04 00:19:13 +02001362 "cluster": db_cluster_copy,
garciadeblas995cbf32024-12-18 12:54:00 +01001363 }
rshric3564942024-11-12 18:12:38 +00001364
garciadeblas41859ce2025-02-04 16:08:51 +01001365 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001366 "register_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001367 )
garciadeblas41859ce2025-02-04 16:08:51 +01001368 if not workflow_res:
1369 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1370 db_cluster["state"] = "FAILED_CREATION"
1371 db_cluster["resourceState"] = "ERROR"
1372 db_cluster = self.update_operation_history(
1373 db_cluster, op_id, workflow_status=False, resource_status=None
1374 )
1375 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1376 # Clean items used in the workflow, no matter if the workflow succeeded
1377 clean_status, clean_msg = await self.odu.clean_items_workflow(
1378 "register_cluster", op_id, op_params, workflow_content
1379 )
1380 self.logger.info(
1381 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1382 )
1383 return
rshri932105f2024-07-05 15:11:55 +00001384
garciadeblas26d733c2025-02-03 16:12:43 +01001385 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001386 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001387 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001388 )
rshri932105f2024-07-05 15:11:55 +00001389 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001390 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001391 workflow_status, workflow_msg
1392 )
1393 )
1394 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001395 db_cluster["state"] = "CREATED"
1396 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +00001397 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001398 db_cluster["state"] = "FAILED_CREATION"
1399 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001400 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001401 db_cluster = self.update_operation_history(
1402 db_cluster, op_id, workflow_status, None
1403 )
garciadeblas96b94f52024-07-08 16:18:21 +02001404 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001405
garciadeblasdde3a312024-09-17 13:25:06 +02001406 # Clean items used in the workflow, no matter if the workflow succeeded
1407 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001408 "register_cluster", op_id, op_params, workflow_content
garciadeblasdde3a312024-09-17 13:25:06 +02001409 )
1410 self.logger.info(
1411 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1412 )
1413
rshri932105f2024-07-05 15:11:55 +00001414 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001415 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001416 "register_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001417 )
1418 self.logger.info(
1419 "resource_status is :{} and resource_msg is :{}".format(
1420 resource_status, resource_msg
1421 )
1422 )
1423 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001424 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +00001425 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001426 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001427
garciadeblas96b94f52024-07-08 16:18:21 +02001428 db_cluster["operatingState"] = "IDLE"
1429 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001430 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +00001431 )
shahithya70a3fc92024-11-12 11:01:05 +00001432 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +02001433 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri948f7de2024-12-02 03:42:35 +00001434
garciadeblas8bdb3d42025-04-04 00:19:13 +02001435 # Update default profile agekeys and state
1436 self.update_default_profile_agekeys(db_cluster_copy)
1437 self.update_profile_state(db_cluster, workflow_status, resource_status)
1438
rshri948f7de2024-12-02 03:42:35 +00001439 db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
1440 db_register["credentials"] = db_cluster["credentials"]
1441 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
1442
1443 if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
1444 # To call the lcm.py for registering the cluster in k8scluster lcm.
1445 register = await self.regist.create(db_register, order_id)
1446 self.logger.debug(f"Register is : {register}")
1447 else:
1448 db_register["_admin"]["operationalState"] = "ERROR"
1449 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
1450
rshri932105f2024-07-05 15:11:55 +00001451 return
1452
garciadeblasad6d1ba2025-01-22 16:02:18 +01001453 async def check_register_cluster(self, op_id, op_params, content):
1454 self.logger.info(
1455 f"check_register_cluster Operation {op_id}. Params: {op_params}."
1456 )
1457 # self.logger.debug(f"Content: {content}")
1458 db_cluster = content["cluster"]
1459 cluster_name = db_cluster["git_name"].lower()
1460 cluster_kustomization_name = cluster_name
1461 bootstrap = op_params.get("bootstrap", True)
1462 checkings_list = [
1463 {
1464 "item": "kustomization",
1465 "name": f"{cluster_kustomization_name}-bstrp-fluxctrl",
1466 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +01001467 "condition": {
1468 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
1469 "value": "True",
1470 },
garciadeblasad6d1ba2025-01-22 16:02:18 +01001471 "timeout": self._checkloop_kustomization_timeout,
1472 "enable": bootstrap,
1473 "resourceState": "IN_PROGRESS.BOOTSTRAP_OK",
1474 },
1475 ]
1476 return await self.common_check_list(
1477 op_id, checkings_list, "clusters", db_cluster
1478 )
1479
rshri948f7de2024-12-02 03:42:35 +00001480 async def deregister(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001481 self.logger.info("cluster deregister enter")
1482
garciadeblas995cbf32024-12-18 12:54:00 +01001483 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +00001484 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +00001485 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +00001486
1487 # To initialize the operation states
1488 self.initialize_operation(cluster_id, op_id)
1489
garciadeblas995cbf32024-12-18 12:54:00 +01001490 # To get the cluster
1491 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1492
1493 # To get the operation params details
1494 op_params = self.get_operation_params(db_cluster, op_id)
1495
1496 # To copy the cluster content and decrypting fields to use in workflows
1497 workflow_content = {
1498 "cluster": self.decrypted_copy(db_cluster),
1499 }
rshri932105f2024-07-05 15:11:55 +00001500
garciadeblas41859ce2025-02-04 16:08:51 +01001501 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001502 "deregister_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001503 )
garciadeblas41859ce2025-02-04 16:08:51 +01001504 if not workflow_res:
1505 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1506 db_cluster["state"] = "FAILED_DELETION"
1507 db_cluster["resourceState"] = "ERROR"
1508 db_cluster = self.update_operation_history(
1509 db_cluster, op_id, workflow_status=False, resource_status=None
1510 )
1511 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
garciadeblas41859ce2025-02-04 16:08:51 +01001512 return
rshri932105f2024-07-05 15:11:55 +00001513
garciadeblas26d733c2025-02-03 16:12:43 +01001514 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001515 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001516 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001517 )
rshri932105f2024-07-05 15:11:55 +00001518 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001519 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001520 workflow_status, workflow_msg
1521 )
1522 )
1523 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001524 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +00001525 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001526 db_cluster["state"] = "FAILED_DELETION"
1527 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001528 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001529 db_cluster = self.update_operation_history(
1530 db_cluster, op_id, workflow_status, None
1531 )
garciadeblas96b94f52024-07-08 16:18:21 +02001532 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001533
1534 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001535 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001536 "deregister_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001537 )
1538 self.logger.info(
1539 "resource_status is :{} and resource_msg is :{}".format(
1540 resource_status, resource_msg
1541 )
1542 )
1543 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001544 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +00001545 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001546 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001547
garciadeblas96b94f52024-07-08 16:18:21 +02001548 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001549 db_cluster, op_id, workflow_status, resource_status
garciadeblas96b94f52024-07-08 16:18:21 +02001550 )
1551 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001552
garciadeblas93380452025-02-05 09:32:52 +01001553 await self.delete(params, order_id)
1554 # Clean items used in the workflow or in the cluster, no matter if the workflow succeeded
1555 clean_status, clean_msg = await self.odu.clean_items_workflow(
1556 "deregister_cluster", op_id, op_params, workflow_content
1557 )
1558 self.logger.info(
1559 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1560 )
1561 return
rshri932105f2024-07-05 15:11:55 +00001562
rshri948f7de2024-12-02 03:42:35 +00001563 async def get_creds(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001564 self.logger.info("Cluster get creds Enter")
rshri948f7de2024-12-02 03:42:35 +00001565 cluster_id = params["cluster_id"]
1566 op_id = params["operation_id"]
1567 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
garciadeblas96b94f52024-07-08 16:18:21 +02001568 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
1569 if result:
1570 db_cluster["credentials"] = cluster_creds
yshahd940c652024-10-17 06:11:12 +00001571 op_len = 0
1572 for operations in db_cluster["operationHistory"]:
1573 if operations["op_id"] == op_id:
1574 db_cluster["operationHistory"][op_len]["result"] = result
1575 db_cluster["operationHistory"][op_len]["endDate"] = time()
1576 op_len += 1
shahithya70a3fc92024-11-12 11:01:05 +00001577 db_cluster["current_operation"] = None
yshahd940c652024-10-17 06:11:12 +00001578 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri948f7de2024-12-02 03:42:35 +00001579 self.logger.info("Cluster Get Creds Exit")
yshah771dea82024-07-05 15:11:49 +00001580 return
1581
rshri948f7de2024-12-02 03:42:35 +00001582 async def update(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001583 self.logger.info("Cluster update Enter")
rshri948f7de2024-12-02 03:42:35 +00001584 # To get the cluster details
1585 cluster_id = params["cluster_id"]
1586 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1587
1588 # To get the operation params details
1589 op_id = params["operation_id"]
1590 op_params = self.get_operation_params(db_cluster, op_id)
yshah771dea82024-07-05 15:11:49 +00001591
garciadeblas995cbf32024-12-18 12:54:00 +01001592 # To copy the cluster content and decrypting fields to use in workflows
1593 workflow_content = {
1594 "cluster": self.decrypted_copy(db_cluster),
1595 }
rshric3564942024-11-12 18:12:38 +00001596
1597 # vim account details
1598 db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
garciadeblas995cbf32024-12-18 12:54:00 +01001599 workflow_content["vim_account"] = db_vim
rshric3564942024-11-12 18:12:38 +00001600
garciadeblas41859ce2025-02-04 16:08:51 +01001601 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001602 "update_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001603 )
garciadeblas41859ce2025-02-04 16:08:51 +01001604 if not workflow_res:
1605 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1606 db_cluster["resourceState"] = "ERROR"
1607 db_cluster = self.update_operation_history(
1608 db_cluster, op_id, workflow_status=False, resource_status=None
1609 )
1610 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1611 # Clean items used in the workflow, no matter if the workflow succeeded
1612 clean_status, clean_msg = await self.odu.clean_items_workflow(
1613 "update_cluster", op_id, op_params, workflow_content
1614 )
1615 self.logger.info(
1616 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1617 )
1618 return
garciadeblas26d733c2025-02-03 16:12:43 +01001619 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001620 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001621 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001622 )
1623 self.logger.info(
1624 "Workflow Status: {} Workflow Message: {}".format(
1625 workflow_status, workflow_msg
yshah771dea82024-07-05 15:11:49 +00001626 )
garciadeblas96b94f52024-07-08 16:18:21 +02001627 )
1628
1629 if workflow_status:
1630 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
1631 else:
1632 db_cluster["resourceState"] = "ERROR"
1633
yshahcb9075f2024-11-22 12:08:57 +00001634 db_cluster = self.update_operation_history(
1635 db_cluster, op_id, workflow_status, None
1636 )
garciadeblas96b94f52024-07-08 16:18:21 +02001637 # self.logger.info("Db content: {}".format(db_content))
1638 # self.db.set_one(self.db_collection, {"_id": _id}, db_cluster)
1639 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1640
garciadeblas28bff0f2024-09-16 12:53:07 +02001641 # Clean items used in the workflow, no matter if the workflow succeeded
1642 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001643 "update_cluster", op_id, op_params, workflow_content
garciadeblas28bff0f2024-09-16 12:53:07 +02001644 )
1645 self.logger.info(
1646 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1647 )
garciadeblas96b94f52024-07-08 16:18:21 +02001648 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001649 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001650 "update_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001651 )
1652 self.logger.info(
1653 "Resource Status: {} Resource Message: {}".format(
1654 resource_status, resource_msg
1655 )
1656 )
yshah771dea82024-07-05 15:11:49 +00001657
1658 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001659 db_cluster["resourceState"] = "READY"
yshah771dea82024-07-05 15:11:49 +00001660 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001661 db_cluster["resourceState"] = "ERROR"
yshah771dea82024-07-05 15:11:49 +00001662
yshah0defcd52024-11-18 07:41:35 +00001663 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001664 db_cluster, op_id, workflow_status, resource_status
yshah0defcd52024-11-18 07:41:35 +00001665 )
1666
garciadeblas96b94f52024-07-08 16:18:21 +02001667 db_cluster["operatingState"] = "IDLE"
garciadeblas96b94f52024-07-08 16:18:21 +02001668 # self.logger.info("db_cluster: {}".format(db_cluster))
garciadeblas7cf480d2025-01-27 16:53:45 +01001669 # TODO: verify condition
garciadeblas96b94f52024-07-08 16:18:21 +02001670 # For the moment, if the workflow completed successfully, then we update the db accordingly.
1671 if workflow_status:
1672 if "k8s_version" in op_params:
1673 db_cluster["k8s_version"] = op_params["k8s_version"]
yshah0defcd52024-11-18 07:41:35 +00001674 if "node_count" in op_params:
garciadeblas96b94f52024-07-08 16:18:21 +02001675 db_cluster["node_count"] = op_params["node_count"]
yshah0defcd52024-11-18 07:41:35 +00001676 if "node_size" in op_params:
1677 db_cluster["node_count"] = op_params["node_size"]
garciadeblas96b94f52024-07-08 16:18:21 +02001678 # self.db.set_one(self.db_collection, {"_id": _id}, db_content)
shahithya70a3fc92024-11-12 11:01:05 +00001679 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +02001680 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
yshah771dea82024-07-05 15:11:49 +00001681 return
1682
garciadeblasad6d1ba2025-01-22 16:02:18 +01001683 async def check_update_cluster(self, op_id, op_params, content):
1684 self.logger.info(
1685 f"check_update_cluster Operation {op_id}. Params: {op_params}."
1686 )
1687 self.logger.debug(f"Content: {content}")
garciadeblasd7d8bde2025-01-27 18:31:06 +01001688 # return await self.check_dummy_operation(op_id, op_params, content)
1689 db_cluster = content["cluster"]
1690 cluster_name = db_cluster["git_name"].lower()
1691 cluster_kustomization_name = cluster_name
1692 db_vim_account = content["vim_account"]
1693 cloud_type = db_vim_account["vim_type"]
1694 if cloud_type == "aws":
1695 cluster_name = f"{cluster_name}-cluster"
1696 if cloud_type in ("azure", "gcp", "aws"):
1697 checkings_list = [
1698 {
1699 "item": "kustomization",
1700 "name": cluster_kustomization_name,
1701 "namespace": "managed-resources",
1702 "condition": {
1703 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
1704 "value": "True",
1705 },
1706 "timeout": self._checkloop_kustomization_timeout,
1707 "enable": True,
1708 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
1709 },
1710 ]
1711 else:
1712 return False, "Not suitable VIM account to check cluster status"
1713 # Scale operation
1714 if "node_count" in op_params:
garciadeblas1ca09852025-05-30 11:19:06 +02001715 if cloud_type in ("azure", "gcp"):
1716 checkings_list.append(
1717 {
1718 "item": f"cluster_{cloud_type}",
1719 "name": cluster_name,
1720 "namespace": "",
1721 "condition": {
1722 "jsonpath_filter": "status.atProvider.defaultNodePool[0].nodeCount",
1723 "value": f"{op_params['node_count']}",
1724 },
1725 "timeout": self._checkloop_resource_timeout * 3,
1726 "enable": True,
1727 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODE_COUNT.CLUSTER",
1728 }
1729 )
1730 elif cloud_type == "aws":
1731 checkings_list.append(
1732 {
1733 "item": f"nodegroup_{cloud_type}",
1734 "name": f"{cluster_name}-nodegroup",
1735 "namespace": "",
1736 "condition": {
1737 "jsonpath_filter": "status.atProvider.scalingConfig[0].desiredSize",
1738 "value": f"{op_params['node_count']}",
1739 },
1740 "timeout": self._checkloop_resource_timeout * 3,
1741 "enable": True,
1742 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODE_COUNT.CLUSTER",
1743 }
1744 )
1745
garciadeblasd7d8bde2025-01-27 18:31:06 +01001746 # Upgrade operation
1747 if "k8s_version" in op_params:
1748 checkings_list.append(
1749 {
1750 "item": f"cluster_{cloud_type}",
1751 "name": cluster_name,
1752 "namespace": "",
1753 "condition": {
1754 "jsonpath_filter": "status.atProvider.defaultNodePool[0].orchestratorVersion",
1755 "value": op_params["k8s_version"],
1756 },
1757 "timeout": self._checkloop_resource_timeout * 2,
1758 "enable": True,
1759 "resourceState": "IN_PROGRESS.RESOURCE_READY.K8S_VERSION.CLUSTER",
1760 }
1761 )
1762 return await self.common_check_list(
1763 op_id, checkings_list, "clusters", db_cluster
1764 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001765
yshah771dea82024-07-05 15:11:49 +00001766
garciadeblas72412282024-11-07 12:41:54 +01001767class CloudCredentialsLcm(GitOpsLcm):
yshah771dea82024-07-05 15:11:49 +00001768 db_collection = "vim_accounts"
1769
1770 def __init__(self, msg, lcm_tasks, config):
1771 """
1772 Init, Connect to database, filesystem storage, and messaging
1773 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
1774 :return: None
1775 """
garciadeblas72412282024-11-07 12:41:54 +01001776 super().__init__(msg, lcm_tasks, config)
yshah771dea82024-07-05 15:11:49 +00001777
yshah564ec9c2024-11-29 07:33:32 +00001778 async def add(self, params, order_id):
yshah771dea82024-07-05 15:11:49 +00001779 self.logger.info("Cloud Credentials create")
yshah564ec9c2024-11-29 07:33:32 +00001780 vim_id = params["_id"]
1781 op_id = vim_id
1782 op_params = params
1783 db_content = self.db.get_one(self.db_collection, {"_id": vim_id})
1784 vim_config = db_content.get("config", {})
1785 self.db.encrypt_decrypt_fields(
1786 vim_config.get("credentials"),
1787 "decrypt",
1788 ["password", "secret"],
1789 schema_version=db_content["schema_version"],
1790 salt=vim_id,
1791 )
1792
garciadeblas41859ce2025-02-04 16:08:51 +01001793 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001794 "create_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001795 )
1796
1797 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001798 op_id, workflow_name
yshah771dea82024-07-05 15:11:49 +00001799 )
1800
1801 self.logger.info(
1802 "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
1803 )
1804
garciadeblas28bff0f2024-09-16 12:53:07 +02001805 # Clean items used in the workflow, no matter if the workflow succeeded
1806 clean_status, clean_msg = await self.odu.clean_items_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001807 "create_cloud_credentials", op_id, op_params, db_content
garciadeblas28bff0f2024-09-16 12:53:07 +02001808 )
1809 self.logger.info(
1810 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1811 )
1812
yshah771dea82024-07-05 15:11:49 +00001813 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001814 resource_status, resource_msg = await self.check_resource_status(
yshah564ec9c2024-11-29 07:33:32 +00001815 "create_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001816 )
1817 self.logger.info(
1818 "Resource Status: {} Resource Message: {}".format(
1819 resource_status, resource_msg
1820 )
1821 )
garciadeblas15b8a302024-09-23 12:40:13 +02001822
yshah564ec9c2024-11-29 07:33:32 +00001823 db_content["_admin"]["operationalState"] = "ENABLED"
1824 for operation in db_content["_admin"]["operations"]:
garciadeblas15b8a302024-09-23 12:40:13 +02001825 if operation["lcmOperationType"] == "create":
1826 operation["operationState"] = "ENABLED"
yshah564ec9c2024-11-29 07:33:32 +00001827 self.logger.info("Content : {}".format(db_content))
1828 self.db.set_one("vim_accounts", {"_id": db_content["_id"]}, db_content)
yshah771dea82024-07-05 15:11:49 +00001829 return
1830
yshah564ec9c2024-11-29 07:33:32 +00001831 async def edit(self, params, order_id):
1832 self.logger.info("Cloud Credentials Update")
1833 vim_id = params["_id"]
1834 op_id = vim_id
1835 op_params = params
1836 db_content = self.db.get_one("vim_accounts", {"_id": vim_id})
1837 vim_config = db_content.get("config", {})
1838 self.db.encrypt_decrypt_fields(
1839 vim_config.get("credentials"),
1840 "decrypt",
1841 ["password", "secret"],
1842 schema_version=db_content["schema_version"],
1843 salt=vim_id,
1844 )
1845
garciadeblas41859ce2025-02-04 16:08:51 +01001846 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001847 "update_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001848 )
1849 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001850 op_id, workflow_name
yshah771dea82024-07-05 15:11:49 +00001851 )
1852 self.logger.info(
1853 "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
1854 )
1855
garciadeblas28bff0f2024-09-16 12:53:07 +02001856 # Clean items used in the workflow, no matter if the workflow succeeded
1857 clean_status, clean_msg = await self.odu.clean_items_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001858 "update_cloud_credentials", op_id, op_params, db_content
garciadeblas28bff0f2024-09-16 12:53:07 +02001859 )
1860 self.logger.info(
1861 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1862 )
1863
yshah771dea82024-07-05 15:11:49 +00001864 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001865 resource_status, resource_msg = await self.check_resource_status(
yshah564ec9c2024-11-29 07:33:32 +00001866 "update_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001867 )
1868 self.logger.info(
1869 "Resource Status: {} Resource Message: {}".format(
1870 resource_status, resource_msg
1871 )
1872 )
1873 return
1874
yshah564ec9c2024-11-29 07:33:32 +00001875 async def remove(self, params, order_id):
1876 self.logger.info("Cloud Credentials remove")
1877 vim_id = params["_id"]
1878 op_id = vim_id
1879 op_params = params
1880 db_content = self.db.get_one("vim_accounts", {"_id": vim_id})
1881
garciadeblas41859ce2025-02-04 16:08:51 +01001882 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001883 "delete_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001884 )
1885 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001886 op_id, workflow_name
yshah771dea82024-07-05 15:11:49 +00001887 )
1888 self.logger.info(
1889 "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
1890 )
1891
1892 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001893 resource_status, resource_msg = await self.check_resource_status(
yshah564ec9c2024-11-29 07:33:32 +00001894 "delete_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001895 )
1896 self.logger.info(
1897 "Resource Status: {} Resource Message: {}".format(
1898 resource_status, resource_msg
1899 )
1900 )
yshah564ec9c2024-11-29 07:33:32 +00001901 self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
yshah771dea82024-07-05 15:11:49 +00001902 return
1903
rshri932105f2024-07-05 15:11:55 +00001904
garciadeblas72412282024-11-07 12:41:54 +01001905class K8sAppLcm(GitOpsLcm):
rshri948f7de2024-12-02 03:42:35 +00001906 db_collection = "k8sapp"
1907
rshri932105f2024-07-05 15:11:55 +00001908 def __init__(self, msg, lcm_tasks, config):
1909 """
1910 Init, Connect to database, filesystem storage, and messaging
1911 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
1912 :return: None
1913 """
garciadeblas72412282024-11-07 12:41:54 +01001914 super().__init__(msg, lcm_tasks, config)
rshri932105f2024-07-05 15:11:55 +00001915
rshri948f7de2024-12-02 03:42:35 +00001916 async def create(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001917 self.logger.info("App Create Enter")
1918
rshri948f7de2024-12-02 03:42:35 +00001919 op_id = params["operation_id"]
1920 profile_id = params["profile_id"]
1921
1922 # To initialize the operation states
1923 self.initialize_operation(profile_id, op_id)
1924
1925 content = self.db.get_one("k8sapp", {"_id": profile_id})
1926 content["profile_type"] = "applications"
1927 op_params = self.get_operation_params(content, op_id)
1928 self.db.set_one("k8sapp", {"_id": content["_id"]}, content)
1929
garciadeblas41859ce2025-02-04 16:08:51 +01001930 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02001931 "create_profile", op_id, op_params, content
1932 )
garciadeblas26d733c2025-02-03 16:12:43 +01001933 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00001934
garciadeblas33b36e72025-01-17 12:49:19 +01001935 workflow_status = await self.check_workflow_and_update_db(
1936 op_id, workflow_name, content
1937 )
rshri932105f2024-07-05 15:11:55 +00001938
1939 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001940 resource_status, content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02001941 "create_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00001942 )
yshah564ec9c2024-11-29 07:33:32 +00001943 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
1944 self.logger.info(f"App Create Exit with resource status: {resource_status}")
rshri932105f2024-07-05 15:11:55 +00001945 return
1946
rshri948f7de2024-12-02 03:42:35 +00001947 async def delete(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001948 self.logger.info("App delete Enter")
rshri932105f2024-07-05 15:11:55 +00001949
rshri948f7de2024-12-02 03:42:35 +00001950 op_id = params["operation_id"]
1951 profile_id = params["profile_id"]
1952
1953 # To initialize the operation states
1954 self.initialize_operation(profile_id, op_id)
1955
1956 content = self.db.get_one("k8sapp", {"_id": profile_id})
1957 op_params = self.get_operation_params(content, op_id)
1958
garciadeblas41859ce2025-02-04 16:08:51 +01001959 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02001960 "delete_profile", op_id, op_params, content
1961 )
garciadeblas26d733c2025-02-03 16:12:43 +01001962 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00001963
garciadeblas33b36e72025-01-17 12:49:19 +01001964 workflow_status = await self.check_workflow_and_update_db(
1965 op_id, workflow_name, content
1966 )
rshri932105f2024-07-05 15:11:55 +00001967
1968 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001969 resource_status, content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02001970 "delete_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00001971 )
rshri932105f2024-07-05 15:11:55 +00001972
yshahb36649f2025-02-28 09:01:51 +00001973 force = params.get("force", False)
1974 if force:
1975 force_delete_status = self.check_force_delete_and_delete_from_db(
1976 profile_id, workflow_status, resource_status, force
1977 )
1978 if force_delete_status:
1979 return
1980
1981 self.logger.info(f"Resource status: {resource_status}")
yshah564ec9c2024-11-29 07:33:32 +00001982 if resource_status:
1983 content["state"] = "DELETED"
yshah6bad8892025-02-11 12:37:04 +00001984 profile_type = self.profile_type_mapping[content["profile_type"]]
1985 self.delete_profile_ksu(profile_id, profile_type)
yshah564ec9c2024-11-29 07:33:32 +00001986 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
1987 self.db.del_one(self.db_collection, {"_id": content["_id"]})
1988 self.logger.info(f"App Delete Exit with resource status: {resource_status}")
rshri932105f2024-07-05 15:11:55 +00001989 return
1990
1991
garciadeblas72412282024-11-07 12:41:54 +01001992class K8sResourceLcm(GitOpsLcm):
rshri948f7de2024-12-02 03:42:35 +00001993 db_collection = "k8sresource"
1994
rshri932105f2024-07-05 15:11:55 +00001995 def __init__(self, msg, lcm_tasks, config):
1996 """
1997 Init, Connect to database, filesystem storage, and messaging
1998 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
1999 :return: None
2000 """
garciadeblas72412282024-11-07 12:41:54 +01002001 super().__init__(msg, lcm_tasks, config)
rshri932105f2024-07-05 15:11:55 +00002002
rshri948f7de2024-12-02 03:42:35 +00002003 async def create(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00002004 self.logger.info("Resource Create Enter")
2005
rshri948f7de2024-12-02 03:42:35 +00002006 op_id = params["operation_id"]
2007 profile_id = params["profile_id"]
2008
2009 # To initialize the operation states
2010 self.initialize_operation(profile_id, op_id)
2011
2012 content = self.db.get_one("k8sresource", {"_id": profile_id})
2013 content["profile_type"] = "managed-resources"
2014 op_params = self.get_operation_params(content, op_id)
2015 self.db.set_one("k8sresource", {"_id": content["_id"]}, content)
2016
garciadeblas41859ce2025-02-04 16:08:51 +01002017 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002018 "create_profile", op_id, op_params, content
2019 )
garciadeblas26d733c2025-02-03 16:12:43 +01002020 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00002021
garciadeblas33b36e72025-01-17 12:49:19 +01002022 workflow_status = await self.check_workflow_and_update_db(
2023 op_id, workflow_name, content
2024 )
rshri932105f2024-07-05 15:11:55 +00002025
2026 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002027 resource_status, content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002028 "create_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00002029 )
yshah564ec9c2024-11-29 07:33:32 +00002030 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
2031 self.logger.info(
2032 f"Resource Create Exit with resource status: {resource_status}"
rshri932105f2024-07-05 15:11:55 +00002033 )
rshri932105f2024-07-05 15:11:55 +00002034 return
2035
rshri948f7de2024-12-02 03:42:35 +00002036 async def delete(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00002037 self.logger.info("Resource delete Enter")
rshri948f7de2024-12-02 03:42:35 +00002038
2039 op_id = params["operation_id"]
2040 profile_id = params["profile_id"]
2041
2042 # To initialize the operation states
2043 self.initialize_operation(profile_id, op_id)
2044
2045 content = self.db.get_one("k8sresource", {"_id": profile_id})
2046 op_params = self.get_operation_params(content, op_id)
rshri932105f2024-07-05 15:11:55 +00002047
garciadeblas41859ce2025-02-04 16:08:51 +01002048 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002049 "delete_profile", op_id, op_params, content
2050 )
garciadeblas26d733c2025-02-03 16:12:43 +01002051 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00002052
garciadeblas33b36e72025-01-17 12:49:19 +01002053 workflow_status = await self.check_workflow_and_update_db(
2054 op_id, workflow_name, content
2055 )
rshri932105f2024-07-05 15:11:55 +00002056
2057 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002058 resource_status, content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002059 "delete_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00002060 )
rshri932105f2024-07-05 15:11:55 +00002061
yshahb36649f2025-02-28 09:01:51 +00002062 force = params.get("force", False)
2063 if force:
2064 force_delete_status = self.check_force_delete_and_delete_from_db(
2065 profile_id, workflow_status, resource_status, force
2066 )
2067 if force_delete_status:
2068 return
2069
yshah564ec9c2024-11-29 07:33:32 +00002070 if resource_status:
2071 content["state"] = "DELETED"
yshah6bad8892025-02-11 12:37:04 +00002072 profile_type = self.profile_type_mapping[content["profile_type"]]
2073 self.delete_profile_ksu(profile_id, profile_type)
yshah564ec9c2024-11-29 07:33:32 +00002074 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
2075 self.db.del_one(self.db_collection, {"_id": content["_id"]})
2076 self.logger.info(
2077 f"Resource Delete Exit with resource status: {resource_status}"
garciadeblas96b94f52024-07-08 16:18:21 +02002078 )
rshri932105f2024-07-05 15:11:55 +00002079 return
2080
2081
class K8sInfraControllerLcm(GitOpsLcm):
    """LCM handler for infra-controller profiles, driven by ODU GitOps workflows."""

    db_collection = "k8sinfra_controller"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an infra-controller profile via the ODU "create_profile" workflow.

        :param params: dict with "operation_id" and "profile_id"
        :param order_id: kafka order identifier (unused here)
        :return: None; workflow/resource status is persisted in the database
        """
        self.logger.info("Infra controller Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_controller", {"_id": profile_id})
        content["profile_type"] = "infra-controllers"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, content)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Fix: bind resource_status even when the workflow failed, otherwise
        # the exit trace below raises NameError.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
        self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(
            f"Infra Controller Create Exit with resource status: {resource_status}"
        )
        return

    async def delete(self, params, order_id):
        """Delete an infra-controller profile via the ODU "delete_profile" workflow.

        Supports forced deletion via params["force"].

        :param params: dict with "operation_id", "profile_id", optional "force"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("Infra controller delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_controller", {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Fix: resource_status was unbound when the workflow failed, crashing
        # the force branch below — the very path force-delete exists for.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        force = params.get("force", False)
        if force:
            force_delete_status = self.check_force_delete_and_delete_from_db(
                profile_id, workflow_status, resource_status, force
            )
            if force_delete_status:
                return

        if resource_status:
            content["state"] = "DELETED"
            profile_type = self.profile_type_mapping[content["profile_type"]]
            self.delete_profile_ksu(profile_id, profile_type)
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
            self.logger.info(
                f"Infra Controller Delete Exit with resource status: {resource_status}"
            )
        return
2170
2171
class K8sInfraConfigLcm(GitOpsLcm):
    """LCM handler for infra-config profiles, driven by ODU GitOps workflows."""

    db_collection = "k8sinfra_config"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an infra-config profile via the ODU "create_profile" workflow.

        :param params: dict with "operation_id" and "profile_id"
        :param order_id: kafka order identifier (unused here)
        :return: None; workflow/resource status is persisted in the database
        """
        self.logger.info("Infra config Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_config", {"_id": profile_id})
        content["profile_type"] = "infra-configs"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, content)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Fix: bind resource_status even when the workflow failed, otherwise
        # the exit trace below raises NameError.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
        self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(
            f"Infra Config Create Exit with resource status: {resource_status}"
        )
        return

    async def delete(self, params, order_id):
        """Delete an infra-config profile via the ODU "delete_profile" workflow.

        Supports forced deletion via params["force"].

        :param params: dict with "operation_id", "profile_id", optional "force"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("Infra config delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_config", {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Fix: resource_status was unbound when the workflow failed, crashing
        # the force branch below — the very path force-delete exists for.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        force = params.get("force", False)
        if force:
            force_delete_status = self.check_force_delete_and_delete_from_db(
                profile_id, workflow_status, resource_status, force
            )
            if force_delete_status:
                return

        if resource_status:
            content["state"] = "DELETED"
            profile_type = self.profile_type_mapping[content["profile_type"]]
            self.delete_profile_ksu(profile_id, profile_type)
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
            self.logger.info(
                f"Infra Config Delete Exit with resource status: {resource_status}"
            )

        return
yshah771dea82024-07-05 15:11:49 +00002261
2262
class OkaLcm(GitOpsLcm):
    """LCM handler for OSM Kubernetes Applications (OKAs)."""

    db_collection = "okas"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an OKA via the ODU "create_oka" workflow.

        :param params: dict with "operation_id" and "oka_id"
        :param order_id: kafka order identifier (unused here)
        :return: None; workflow/resource status is persisted in the database
        """
        self.logger.info("OKA Create Enter")
        op_id = params["operation_id"]
        oka_id = params["oka_id"]
        self.initialize_operation(oka_id, op_id)
        db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
        op_params = self.get_operation_params(db_content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_oka", op_id, op_params, db_content
        )

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_content
        )

        # Fix: bind resource_status even when the workflow failed, otherwise
        # the exit trace below raises NameError.
        resource_status = False
        if workflow_status:
            resource_status, db_content = await self.check_resource_and_update_db(
                "create_oka", op_id, op_params, db_content
            )
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "create_oka", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )
        self.logger.info(f"OKA Create Exit with resource status: {resource_status}")
        return

    async def edit(self, params, order_id):
        """Update an OKA via the ODU "update_oka" workflow.

        :param params: dict with "operation_id" and "oka_id"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("OKA Edit Enter")
        op_id = params["operation_id"]
        oka_id = params["oka_id"]
        self.initialize_operation(oka_id, op_id)
        db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
        op_params = self.get_operation_params(db_content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "update_oka", op_id, op_params, db_content
        )
        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_content
        )

        # Fix: bind resource_status even when the workflow failed.
        resource_status = False
        if workflow_status:
            resource_status, db_content = await self.check_resource_and_update_db(
                "update_oka", op_id, op_params, db_content
            )
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "update_oka", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )
        self.logger.info(f"OKA Update Exit with resource status: {resource_status}")
        return

    async def delete(self, params, order_id):
        """Delete an OKA via the ODU "delete_oka" workflow.

        Supports forced deletion via params["force"].

        :param params: dict with "operation_id", "oka_id", optional "force"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("OKA delete Enter")
        op_id = params["operation_id"]
        oka_id = params["oka_id"]
        self.initialize_operation(oka_id, op_id)
        db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
        op_params = self.get_operation_params(db_content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_oka", op_id, op_params, db_content
        )
        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_content
        )

        # Fix: resource_status was unbound when the workflow failed, crashing
        # the force branch below — the very path force-delete exists for.
        resource_status = False
        if workflow_status:
            resource_status, db_content = await self.check_resource_and_update_db(
                "delete_oka", op_id, op_params, db_content
            )

        force = params.get("force", False)
        if force:
            force_delete_status = self.check_force_delete_and_delete_from_db(
                oka_id, workflow_status, resource_status, force
            )
            if force_delete_status:
                return

        if resource_status:
            # Fix: original used "==" here, a no-op comparison; assignment
            # to mark the record DELETED was clearly intended.
            db_content["state"] = "DELETED"
            self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
            self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "delete_oka", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )
        self.logger.info(f"OKA Delete Exit with resource status: {resource_status}")
        return
2377
2378
garciadeblas72412282024-11-07 12:41:54 +01002379class KsuLcm(GitOpsLcm):
yshah771dea82024-07-05 15:11:49 +00002380 db_collection = "ksus"
2381
2382 def __init__(self, msg, lcm_tasks, config):
2383 """
2384 Init, Connect to database, filesystem storage, and messaging
2385 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
2386 :return: None
2387 """
garciadeblas72412282024-11-07 12:41:54 +01002388 super().__init__(msg, lcm_tasks, config)
garciadeblasad6d1ba2025-01-22 16:02:18 +01002389 self._workflows = {
2390 "create_ksus": {
2391 "check_resource_function": self.check_create_ksus,
2392 },
2393 "delete_ksus": {
2394 "check_resource_function": self.check_delete_ksus,
2395 },
2396 }
2397
2398 def get_dbclusters_from_profile(self, profile_id, profile_type):
2399 cluster_list = []
2400 db_clusters = self.db.get_list("clusters")
2401 self.logger.info(f"Getting list of clusters for {profile_type} {profile_id}")
2402 for db_cluster in db_clusters:
2403 if profile_id in db_cluster.get(profile_type, []):
2404 self.logger.info(
2405 f"Profile {profile_id} found in cluster {db_cluster['name']}"
2406 )
2407 cluster_list.append(db_cluster)
2408 return cluster_list
yshah771dea82024-07-05 15:11:49 +00002409
    async def create(self, params, order_id):
        """Create a batch of KSUs by launching a single ODU "create_ksus" workflow.

        :param params: dict with "operation_id" and "ksus_list" (KSU _ids)
        :param order_id: kafka order identifier (unused here)
        :return: None; workflow/resource status is persisted per KSU in the DB
        """
        self.logger.info("ksu Create Enter")
        db_content = []  # KSU DB documents, same order as params["ksus_list"]
        op_params = []  # per-KSU operation params, aligned with db_content
        op_id = params["operation_id"]
        for ksu_id in params["ksus_list"]:
            self.logger.info("Ksu ID: {}".format(ksu_id))
            self.initialize_operation(ksu_id, op_id)
            db_ksu = self.db.get_one(self.db_collection, {"_id": ksu_id})
            self.logger.info("Db KSU: {}".format(db_ksu))
            db_content.append(db_ksu)
            ksu_params = {}
            ksu_params = self.get_operation_params(db_ksu, op_id)
            self.logger.info("Operation Params: {}".format(ksu_params))
            # Update ksu_params["profile"] with profile name and age-pubkey
            profile_type = ksu_params["profile"]["profile_type"]
            profile_id = ksu_params["profile"]["_id"]
            profile_collection = self.profile_collection_mapping[profile_type]
            db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
            # db_profile is decrypted inline
            # No need to use decrypted_copy because db_profile won't be updated.
            self.decrypt_age_keys(db_profile)
            ksu_params["profile"]["name"] = db_profile["name"]
            ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
            # Update ksu_params["oka"] with sw_catalog_path (when missing)
            # TODO: remove this in favor of doing it in ODU workflow
            for oka in ksu_params["oka"]:
                if "sw_catalog_path" not in oka:
                    oka_id = oka["_id"]
                    db_oka = self.db.get_one("okas", {"_id": oka_id})
                    # Missing profile_type defaults to infra_controller_profiles
                    oka_type = MAP_PROFILE[
                        db_oka.get("profile_type", "infra_controller_profiles")
                    ]
                    oka[
                        "sw_catalog_path"
                    ] = f"{oka_type}/{db_oka['git_name'].lower()}/templates"
            op_params.append(ksu_params)

        # A single workflow is launched for all KSUs
        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_ksus", op_id, op_params, db_content
        )
        # Update workflow status in all KSUs
        wf_status_list = []
        for db_ksu, ksu_params in zip(db_content, op_params):
            workflow_status = await self.check_workflow_and_update_db(
                op_id, workflow_name, db_ksu
            )
            wf_status_list.append(workflow_status)
        # Update resource status in all KSUs
        # TODO: Is an operation correct if n KSUs are right and 1 is not OK?
        res_status_list = []
        for db_ksu, ksu_params, wf_status in zip(db_content, op_params, wf_status_list):
            if wf_status:
                resource_status, db_ksu = await self.check_resource_and_update_db(
                    "create_ksus", op_id, ksu_params, db_ksu
                )
            else:
                # Workflow failed for this KSU: resource check is skipped
                resource_status = False
            res_status_list.append(resource_status)
            self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "create_ksus", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )
        self.logger.info(f"KSU Create EXIT with Resource Status {res_status_list}")
        return
2481
yshah564ec9c2024-11-29 07:33:32 +00002482 async def edit(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02002483 self.logger.info("ksu edit Enter")
yshah564ec9c2024-11-29 07:33:32 +00002484 db_content = []
2485 op_params = []
2486 op_id = params["operation_id"]
2487 for ksu_id in params["ksus_list"]:
2488 self.initialize_operation(ksu_id, op_id)
2489 db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
2490 db_content.append(db_ksu)
2491 ksu_params = {}
2492 ksu_params = self.get_operation_params(db_ksu, op_id)
2493 # Update ksu_params["profile"] with profile name and age-pubkey
2494 profile_type = ksu_params["profile"]["profile_type"]
2495 profile_id = ksu_params["profile"]["_id"]
2496 profile_collection = self.profile_collection_mapping[profile_type]
2497 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
garciadeblasd41e9292025-03-11 15:44:25 +01002498 # db_profile is decrypted inline
2499 # No need to use decrypted_copy because db_profile won't be updated.
2500 self.decrypt_age_keys(db_profile)
yshah564ec9c2024-11-29 07:33:32 +00002501 ksu_params["profile"]["name"] = db_profile["name"]
2502 ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
2503 # Update ksu_params["oka"] with sw_catalog_path (when missing)
garciadeblas9f7c9c52025-01-17 01:06:05 +01002504 # TODO: remove this in favor of doing it in ODU workflow
yshah564ec9c2024-11-29 07:33:32 +00002505 for oka in ksu_params["oka"]:
2506 if "sw_catalog_path" not in oka:
2507 oka_id = oka["_id"]
2508 db_oka = self.db.get_one("okas", {"_id": oka_id})
yshah2f39b8a2024-12-19 11:06:24 +00002509 oka_type = MAP_PROFILE[
2510 db_oka.get("profile_type", "infra_controller_profiles")
2511 ]
garciadeblas9f7c9c52025-01-17 01:06:05 +01002512 oka[
2513 "sw_catalog_path"
2514 ] = f"{oka_type}/{db_oka['git_name']}/templates"
yshah564ec9c2024-11-29 07:33:32 +00002515 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00002516
garciadeblas41859ce2025-02-04 16:08:51 +01002517 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002518 "update_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002519 )
yshah771dea82024-07-05 15:11:49 +00002520
yshah564ec9c2024-11-29 07:33:32 +00002521 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01002522 workflow_status = await self.check_workflow_and_update_db(
2523 op_id, workflow_name, db_ksu
2524 )
yshah564ec9c2024-11-29 07:33:32 +00002525
garciadeblas96b94f52024-07-08 16:18:21 +02002526 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002527 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00002528 "update_ksus", op_id, ksu_params, db_ksu
garciadeblas96b94f52024-07-08 16:18:21 +02002529 )
garciadeblas96b94f52024-07-08 16:18:21 +02002530 db_ksu["name"] = ksu_params["name"]
2531 db_ksu["description"] = ksu_params["description"]
2532 db_ksu["profile"]["profile_type"] = ksu_params["profile"][
2533 "profile_type"
2534 ]
2535 db_ksu["profile"]["_id"] = ksu_params["profile"]["_id"]
2536 db_ksu["oka"] = ksu_params["oka"]
2537 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
2538
yshah564ec9c2024-11-29 07:33:32 +00002539 # Clean items used in the workflow, no matter if the workflow succeeded
2540 clean_status, clean_msg = await self.odu.clean_items_workflow(
2541 "create_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002542 )
2543 self.logger.info(
yshah564ec9c2024-11-29 07:33:32 +00002544 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
garciadeblas96b94f52024-07-08 16:18:21 +02002545 )
yshah564ec9c2024-11-29 07:33:32 +00002546 self.logger.info(f"KSU Update EXIT with Resource Status {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002547 return
2548
yshah564ec9c2024-11-29 07:33:32 +00002549 async def delete(self, params, order_id):
2550 self.logger.info("ksu delete Enter")
2551 db_content = []
2552 op_params = []
2553 op_id = params["operation_id"]
2554 for ksu_id in params["ksus_list"]:
2555 self.initialize_operation(ksu_id, op_id)
2556 db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
2557 db_content.append(db_ksu)
2558 ksu_params = {}
2559 ksu_params["profile"] = {}
2560 ksu_params["profile"]["profile_type"] = db_ksu["profile"]["profile_type"]
2561 ksu_params["profile"]["_id"] = db_ksu["profile"]["_id"]
garciadeblasd41e9292025-03-11 15:44:25 +01002562 # Update ksu_params["profile"] with profile name
yshah564ec9c2024-11-29 07:33:32 +00002563 profile_type = ksu_params["profile"]["profile_type"]
2564 profile_id = ksu_params["profile"]["_id"]
2565 profile_collection = self.profile_collection_mapping[profile_type]
2566 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
2567 ksu_params["profile"]["name"] = db_profile["name"]
yshah564ec9c2024-11-29 07:33:32 +00002568 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00002569
garciadeblas41859ce2025-02-04 16:08:51 +01002570 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002571 "delete_ksus", op_id, op_params, db_content
2572 )
2573
2574 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01002575 workflow_status = await self.check_workflow_and_update_db(
2576 op_id, workflow_name, db_ksu
2577 )
yshah564ec9c2024-11-29 07:33:32 +00002578
2579 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002580 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00002581 "delete_ksus", op_id, ksu_params, db_ksu
2582 )
2583
yshahb36649f2025-02-28 09:01:51 +00002584 force = params.get("force", False)
2585 if force:
2586 force_delete_status = self.check_force_delete_and_delete_from_db(
2587 db_ksu["_id"], workflow_status, resource_status, force
2588 )
2589 if force_delete_status:
2590 return
2591
yshah564ec9c2024-11-29 07:33:32 +00002592 if resource_status:
2593 db_ksu["state"] == "DELETED"
yshah5e109152025-05-19 12:29:01 +00002594 self.delete_ksu_dependency(db_ksu["_id"], db_ksu)
yshah564ec9c2024-11-29 07:33:32 +00002595 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
2596 self.db.del_one(self.db_collection, {"_id": db_ksu["_id"]})
2597
2598 self.logger.info(f"KSU Delete Exit with resource status: {resource_status}")
2599 return
2600
2601 async def clone(self, params, order_id):
2602 self.logger.info("ksu clone Enter")
2603 op_id = params["operation_id"]
2604 ksus_id = params["ksus_list"][0]
2605 self.initialize_operation(ksus_id, op_id)
2606 db_content = self.db.get_one(self.db_collection, {"_id": ksus_id})
2607 op_params = self.get_operation_params(db_content, op_id)
garciadeblas41859ce2025-02-04 16:08:51 +01002608 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002609 "clone_ksus", op_id, op_params, db_content
2610 )
yshah564ec9c2024-11-29 07:33:32 +00002611
garciadeblas33b36e72025-01-17 12:49:19 +01002612 workflow_status = await self.check_workflow_and_update_db(
2613 op_id, workflow_name, db_content
2614 )
yshah771dea82024-07-05 15:11:49 +00002615
2616 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002617 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002618 "clone_ksus", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002619 )
garciadeblas96b94f52024-07-08 16:18:21 +02002620 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
yshah564ec9c2024-11-29 07:33:32 +00002621
2622 self.logger.info(f"KSU Clone Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002623 return
2624
yshah564ec9c2024-11-29 07:33:32 +00002625 async def move(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02002626 self.logger.info("ksu move Enter")
yshah564ec9c2024-11-29 07:33:32 +00002627 op_id = params["operation_id"]
2628 ksus_id = params["ksus_list"][0]
2629 self.initialize_operation(ksus_id, op_id)
2630 db_content = self.db.get_one(self.db_collection, {"_id": ksus_id})
2631 op_params = self.get_operation_params(db_content, op_id)
garciadeblas41859ce2025-02-04 16:08:51 +01002632 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002633 "move_ksus", op_id, op_params, db_content
2634 )
yshah564ec9c2024-11-29 07:33:32 +00002635
garciadeblas33b36e72025-01-17 12:49:19 +01002636 workflow_status = await self.check_workflow_and_update_db(
2637 op_id, workflow_name, db_content
2638 )
yshah771dea82024-07-05 15:11:49 +00002639
2640 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002641 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002642 "move_ksus", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002643 )
garciadeblas96b94f52024-07-08 16:18:21 +02002644 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
yshah564ec9c2024-11-29 07:33:32 +00002645
2646 self.logger.info(f"KSU Move Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002647 return
garciadeblasad6d1ba2025-01-22 16:02:18 +01002648
2649 async def check_create_ksus(self, op_id, op_params, content):
2650 self.logger.info(f"check_create_ksus Operation {op_id}. Params: {op_params}.")
2651 self.logger.debug(f"Content: {content}")
2652 db_ksu = content
2653 kustomization_name = db_ksu["git_name"].lower()
2654 oka_list = op_params["oka"]
2655 oka_item = oka_list[0]
2656 oka_params = oka_item.get("transformation", {})
garciadeblas167dde32025-02-14 00:44:58 +01002657 kustomization_ns = oka_params.get("kustomization_namespace", "flux-system")
garciadeblasad6d1ba2025-01-22 16:02:18 +01002658 profile_id = op_params.get("profile", {}).get("_id")
2659 profile_type = op_params.get("profile", {}).get("profile_type")
2660 self.logger.info(
2661 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2662 )
2663 dbcluster_list = self.get_dbclusters_from_profile(profile_id, profile_type)
2664 if not dbcluster_list:
2665 self.logger.info(f"No clusters found for profile {profile_id}.")
2666 for db_cluster in dbcluster_list:
2667 try:
2668 self.logger.info(
garciadeblasae238482025-02-03 08:44:19 +01002669 f"Checking status of KSU {db_ksu['name']} in cluster {db_cluster['name']}."
garciadeblasad6d1ba2025-01-22 16:02:18 +01002670 )
2671 cluster_kubectl = self.cluster_kubectl(db_cluster)
2672 checkings_list = [
2673 {
2674 "item": "kustomization",
2675 "name": kustomization_name,
garciadeblas167dde32025-02-14 00:44:58 +01002676 "namespace": kustomization_ns,
garciadeblas7cf480d2025-01-27 16:53:45 +01002677 "condition": {
2678 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
2679 "value": "True",
2680 },
garciadeblasad6d1ba2025-01-22 16:02:18 +01002681 "timeout": self._checkloop_kustomization_timeout,
2682 "enable": True,
2683 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
2684 },
2685 ]
2686 self.logger.info(
2687 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2688 )
2689 result, message = await self.common_check_list(
garciadeblas6d8acf32025-02-06 13:34:37 +01002690 op_id, checkings_list, "ksus", db_ksu, kubectl_obj=cluster_kubectl
garciadeblasad6d1ba2025-01-22 16:02:18 +01002691 )
2692 if not result:
2693 return False, message
2694 except Exception as e:
2695 self.logger.error(
2696 f"Error checking KSU in cluster {db_cluster['name']}."
2697 )
2698 self.logger.error(e)
2699 return False, f"Error checking KSU in cluster {db_cluster['name']}."
2700 return True, "OK"
2701
2702 async def check_delete_ksus(self, op_id, op_params, content):
2703 self.logger.info(f"check_delete_ksus Operation {op_id}. Params: {op_params}.")
2704 self.logger.debug(f"Content: {content}")
2705 db_ksu = content
2706 kustomization_name = db_ksu["git_name"].lower()
2707 oka_list = db_ksu["oka"]
2708 oka_item = oka_list[0]
2709 oka_params = oka_item.get("transformation", {})
garciadeblas167dde32025-02-14 00:44:58 +01002710 kustomization_ns = oka_params.get("kustomization_namespace", "flux-system")
garciadeblasad6d1ba2025-01-22 16:02:18 +01002711 profile_id = op_params.get("profile", {}).get("_id")
2712 profile_type = op_params.get("profile", {}).get("profile_type")
2713 self.logger.info(
2714 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2715 )
2716 dbcluster_list = self.get_dbclusters_from_profile(profile_id, profile_type)
2717 if not dbcluster_list:
2718 self.logger.info(f"No clusters found for profile {profile_id}.")
2719 for db_cluster in dbcluster_list:
2720 try:
2721 self.logger.info(
2722 f"Checking status of KSU in cluster {db_cluster['name']}."
2723 )
2724 cluster_kubectl = self.cluster_kubectl(db_cluster)
2725 checkings_list = [
2726 {
2727 "item": "kustomization",
2728 "name": kustomization_name,
garciadeblas167dde32025-02-14 00:44:58 +01002729 "namespace": kustomization_ns,
garciadeblasad6d1ba2025-01-22 16:02:18 +01002730 "deleted": True,
2731 "timeout": self._checkloop_kustomization_timeout,
2732 "enable": True,
2733 "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
2734 },
2735 ]
2736 self.logger.info(
2737 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2738 )
2739 result, message = await self.common_check_list(
garciadeblas6d8acf32025-02-06 13:34:37 +01002740 op_id, checkings_list, "ksus", db_ksu, kubectl_obj=cluster_kubectl
garciadeblasad6d1ba2025-01-22 16:02:18 +01002741 )
2742 if not result:
2743 return False, message
2744 except Exception as e:
2745 self.logger.error(
2746 f"Error checking KSU in cluster {db_cluster['name']}."
2747 )
2748 self.logger.error(e)
2749 return False, f"Error checking KSU in cluster {db_cluster['name']}."
2750 return True, "OK"