blob: cd22a9957d2235492bb77306426f6d2cebf6608b [file] [log] [blame]
rshri932105f2024-07-05 15:11:55 +00001# -*- coding: utf-8 -*-
2
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12# implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15
16__author__ = (
17 "Shrinithi R <shrinithi.r@tataelxsi.co.in>",
18 "Shahithya Y <shahithya.y@tataelxsi.co.in>",
19)
20
rshric3564942024-11-12 18:12:38 +000021import copy
rshri932105f2024-07-05 15:11:55 +000022import logging
yshahd940c652024-10-17 06:11:12 +000023from time import time
garciadeblas72412282024-11-07 12:41:54 +010024import traceback
rshri932105f2024-07-05 15:11:55 +000025from osm_lcm.lcm_utils import LcmBase
26from copy import deepcopy
27from osm_lcm import odu_workflows
28from osm_lcm import vim_sdn
rshri948f7de2024-12-02 03:42:35 +000029from osm_lcm.data_utils.list_utils import find_in_list
garciadeblasad6d1ba2025-01-22 16:02:18 +010030from osm_lcm.n2vc.kubectl import Kubectl
31import yaml
rshri932105f2024-07-05 15:11:55 +000032
# Maps internal profile field names (as stored on a cluster document) to the
# folder/repo names used on the GitOps side.
# NOTE(review): "resource_profiles" maps here to "managed_resources"
# (underscore) while GitOpsLcm.profile_type_mapping uses "managed-resources"
# (hyphen) — confirm which spelling the Git repositories actually use.
MAP_PROFILE = {
    "infra_controller_profiles": "infra-controllers",
    "infra_config_profiles": "infra-configs",
    "resource_profiles": "managed_resources",
    "app_profiles": "apps",
}
39
rshri932105f2024-07-05 15:11:55 +000040
class GitOpsLcm(LcmBase):
    """Base LCM class for GitOps-managed items (clusters, profiles, KSUs).

    Provides the machinery shared by the concrete subclasses: operation
    bookkeeping in each item's "operationHistory", workflow/resource status
    tracking through ODU workflows, age-key encryption/decryption helpers,
    and a kubectl factory for remote clusters.
    """

    # Mongo collection holding the items managed by the subclass;
    # overridden by each subclass (e.g. ClusterLcm uses "clusters").
    db_collection = "gitops"
    # Class-level defaults; operations overwrite these per item.
    workflow_status = None
    resource_status = None

    # Profile field name (as stored on a cluster document) -> Mongo collection.
    profile_collection_mapping = {
        "infra_controller_profiles": "k8sinfra_controller",
        "infra_config_profiles": "k8sinfra_config",
        "resource_profiles": "k8sresource",
        "app_profiles": "k8sapp",
    }

    # GitOps folder name -> profile field name (reverse-style lookup).
    profile_type_mapping = {
        "infra-controllers": "infra_controller_profiles",
        "infra-configs": "infra_config_profiles",
        "managed-resources": "resource_profiles",
        "applications": "app_profiles",
    }

    def __init__(self, msg, lcm_tasks, config):
        """Initialize logger, task registry and the ODU workflow driver.

        :param msg: message bus handler, forwarded to OduWorkflow and LcmBase
        :param lcm_tasks: LCM task registry shared across LCM modules
        :param config: LCM configuration object
        """
        self.logger = logging.getLogger("lcm.gitops")
        self.lcm_tasks = lcm_tasks
        self.odu = odu_workflows.OduWorkflow(msg, self.lcm_tasks, config)
        # Timeouts (seconds) for the readiness check loops.
        self._checkloop_kustomization_timeout = 900
        self._checkloop_resource_timeout = 900
        # Subclasses fill this with {workflow_key: {"check_resource_function": fn}}.
        self._workflows = {}
        super().__init__(msg, self.logger)

    async def check_dummy_operation(self, op_id, op_params, content):
        """Fallback resource check used when a workflow key has no checker.

        Always reports success.
        """
        self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
        return True, "OK"

    def initialize_operation(self, item_id, op_id):
        """Mark operation *op_id* of item *item_id* as started in the DB.

        Sets the operation's workflow/resource/operation states to their
        initial values and records the op as the item's current operation.
        """
        db_item = self.db.get_one(self.db_collection, {"_id": item_id})
        # NOTE(review): if op_id is not present in operationHistory this is
        # None and the assignments below raise TypeError — confirm callers
        # always pass an existing op_id.
        operation = next(
            (op for op in db_item.get("operationHistory", []) if op["op_id"] == op_id),
            None,
        )
        operation["workflowState"] = "PROCESSING"
        operation["resourceState"] = "NOT_READY"
        operation["operationState"] = "IN_PROGRESS"
        operation["gitOperationInfo"] = None
        db_item["current_operation"] = operation["op_id"]
        self.db.set_one(self.db_collection, {"_id": item_id}, db_item)

    def get_operation_params(self, item, operation_id):
        """Return the operationParams of *operation_id* in *item* ({} if absent)."""
        operation_history = item.get("operationHistory", [])
        operation = find_in_list(
            operation_history, lambda op: op["op_id"] == operation_id
        )
        return operation.get("operationParams", {})

    def get_operation_type(self, item, operation_id):
        """Return the operationType of *operation_id* in *item*.

        NOTE(review): the default is {} although the stored value looks like
        a string ("create"/"delete") — confirm intended fallback.
        """
        operation_history = item.get("operationHistory", [])
        operation = find_in_list(
            operation_history, lambda op: op["op_id"] == operation_id
        )
        return operation.get("operationType", {})

    def update_state_operation_history(
        self, content, op_id, workflow_state=None, resource_state=None
    ):
        """Set raw workflow/resource state strings on one history entry.

        Unlike update_operation_history, states are stored verbatim and no
        result/endDate bookkeeping is done. Returns the mutated *content*
        (not persisted here).
        """
        self.logger.info(
            f"Update state of operation {op_id} in Operation History in DB"
        )
        self.logger.info(
            f"Workflow state: {workflow_state}. Resource state: {resource_state}"
        )
        self.logger.debug(f"Content: {content}")

        op_num = 0
        for operation in content["operationHistory"]:
            self.logger.debug("Operations: {}".format(operation))
            if operation["op_id"] == op_id:
                self.logger.debug("Found operation number: {}".format(op_num))
                if workflow_state is not None:
                    operation["workflowState"] = workflow_state

                if resource_state is not None:
                    operation["resourceState"] = resource_state
                break
            op_num += 1
        self.logger.debug("content: {}".format(content))

        return content

    def update_operation_history(
        self, content, op_id, workflow_status=None, resource_status=None, op_end=True
    ):
        """Fold boolean workflow/resource outcomes into one history entry.

        None means "leave that aspect untouched"; True/False are translated
        to the corresponding state strings and result flag. When *op_end* is
        True the endDate is stamped. Returns the mutated *content*
        (not persisted here).
        """
        self.logger.info(
            f"Update Operation History in DB. Workflow status: {workflow_status}. Resource status: {resource_status}"
        )
        self.logger.debug(f"Content: {content}")

        op_num = 0
        for operation in content["operationHistory"]:
            self.logger.debug("Operations: {}".format(operation))
            if operation["op_id"] == op_id:
                self.logger.debug("Found operation number: {}".format(op_num))
                if workflow_status is not None:
                    if workflow_status:
                        operation["workflowState"] = "COMPLETED"
                        operation["result"] = True
                    else:
                        operation["workflowState"] = "ERROR"
                        operation["operationState"] = "FAILED"
                        operation["result"] = False

                if resource_status is not None:
                    if resource_status:
                        operation["resourceState"] = "READY"
                        operation["operationState"] = "COMPLETED"
                        operation["result"] = True
                    else:
                        operation["resourceState"] = "NOT_READY"
                        operation["operationState"] = "FAILED"
                        operation["result"] = False

                if op_end:
                    now = time()
                    operation["endDate"] = now
                break
            op_num += 1
        self.logger.debug("content: {}".format(content))

        return content

    async def check_workflow_and_update_db(self, op_id, workflow_name, db_content):
        """Check a launched workflow and persist the outcome on *db_content*.

        Sets the item "state" according to the operation type (create/delete)
        and the workflow result, updates the operation history and stores the
        document. Returns the boolean workflow status.
        """
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            op_id, workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )
        operation_type = self.get_operation_type(db_content, op_id)
        if operation_type == "create" and workflow_status:
            db_content["state"] = "CREATED"
        elif operation_type == "create" and not workflow_status:
            db_content["state"] = "FAILED_CREATION"
        elif operation_type == "delete" and workflow_status:
            db_content["state"] = "DELETED"
        elif operation_type == "delete" and not workflow_status:
            db_content["state"] = "FAILED_DELETION"

        if workflow_status:
            db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(
            db_content, op_id, workflow_status, None
        )
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        return workflow_status

    async def check_resource_and_update_db(
        self, resource_name, op_id, op_params, db_content
    ):
        """Check resource readiness and record it on *db_content*.

        Assumes the workflow already succeeded (workflow_status=True).
        Returns (resource_status, db_content); the document is NOT persisted
        here — callers store it.
        """
        workflow_status = True

        resource_status, resource_msg = await self.check_resource_status(
            resource_name, op_id, op_params, db_content
        )
        self.logger.info(
            "Resource Status: {} Resource Message: {}".format(
                resource_status, resource_msg
            )
        )

        if resource_status:
            db_content["resourceState"] = "READY"
        else:
            db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(
            db_content, op_id, workflow_status, resource_status
        )
        db_content["operatingState"] = "IDLE"
        db_content["current_operation"] = None
        return resource_status, db_content

    async def common_check_list(
        self, op_id, checkings_list, db_collection, db_item, kubectl_obj=None
    ):
        """Run a list of readiness checks, persisting progress after each one.

        Each enabled checking drives one odu.readiness_loop call; on success
        the item's resourceState advances and is stored, on failure a
        (False, message) pair is returned immediately.

        :return: (bool status, str message)
        """
        try:
            for checking in checkings_list:
                if checking["enable"]:
                    status, message = await self.odu.readiness_loop(
                        op_id=op_id,
                        item=checking["item"],
                        name=checking["name"],
                        namespace=checking["namespace"],
                        condition=checking.get("condition"),
                        deleted=checking.get("deleted", False),
                        timeout=checking["timeout"],
                        kubectl_obj=kubectl_obj,
                    )
                    if not status:
                        error_message = "Resources not ready: "
                        error_message += checking.get("error_message", "")
                        return status, f"{error_message}: {message}"
                    else:
                        # Advance the item's resource state after each passed check.
                        db_item["resourceState"] = checking["resourceState"]
                        db_item = self.update_state_operation_history(
                            db_item, op_id, None, checking["resourceState"]
                        )
                        self.db.set_one(db_collection, {"_id": db_item["_id"]}, db_item)
        except Exception as e:
            self.logger.debug(traceback.format_exc())
            self.logger.debug(f"Exception: {e}", exc_info=True)
            return False, f"Unexpected exception: {e}"
        return True, "OK"

    async def check_resource_status(self, key, op_id, op_params, content):
        """Dispatch the resource check registered for workflow *key*.

        Falls back to check_dummy_operation (always OK) when no checker is
        registered in self._workflows.
        """
        self.logger.info(
            f"Check resource status. Key: {key}. Operation: {op_id}. Params: {op_params}."
        )
        self.logger.debug(f"Check resource status. Content: {content}")
        check_resource_function = self._workflows.get(key, {}).get(
            "check_resource_function"
        )
        self.logger.info("check_resource function : {}".format(check_resource_function))
        if check_resource_function:
            return await check_resource_function(op_id, op_params, content)
        else:
            return await self.check_dummy_operation(op_id, op_params, content)

    def check_force_delete_and_delete_from_db(
        self, _id, workflow_status, resource_status, force
    ):
        """Delete the item when *force* is set and the operation failed.

        :return: True when the document was removed, False otherwise
        """
        self.logger.info(
            f" Force: {force} Workflow status: {workflow_status} Resource Status: {resource_status}"
        )
        if force and (not workflow_status or not resource_status):
            self.db.del_one(self.db_collection, {"_id": _id})
            return True
        return False

    def decrypt_age_keys(self, content, fields=None):
        """Decrypt *fields* of *content* in place (defaults to the age key pair)."""
        # FIX: avoid a mutable default argument; default resolved per call.
        if fields is None:
            fields = ["age_pubkey", "age_privkey"]
        self.db.encrypt_decrypt_fields(
            content,
            "decrypt",
            fields,
            schema_version="1.11",
            salt=content["_id"],
        )

    def encrypt_age_keys(self, content, fields=None):
        """Encrypt *fields* of *content* in place (defaults to the age key pair)."""
        # FIX: avoid a mutable default argument; default resolved per call.
        if fields is None:
            fields = ["age_pubkey", "age_privkey"]
        self.db.encrypt_decrypt_fields(
            content,
            "encrypt",
            fields,
            schema_version="1.11",
            salt=content["_id"],
        )

    def decrypted_copy(self, content, fields=None):
        """Return a deep copy of *content* with *fields* decrypted.

        The original document is left untouched; the copy is intended to be
        passed to ODU workflows.
        """
        # FIX: avoid a mutable default argument; default resolved per call.
        if fields is None:
            fields = ["age_pubkey", "age_privkey"]
        # This deep copy is intended to be passed to ODU workflows.
        content_copy = copy.deepcopy(content)

        # decrypting the key
        self.db.encrypt_decrypt_fields(
            content_copy,
            "decrypt",
            fields,
            schema_version="1.11",
            salt=content_copy["_id"],
        )
        return content_copy

    def delete_profile_ksu(self, _id, profile_type):
        """Delete every KSU attached to profile *_id* of *profile_type*."""
        filter_q = {"profile": {"_id": _id, "profile_type": profile_type}}
        ksu_list = self.db.get_list("ksus", filter_q)
        if ksu_list:
            self.db.del_list("ksus", filter_q)
        return

    def cluster_kubectl(self, db_cluster):
        """Build a Kubectl client from the cluster's stored kubeconfig.

        Dumps the credentials to /tmp/<cluster_id>_kubeconfig.yaml and wraps
        it in a Kubectl object.
        NOTE(review): the kubeconfig file is neither permission-restricted
        nor removed afterwards — consider tempfile with cleanup.
        """
        cluster_kubeconfig = db_cluster["credentials"]
        kubeconfig_path = f"/tmp/{db_cluster['_id']}_kubeconfig.yaml"
        with open(kubeconfig_path, "w") as kubeconfig_file:
            yaml.safe_dump(cluster_kubeconfig, kubeconfig_file)
        return Kubectl(config_file=kubeconfig_path)
326
garciadeblas72412282024-11-07 12:41:54 +0100327
328class ClusterLcm(GitOpsLcm):
garciadeblas96b94f52024-07-08 16:18:21 +0200329 db_collection = "clusters"
rshri932105f2024-07-05 15:11:55 +0000330
331 def __init__(self, msg, lcm_tasks, config):
332 """
333 Init, Connect to database, filesystem storage, and messaging
334 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
335 :return: None
336 """
garciadeblas72412282024-11-07 12:41:54 +0100337 super().__init__(msg, lcm_tasks, config)
338 self._workflows = {
339 "create_cluster": {
340 "check_resource_function": self.check_create_cluster,
341 },
garciadeblasb0a42c22024-11-13 16:00:10 +0100342 "register_cluster": {
343 "check_resource_function": self.check_register_cluster,
344 },
345 "update_cluster": {
346 "check_resource_function": self.check_update_cluster,
347 },
garciadeblasad6d1ba2025-01-22 16:02:18 +0100348 "delete_cluster": {
349 "check_resource_function": self.check_delete_cluster,
350 },
garciadeblas72412282024-11-07 12:41:54 +0100351 }
rshri932105f2024-07-05 15:11:55 +0000352 self.regist = vim_sdn.K8sClusterLcm(msg, self.lcm_tasks, config)
353
rshri948f7de2024-12-02 03:42:35 +0000354 async def create(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000355 self.logger.info("cluster Create Enter")
356
garciadeblas995cbf32024-12-18 12:54:00 +0100357 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000358 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000359 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000360
361 # To initialize the operation states
362 self.initialize_operation(cluster_id, op_id)
363
garciadeblas995cbf32024-12-18 12:54:00 +0100364 # To get the cluster
365 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
366
367 # To get the operation params details
368 op_params = self.get_operation_params(db_cluster, op_id)
369
370 # To copy the cluster content and decrypting fields to use in workflows
garciadeblasd41e9292025-03-11 15:44:25 +0100371 db_cluster_copy = self.decrypted_copy(db_cluster)
garciadeblas995cbf32024-12-18 12:54:00 +0100372 workflow_content = {
garciadeblasd41e9292025-03-11 15:44:25 +0100373 "cluster": db_cluster_copy,
garciadeblas995cbf32024-12-18 12:54:00 +0100374 }
rshric3564942024-11-12 18:12:38 +0000375
rshri948f7de2024-12-02 03:42:35 +0000376 # To get the vim account details
rshric3564942024-11-12 18:12:38 +0000377 db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
garciadeblas995cbf32024-12-18 12:54:00 +0100378 workflow_content["vim_account"] = db_vim
rshric3564942024-11-12 18:12:38 +0000379
garciadeblas41859ce2025-02-04 16:08:51 +0100380 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100381 "create_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200382 )
garciadeblas41859ce2025-02-04 16:08:51 +0100383 if not workflow_res:
384 self.logger.error(f"Failed to launch workflow: {workflow_name}")
385 db_cluster["state"] = "FAILED_CREATION"
386 db_cluster["resourceState"] = "ERROR"
387 db_cluster = self.update_operation_history(
388 db_cluster, op_id, workflow_status=False, resource_status=None
389 )
390 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
391 # Clean items used in the workflow, no matter if the workflow succeeded
392 clean_status, clean_msg = await self.odu.clean_items_workflow(
393 "create_cluster", op_id, op_params, workflow_content
394 )
395 self.logger.info(
396 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
397 )
398 return
rshri932105f2024-07-05 15:11:55 +0000399
garciadeblas26d733c2025-02-03 16:12:43 +0100400 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200401 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +0100402 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +0200403 )
rshri932105f2024-07-05 15:11:55 +0000404 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100405 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000406 workflow_status, workflow_msg
407 )
408 )
409 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200410 db_cluster["state"] = "CREATED"
411 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +0000412 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200413 db_cluster["state"] = "FAILED_CREATION"
414 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000415 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000416 db_cluster = self.update_operation_history(
417 db_cluster, op_id, workflow_status, None
418 )
garciadeblas96b94f52024-07-08 16:18:21 +0200419 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000420
garciadeblas28bff0f2024-09-16 12:53:07 +0200421 # Clean items used in the workflow, no matter if the workflow succeeded
422 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100423 "create_cluster", op_id, op_params, workflow_content
garciadeblas28bff0f2024-09-16 12:53:07 +0200424 )
425 self.logger.info(
426 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
427 )
428
rshri932105f2024-07-05 15:11:55 +0000429 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100430 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100431 "create_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000432 )
433 self.logger.info(
434 "resource_status is :{} and resource_msg is :{}".format(
435 resource_status, resource_msg
436 )
437 )
438 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200439 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +0000440 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200441 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000442
garciadeblas96b94f52024-07-08 16:18:21 +0200443 db_cluster["operatingState"] = "IDLE"
444 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000445 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000446 )
shahithya70a3fc92024-11-12 11:01:05 +0000447 db_cluster["current_operation"] = None
garciadeblas41a600e2025-01-21 11:49:38 +0100448
449 # Retrieve credentials
450 cluster_creds = None
451 if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
452 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
453 # TODO: manage the case where the credentials are not available
454 if result:
455 db_cluster["credentials"] = cluster_creds
456
457 # Update db_cluster
garciadeblas96b94f52024-07-08 16:18:21 +0200458 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
garciadeblasd41e9292025-03-11 15:44:25 +0100459 self.update_default_profile_agekeys(db_cluster_copy)
garciadeblas96b94f52024-07-08 16:18:21 +0200460 self.update_profile_state(db_cluster, workflow_status, resource_status)
rshri948f7de2024-12-02 03:42:35 +0000461
garciadeblas41a600e2025-01-21 11:49:38 +0100462 # Register the cluster in k8sclusters collection
rshri948f7de2024-12-02 03:42:35 +0000463 db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
garciadeblas41a600e2025-01-21 11:49:38 +0100464 if cluster_creds:
rshri948f7de2024-12-02 03:42:35 +0000465 db_register["credentials"] = cluster_creds
garciadeblas41a600e2025-01-21 11:49:38 +0100466 # To call the lcm.py for registering the cluster in k8scluster lcm.
rshri948f7de2024-12-02 03:42:35 +0000467 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
468 register = await self.regist.create(db_register, order_id)
469 self.logger.debug(f"Register is : {register}")
470 else:
471 db_register["_admin"]["operationalState"] = "ERROR"
472 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
473 # To call the lcm.py for registering the cluster in k8scluster lcm.
474 db_register["credentials"] = cluster_creds
475 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
476
rshri932105f2024-07-05 15:11:55 +0000477 return
478
garciadeblas72412282024-11-07 12:41:54 +0100479 async def check_create_cluster(self, op_id, op_params, content):
garciadeblas7eae6f42024-11-08 10:41:38 +0100480 self.logger.info(
481 f"check_create_cluster Operation {op_id}. Params: {op_params}."
482 )
garciadeblas72412282024-11-07 12:41:54 +0100483 db_cluster = content["cluster"]
484 cluster_name = db_cluster["git_name"].lower()
485 cluster_kustomization_name = cluster_name
486 db_vim_account = content["vim_account"]
487 cloud_type = db_vim_account["vim_type"]
488 nodepool_name = ""
489 if cloud_type == "aws":
490 nodepool_name = f"{cluster_name}-nodegroup"
491 cluster_name = f"{cluster_name}-cluster"
492 elif cloud_type == "gcp":
493 nodepool_name = f"nodepool-{cluster_name}"
494 bootstrap = op_params.get("bootstrap", True)
495 if cloud_type in ("azure", "gcp", "aws"):
496 checkings_list = [
497 {
498 "item": "kustomization",
499 "name": cluster_kustomization_name,
500 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +0100501 "condition": {
502 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
503 "value": "True",
504 },
yshahcb9075f2024-11-22 12:08:57 +0000505 "timeout": 1500,
garciadeblas72412282024-11-07 12:41:54 +0100506 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100507 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
garciadeblas72412282024-11-07 12:41:54 +0100508 },
509 {
510 "item": f"cluster_{cloud_type}",
511 "name": cluster_name,
512 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100513 "condition": {
514 "jsonpath_filter": "status.conditions[?(@.type=='Synced')].status",
515 "value": "True",
516 },
garciadeblas72412282024-11-07 12:41:54 +0100517 "timeout": self._checkloop_resource_timeout,
518 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100519 "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.CLUSTER",
garciadeblas72412282024-11-07 12:41:54 +0100520 },
521 {
522 "item": f"cluster_{cloud_type}",
523 "name": cluster_name,
524 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100525 "condition": {
526 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
527 "value": "True",
528 },
garciadeblas72412282024-11-07 12:41:54 +0100529 "timeout": self._checkloop_resource_timeout,
530 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100531 "resourceState": "IN_PROGRESS.RESOURCE_READY.CLUSTER",
garciadeblas72412282024-11-07 12:41:54 +0100532 },
533 {
534 "item": "kustomization",
535 "name": f"{cluster_kustomization_name}-bstrp-fluxctrl",
536 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +0100537 "condition": {
538 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
539 "value": "True",
540 },
yshahcb9075f2024-11-22 12:08:57 +0000541 "timeout": self._checkloop_resource_timeout,
garciadeblas72412282024-11-07 12:41:54 +0100542 "enable": bootstrap,
garciadeblas7eae6f42024-11-08 10:41:38 +0100543 "resourceState": "IN_PROGRESS.BOOTSTRAP_OK",
garciadeblas72412282024-11-07 12:41:54 +0100544 },
545 ]
546 else:
547 return False, "Not suitable VIM account to check cluster status"
548 if nodepool_name:
549 nodepool_check = {
550 "item": f"nodepool_{cloud_type}",
551 "name": nodepool_name,
552 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100553 "condition": {
554 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
555 "value": "True",
556 },
garciadeblas72412282024-11-07 12:41:54 +0100557 "timeout": self._checkloop_resource_timeout,
558 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100559 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEPOOL",
garciadeblas72412282024-11-07 12:41:54 +0100560 }
561 checkings_list.insert(3, nodepool_check)
yshahcb9075f2024-11-22 12:08:57 +0000562 return await self.common_check_list(
563 op_id, checkings_list, "clusters", db_cluster
564 )
garciadeblas72412282024-11-07 12:41:54 +0100565
garciadeblasd41e9292025-03-11 15:44:25 +0100566 def update_default_profile_agekeys(self, db_cluster):
567 profiles = [
568 "infra_controller_profiles",
569 "infra_config_profiles",
570 "app_profiles",
571 "resource_profiles",
572 ]
573 self.logger.debug("the db_cluster is :{}".format(db_cluster))
574 for profile_type in profiles:
575 profile_id = db_cluster[profile_type]
576 db_collection = self.profile_collection_mapping[profile_type]
577 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
578 db_profile["age_pubkey"] = db_cluster["age_pubkey"]
579 db_profile["age_privkey"] = db_cluster["age_privkey"]
580 self.encrypt_age_keys(db_profile)
581 self.db.set_one(db_collection, {"_id": db_profile["_id"]}, db_profile)
582
garciadeblas96b94f52024-07-08 16:18:21 +0200583 def update_profile_state(self, db_cluster, workflow_status, resource_status):
rshri932105f2024-07-05 15:11:55 +0000584 profiles = [
585 "infra_controller_profiles",
586 "infra_config_profiles",
587 "app_profiles",
588 "resource_profiles",
589 ]
garciadeblasd41e9292025-03-11 15:44:25 +0100590 self.logger.debug("the db_cluster is :{}".format(db_cluster))
rshri932105f2024-07-05 15:11:55 +0000591 for profile_type in profiles:
garciadeblas96b94f52024-07-08 16:18:21 +0200592 profile_id = db_cluster[profile_type]
rshri948f7de2024-12-02 03:42:35 +0000593 db_collection = self.profile_collection_mapping[profile_type]
rshri932105f2024-07-05 15:11:55 +0000594 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
yshahcb9075f2024-11-22 12:08:57 +0000595 op_id = db_profile["operationHistory"][-1].get("op_id")
garciadeblas96b94f52024-07-08 16:18:21 +0200596 db_profile["state"] = db_cluster["state"]
597 db_profile["resourceState"] = db_cluster["resourceState"]
598 db_profile["operatingState"] = db_cluster["operatingState"]
rshri932105f2024-07-05 15:11:55 +0000599 db_profile = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000600 db_profile, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000601 )
rshri932105f2024-07-05 15:11:55 +0000602 self.db.set_one(db_collection, {"_id": db_profile["_id"]}, db_profile)
603
rshri948f7de2024-12-02 03:42:35 +0000604 async def delete(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000605 self.logger.info("cluster delete Enter")
rshri948f7de2024-12-02 03:42:35 +0000606
garciadeblas926ffac2025-02-12 16:45:40 +0100607 try:
608 # To get the cluster and op ids
609 cluster_id = params["cluster_id"]
610 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000611
garciadeblas926ffac2025-02-12 16:45:40 +0100612 # To initialize the operation states
613 self.initialize_operation(cluster_id, op_id)
rshri948f7de2024-12-02 03:42:35 +0000614
garciadeblas926ffac2025-02-12 16:45:40 +0100615 # To get the cluster
616 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
garciadeblas995cbf32024-12-18 12:54:00 +0100617
garciadeblas926ffac2025-02-12 16:45:40 +0100618 # To get the operation params details
619 op_params = self.get_operation_params(db_cluster, op_id)
garciadeblas995cbf32024-12-18 12:54:00 +0100620
garciadeblas926ffac2025-02-12 16:45:40 +0100621 # To copy the cluster content and decrypting fields to use in workflows
622 workflow_content = {
623 "cluster": self.decrypted_copy(db_cluster),
624 }
rshri948f7de2024-12-02 03:42:35 +0000625
garciadeblas926ffac2025-02-12 16:45:40 +0100626 # To get the vim account details
627 db_vim = self.db.get_one(
628 "vim_accounts", {"name": db_cluster["vim_account"]}
629 )
630 workflow_content["vim_account"] = db_vim
631 except Exception as e:
632 self.logger.debug(traceback.format_exc())
633 self.logger.debug(f"Exception: {e}", exc_info=True)
634 raise e
garciadeblasad6d1ba2025-01-22 16:02:18 +0100635
garciadeblas41859ce2025-02-04 16:08:51 +0100636 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100637 "delete_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200638 )
garciadeblas41859ce2025-02-04 16:08:51 +0100639 if not workflow_res:
640 self.logger.error(f"Failed to launch workflow: {workflow_name}")
641 db_cluster["state"] = "FAILED_DELETION"
642 db_cluster["resourceState"] = "ERROR"
643 db_cluster = self.update_operation_history(
644 db_cluster, op_id, workflow_status=False, resource_status=None
645 )
646 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
647 # Clean items used in the workflow, no matter if the workflow succeeded
648 clean_status, clean_msg = await self.odu.clean_items_workflow(
649 "delete_cluster", op_id, op_params, workflow_content
650 )
651 self.logger.info(
652 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
653 )
654 return
rshri932105f2024-07-05 15:11:55 +0000655
garciadeblas26d733c2025-02-03 16:12:43 +0100656 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200657 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +0100658 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +0200659 )
rshri932105f2024-07-05 15:11:55 +0000660 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100661 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000662 workflow_status, workflow_msg
663 )
664 )
665 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200666 db_cluster["state"] = "DELETED"
667 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +0000668 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200669 db_cluster["state"] = "FAILED_DELETION"
670 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000671 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000672 db_cluster = self.update_operation_history(
673 db_cluster, op_id, workflow_status, None
674 )
garciadeblas96b94f52024-07-08 16:18:21 +0200675 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000676
garciadeblas98f9a3d2024-12-10 13:42:47 +0100677 # Clean items used in the workflow or in the cluster, no matter if the workflow succeeded
678 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100679 "delete_cluster", op_id, op_params, workflow_content
garciadeblas98f9a3d2024-12-10 13:42:47 +0100680 )
681 self.logger.info(
682 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
683 )
684
rshri932105f2024-07-05 15:11:55 +0000685 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100686 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100687 "delete_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000688 )
689 self.logger.info(
690 "resource_status is :{} and resource_msg is :{}".format(
691 resource_status, resource_msg
692 )
693 )
694 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200695 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +0000696 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200697 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000698
garciadeblas96b94f52024-07-08 16:18:21 +0200699 db_cluster["operatingState"] = "IDLE"
700 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000701 db_cluster, op_id, workflow_status, resource_status
garciadeblas96b94f52024-07-08 16:18:21 +0200702 )
shahithya70a3fc92024-11-12 11:01:05 +0000703 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +0200704 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000705
yshahb36649f2025-02-28 09:01:51 +0000706 force = params.get("force", False)
707 if force:
708 force_delete_status = self.check_force_delete_and_delete_from_db(
709 cluster_id, workflow_status, resource_status, force
710 )
711 if force_delete_status:
712 return
713
garciadeblas96b94f52024-07-08 16:18:21 +0200714 # To delete it from DB
715 if db_cluster["state"] == "DELETED":
716 self.delete_cluster(db_cluster)
rshri948f7de2024-12-02 03:42:35 +0000717
718 # To delete it from k8scluster collection
719 self.db.del_one("k8sclusters", {"name": db_cluster["name"]})
720
rshri932105f2024-07-05 15:11:55 +0000721 return
722
garciadeblasad6d1ba2025-01-22 16:02:18 +0100723 async def check_delete_cluster(self, op_id, op_params, content):
724 self.logger.info(
725 f"check_delete_cluster Operation {op_id}. Params: {op_params}."
726 )
727 self.logger.debug(f"Content: {content}")
728 db_cluster = content["cluster"]
729 cluster_name = db_cluster["git_name"].lower()
730 cluster_kustomization_name = cluster_name
731 db_vim_account = content["vim_account"]
732 cloud_type = db_vim_account["vim_type"]
733 if cloud_type == "aws":
734 cluster_name = f"{cluster_name}-cluster"
735 if cloud_type in ("azure", "gcp", "aws"):
736 checkings_list = [
737 {
738 "item": "kustomization",
739 "name": cluster_kustomization_name,
740 "namespace": "managed-resources",
741 "deleted": True,
742 "timeout": self._checkloop_kustomization_timeout,
743 "enable": True,
744 "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
745 },
746 {
747 "item": f"cluster_{cloud_type}",
748 "name": cluster_name,
749 "namespace": "",
750 "deleted": True,
751 "timeout": self._checkloop_resource_timeout,
752 "enable": True,
753 "resourceState": "IN_PROGRESS.RESOURCE_DELETED.CLUSTER",
754 },
755 ]
756 else:
757 return False, "Not suitable VIM account to check cluster status"
758 return await self.common_check_list(
759 op_id, checkings_list, "clusters", db_cluster
760 )
761
garciadeblas96b94f52024-07-08 16:18:21 +0200762 def delete_cluster(self, db_cluster):
763 # Actually, item_content is equal to db_cluster
rshri932105f2024-07-05 15:11:55 +0000764 # detach profiles
765 update_dict = None
766 profiles_to_detach = [
767 "infra_controller_profiles",
768 "infra_config_profiles",
769 "app_profiles",
770 "resource_profiles",
771 ]
rshri948f7de2024-12-02 03:42:35 +0000772 """
rshri932105f2024-07-05 15:11:55 +0000773 profiles_collection = {
774 "infra_controller_profiles": "k8sinfra_controller",
775 "infra_config_profiles": "k8sinfra_config",
776 "app_profiles": "k8sapp",
777 "resource_profiles": "k8sresource",
778 }
rshri948f7de2024-12-02 03:42:35 +0000779 """
rshri932105f2024-07-05 15:11:55 +0000780 for profile_type in profiles_to_detach:
garciadeblas96b94f52024-07-08 16:18:21 +0200781 if db_cluster.get(profile_type):
garciadeblas96b94f52024-07-08 16:18:21 +0200782 profile_ids = db_cluster[profile_type]
rshri932105f2024-07-05 15:11:55 +0000783 profile_ids_copy = deepcopy(profile_ids)
rshri932105f2024-07-05 15:11:55 +0000784 for profile_id in profile_ids_copy:
rshri948f7de2024-12-02 03:42:35 +0000785 db_collection = self.profile_collection_mapping[profile_type]
rshri932105f2024-07-05 15:11:55 +0000786 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
garciadeblasc2552852024-10-22 12:39:32 +0200787 self.logger.debug("the db_profile is :{}".format(db_profile))
788 self.logger.debug(
garciadeblas96b94f52024-07-08 16:18:21 +0200789 "the item_content name is :{}".format(db_cluster["name"])
rshri932105f2024-07-05 15:11:55 +0000790 )
garciadeblasc2552852024-10-22 12:39:32 +0200791 self.logger.debug(
rshri932105f2024-07-05 15:11:55 +0000792 "the db_profile name is :{}".format(db_profile["name"])
793 )
garciadeblas96b94f52024-07-08 16:18:21 +0200794 if db_cluster["name"] == db_profile["name"]:
yshah6bad8892025-02-11 12:37:04 +0000795 self.delete_profile_ksu(profile_id, profile_type)
rshri932105f2024-07-05 15:11:55 +0000796 self.db.del_one(db_collection, {"_id": profile_id})
797 else:
rshri932105f2024-07-05 15:11:55 +0000798 profile_ids.remove(profile_id)
799 update_dict = {profile_type: profile_ids}
rshri932105f2024-07-05 15:11:55 +0000800 self.db.set_one(
garciadeblas96b94f52024-07-08 16:18:21 +0200801 "clusters", {"_id": db_cluster["_id"]}, update_dict
rshri932105f2024-07-05 15:11:55 +0000802 )
garciadeblas96b94f52024-07-08 16:18:21 +0200803 self.db.del_one("clusters", {"_id": db_cluster["_id"]})
rshri932105f2024-07-05 15:11:55 +0000804
rshri948f7de2024-12-02 03:42:35 +0000805 async def attach_profile(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000806 self.logger.info("profile attach Enter")
rshri948f7de2024-12-02 03:42:35 +0000807
garciadeblas995cbf32024-12-18 12:54:00 +0100808 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000809 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000810 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000811
812 # To initialize the operation states
813 self.initialize_operation(cluster_id, op_id)
814
garciadeblas995cbf32024-12-18 12:54:00 +0100815 # To get the cluster
816 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
817
818 # To get the operation params details
819 op_params = self.get_operation_params(db_cluster, op_id)
820
821 # To copy the cluster content and decrypting fields to use in workflows
822 workflow_content = {
823 "cluster": self.decrypted_copy(db_cluster),
824 }
rshri948f7de2024-12-02 03:42:35 +0000825
826 # To get the profile details
827 profile_id = params["profile_id"]
828 profile_type = params["profile_type"]
829 profile_collection = self.profile_collection_mapping[profile_type]
830 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
831 db_profile["profile_type"] = profile_type
832 # content["profile"] = db_profile
garciadeblas995cbf32024-12-18 12:54:00 +0100833 workflow_content["profile"] = db_profile
rshri932105f2024-07-05 15:11:55 +0000834
garciadeblas41859ce2025-02-04 16:08:51 +0100835 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100836 "attach_profile_to_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200837 )
garciadeblas41859ce2025-02-04 16:08:51 +0100838 if not workflow_res:
839 self.logger.error(f"Failed to launch workflow: {workflow_name}")
840 db_cluster["resourceState"] = "ERROR"
841 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
842 db_cluster = self.update_operation_history(
843 db_cluster, op_id, workflow_status=False, resource_status=None
844 )
845 return
rshri932105f2024-07-05 15:11:55 +0000846
garciadeblas26d733c2025-02-03 16:12:43 +0100847 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200848 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +0100849 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +0200850 )
rshri932105f2024-07-05 15:11:55 +0000851 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100852 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000853 workflow_status, workflow_msg
854 )
855 )
856 if workflow_status:
857 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
858 else:
859 db_cluster["resourceState"] = "ERROR"
860 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000861 db_cluster = self.update_operation_history(
862 db_cluster, op_id, workflow_status, None
863 )
rshri932105f2024-07-05 15:11:55 +0000864 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
865
866 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100867 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100868 "attach_profile_to_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000869 )
870 self.logger.info(
871 "resource_status is :{} and resource_msg is :{}".format(
872 resource_status, resource_msg
873 )
874 )
875 if resource_status:
876 db_cluster["resourceState"] = "READY"
877 else:
878 db_cluster["resourceState"] = "ERROR"
879
880 db_cluster["operatingState"] = "IDLE"
881 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000882 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000883 )
rshri932105f2024-07-05 15:11:55 +0000884 profile_list = db_cluster[profile_type]
rshri932105f2024-07-05 15:11:55 +0000885 if resource_status:
rshri932105f2024-07-05 15:11:55 +0000886 profile_list.append(profile_id)
rshri932105f2024-07-05 15:11:55 +0000887 db_cluster[profile_type] = profile_list
shahithya70a3fc92024-11-12 11:01:05 +0000888 db_cluster["current_operation"] = None
rshri932105f2024-07-05 15:11:55 +0000889 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
890
891 return
892
rshri948f7de2024-12-02 03:42:35 +0000893 async def detach_profile(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000894 self.logger.info("profile dettach Enter")
rshri948f7de2024-12-02 03:42:35 +0000895
garciadeblas995cbf32024-12-18 12:54:00 +0100896 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000897 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000898 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000899
900 # To initialize the operation states
901 self.initialize_operation(cluster_id, op_id)
902
garciadeblas995cbf32024-12-18 12:54:00 +0100903 # To get the cluster
904 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
905
906 # To get the operation params details
907 op_params = self.get_operation_params(db_cluster, op_id)
908
909 # To copy the cluster content and decrypting fields to use in workflows
910 workflow_content = {
911 "cluster": self.decrypted_copy(db_cluster),
912 }
rshri948f7de2024-12-02 03:42:35 +0000913
914 # To get the profile details
915 profile_id = params["profile_id"]
916 profile_type = params["profile_type"]
917 profile_collection = self.profile_collection_mapping[profile_type]
918 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
919 db_profile["profile_type"] = profile_type
garciadeblas995cbf32024-12-18 12:54:00 +0100920 workflow_content["profile"] = db_profile
rshri932105f2024-07-05 15:11:55 +0000921
garciadeblas41859ce2025-02-04 16:08:51 +0100922 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100923 "detach_profile_from_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200924 )
garciadeblas41859ce2025-02-04 16:08:51 +0100925 if not workflow_res:
926 self.logger.error(f"Failed to launch workflow: {workflow_name}")
927 db_cluster["resourceState"] = "ERROR"
928 db_cluster = self.update_operation_history(
929 db_cluster, op_id, workflow_status=False, resource_status=None
930 )
931 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
932 return
rshri932105f2024-07-05 15:11:55 +0000933
garciadeblas26d733c2025-02-03 16:12:43 +0100934 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200935 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +0100936 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +0200937 )
rshri932105f2024-07-05 15:11:55 +0000938 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100939 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000940 workflow_status, workflow_msg
941 )
942 )
943 if workflow_status:
944 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
945 else:
946 db_cluster["resourceState"] = "ERROR"
947 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000948 db_cluster = self.update_operation_history(
949 db_cluster, op_id, workflow_status, None
950 )
rshri932105f2024-07-05 15:11:55 +0000951 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
952
953 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100954 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100955 "detach_profile_from_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000956 )
957 self.logger.info(
958 "resource_status is :{} and resource_msg is :{}".format(
959 resource_status, resource_msg
960 )
961 )
962 if resource_status:
963 db_cluster["resourceState"] = "READY"
964 else:
965 db_cluster["resourceState"] = "ERROR"
966
967 db_cluster["operatingState"] = "IDLE"
968 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000969 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000970 )
rshri932105f2024-07-05 15:11:55 +0000971 profile_list = db_cluster[profile_type]
972 self.logger.info("profile list is : {}".format(profile_list))
973 if resource_status:
rshri932105f2024-07-05 15:11:55 +0000974 profile_list.remove(profile_id)
rshri932105f2024-07-05 15:11:55 +0000975 db_cluster[profile_type] = profile_list
shahithya70a3fc92024-11-12 11:01:05 +0000976 db_cluster["current_operation"] = None
rshri932105f2024-07-05 15:11:55 +0000977 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
978
979 return
980
rshri948f7de2024-12-02 03:42:35 +0000981 async def register(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000982 self.logger.info("cluster register enter")
983
garciadeblas995cbf32024-12-18 12:54:00 +0100984 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000985 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000986 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000987
988 # To initialize the operation states
989 self.initialize_operation(cluster_id, op_id)
990
garciadeblas995cbf32024-12-18 12:54:00 +0100991 # To get the cluster
992 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
993
994 # To get the operation params details
995 op_params = self.get_operation_params(db_cluster, op_id)
996
997 # To copy the cluster content and decrypting fields to use in workflows
998 workflow_content = {
999 "cluster": self.decrypted_copy(db_cluster),
1000 }
rshric3564942024-11-12 18:12:38 +00001001
garciadeblas41859ce2025-02-04 16:08:51 +01001002 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001003 "register_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001004 )
garciadeblas41859ce2025-02-04 16:08:51 +01001005 if not workflow_res:
1006 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1007 db_cluster["state"] = "FAILED_CREATION"
1008 db_cluster["resourceState"] = "ERROR"
1009 db_cluster = self.update_operation_history(
1010 db_cluster, op_id, workflow_status=False, resource_status=None
1011 )
1012 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1013 # Clean items used in the workflow, no matter if the workflow succeeded
1014 clean_status, clean_msg = await self.odu.clean_items_workflow(
1015 "register_cluster", op_id, op_params, workflow_content
1016 )
1017 self.logger.info(
1018 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1019 )
1020 return
rshri932105f2024-07-05 15:11:55 +00001021
garciadeblas26d733c2025-02-03 16:12:43 +01001022 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001023 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001024 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001025 )
rshri932105f2024-07-05 15:11:55 +00001026 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001027 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001028 workflow_status, workflow_msg
1029 )
1030 )
1031 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001032 db_cluster["state"] = "CREATED"
1033 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +00001034 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001035 db_cluster["state"] = "FAILED_CREATION"
1036 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001037 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001038 db_cluster = self.update_operation_history(
1039 db_cluster, op_id, workflow_status, None
1040 )
garciadeblas96b94f52024-07-08 16:18:21 +02001041 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001042
garciadeblasdde3a312024-09-17 13:25:06 +02001043 # Clean items used in the workflow, no matter if the workflow succeeded
1044 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001045 "register_cluster", op_id, op_params, workflow_content
garciadeblasdde3a312024-09-17 13:25:06 +02001046 )
1047 self.logger.info(
1048 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1049 )
1050
rshri932105f2024-07-05 15:11:55 +00001051 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001052 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001053 "register_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001054 )
1055 self.logger.info(
1056 "resource_status is :{} and resource_msg is :{}".format(
1057 resource_status, resource_msg
1058 )
1059 )
1060 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001061 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +00001062 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001063 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001064
garciadeblas96b94f52024-07-08 16:18:21 +02001065 db_cluster["operatingState"] = "IDLE"
1066 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001067 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +00001068 )
shahithya70a3fc92024-11-12 11:01:05 +00001069 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +02001070 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri948f7de2024-12-02 03:42:35 +00001071
1072 db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
1073 db_register["credentials"] = db_cluster["credentials"]
1074 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
1075
1076 if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
1077 # To call the lcm.py for registering the cluster in k8scluster lcm.
1078 register = await self.regist.create(db_register, order_id)
1079 self.logger.debug(f"Register is : {register}")
1080 else:
1081 db_register["_admin"]["operationalState"] = "ERROR"
1082 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
1083
rshri932105f2024-07-05 15:11:55 +00001084 return
1085
garciadeblasad6d1ba2025-01-22 16:02:18 +01001086 async def check_register_cluster(self, op_id, op_params, content):
1087 self.logger.info(
1088 f"check_register_cluster Operation {op_id}. Params: {op_params}."
1089 )
1090 # self.logger.debug(f"Content: {content}")
1091 db_cluster = content["cluster"]
1092 cluster_name = db_cluster["git_name"].lower()
1093 cluster_kustomization_name = cluster_name
1094 bootstrap = op_params.get("bootstrap", True)
1095 checkings_list = [
1096 {
1097 "item": "kustomization",
1098 "name": f"{cluster_kustomization_name}-bstrp-fluxctrl",
1099 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +01001100 "condition": {
1101 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
1102 "value": "True",
1103 },
garciadeblasad6d1ba2025-01-22 16:02:18 +01001104 "timeout": self._checkloop_kustomization_timeout,
1105 "enable": bootstrap,
1106 "resourceState": "IN_PROGRESS.BOOTSTRAP_OK",
1107 },
1108 ]
1109 return await self.common_check_list(
1110 op_id, checkings_list, "clusters", db_cluster
1111 )
1112
rshri948f7de2024-12-02 03:42:35 +00001113 async def deregister(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001114 self.logger.info("cluster deregister enter")
1115
garciadeblas995cbf32024-12-18 12:54:00 +01001116 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +00001117 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +00001118 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +00001119
1120 # To initialize the operation states
1121 self.initialize_operation(cluster_id, op_id)
1122
garciadeblas995cbf32024-12-18 12:54:00 +01001123 # To get the cluster
1124 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1125
1126 # To get the operation params details
1127 op_params = self.get_operation_params(db_cluster, op_id)
1128
1129 # To copy the cluster content and decrypting fields to use in workflows
1130 workflow_content = {
1131 "cluster": self.decrypted_copy(db_cluster),
1132 }
rshri932105f2024-07-05 15:11:55 +00001133
garciadeblas41859ce2025-02-04 16:08:51 +01001134 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001135 "deregister_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001136 )
garciadeblas41859ce2025-02-04 16:08:51 +01001137 if not workflow_res:
1138 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1139 db_cluster["state"] = "FAILED_DELETION"
1140 db_cluster["resourceState"] = "ERROR"
1141 db_cluster = self.update_operation_history(
1142 db_cluster, op_id, workflow_status=False, resource_status=None
1143 )
1144 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
garciadeblas41859ce2025-02-04 16:08:51 +01001145 return
rshri932105f2024-07-05 15:11:55 +00001146
garciadeblas26d733c2025-02-03 16:12:43 +01001147 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001148 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001149 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001150 )
rshri932105f2024-07-05 15:11:55 +00001151 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001152 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001153 workflow_status, workflow_msg
1154 )
1155 )
1156 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001157 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +00001158 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001159 db_cluster["state"] = "FAILED_DELETION"
1160 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001161 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001162 db_cluster = self.update_operation_history(
1163 db_cluster, op_id, workflow_status, None
1164 )
garciadeblas96b94f52024-07-08 16:18:21 +02001165 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001166
1167 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001168 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001169 "deregister_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001170 )
1171 self.logger.info(
1172 "resource_status is :{} and resource_msg is :{}".format(
1173 resource_status, resource_msg
1174 )
1175 )
1176 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001177 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +00001178 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001179 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001180
garciadeblas96b94f52024-07-08 16:18:21 +02001181 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001182 db_cluster, op_id, workflow_status, resource_status
garciadeblas96b94f52024-07-08 16:18:21 +02001183 )
1184 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001185
garciadeblas93380452025-02-05 09:32:52 +01001186 await self.delete(params, order_id)
1187 # Clean items used in the workflow or in the cluster, no matter if the workflow succeeded
1188 clean_status, clean_msg = await self.odu.clean_items_workflow(
1189 "deregister_cluster", op_id, op_params, workflow_content
1190 )
1191 self.logger.info(
1192 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1193 )
1194 return
rshri932105f2024-07-05 15:11:55 +00001195
rshri948f7de2024-12-02 03:42:35 +00001196 async def get_creds(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001197 self.logger.info("Cluster get creds Enter")
rshri948f7de2024-12-02 03:42:35 +00001198 cluster_id = params["cluster_id"]
1199 op_id = params["operation_id"]
1200 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
garciadeblas96b94f52024-07-08 16:18:21 +02001201 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
1202 if result:
1203 db_cluster["credentials"] = cluster_creds
yshahd940c652024-10-17 06:11:12 +00001204 op_len = 0
1205 for operations in db_cluster["operationHistory"]:
1206 if operations["op_id"] == op_id:
1207 db_cluster["operationHistory"][op_len]["result"] = result
1208 db_cluster["operationHistory"][op_len]["endDate"] = time()
1209 op_len += 1
shahithya70a3fc92024-11-12 11:01:05 +00001210 db_cluster["current_operation"] = None
yshahd940c652024-10-17 06:11:12 +00001211 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri948f7de2024-12-02 03:42:35 +00001212 self.logger.info("Cluster Get Creds Exit")
yshah771dea82024-07-05 15:11:49 +00001213 return
1214
rshri948f7de2024-12-02 03:42:35 +00001215 async def update(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001216 self.logger.info("Cluster update Enter")
rshri948f7de2024-12-02 03:42:35 +00001217 # To get the cluster details
1218 cluster_id = params["cluster_id"]
1219 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1220
1221 # To get the operation params details
1222 op_id = params["operation_id"]
1223 op_params = self.get_operation_params(db_cluster, op_id)
yshah771dea82024-07-05 15:11:49 +00001224
garciadeblas995cbf32024-12-18 12:54:00 +01001225 # To copy the cluster content and decrypting fields to use in workflows
1226 workflow_content = {
1227 "cluster": self.decrypted_copy(db_cluster),
1228 }
rshric3564942024-11-12 18:12:38 +00001229
1230 # vim account details
1231 db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
garciadeblas995cbf32024-12-18 12:54:00 +01001232 workflow_content["vim_account"] = db_vim
rshric3564942024-11-12 18:12:38 +00001233
garciadeblas41859ce2025-02-04 16:08:51 +01001234 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001235 "update_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001236 )
garciadeblas41859ce2025-02-04 16:08:51 +01001237 if not workflow_res:
1238 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1239 db_cluster["resourceState"] = "ERROR"
1240 db_cluster = self.update_operation_history(
1241 db_cluster, op_id, workflow_status=False, resource_status=None
1242 )
1243 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1244 # Clean items used in the workflow, no matter if the workflow succeeded
1245 clean_status, clean_msg = await self.odu.clean_items_workflow(
1246 "update_cluster", op_id, op_params, workflow_content
1247 )
1248 self.logger.info(
1249 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1250 )
1251 return
garciadeblas26d733c2025-02-03 16:12:43 +01001252 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001253 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001254 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001255 )
1256 self.logger.info(
1257 "Workflow Status: {} Workflow Message: {}".format(
1258 workflow_status, workflow_msg
yshah771dea82024-07-05 15:11:49 +00001259 )
garciadeblas96b94f52024-07-08 16:18:21 +02001260 )
1261
1262 if workflow_status:
1263 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
1264 else:
1265 db_cluster["resourceState"] = "ERROR"
1266
yshahcb9075f2024-11-22 12:08:57 +00001267 db_cluster = self.update_operation_history(
1268 db_cluster, op_id, workflow_status, None
1269 )
garciadeblas96b94f52024-07-08 16:18:21 +02001270 # self.logger.info("Db content: {}".format(db_content))
1271 # self.db.set_one(self.db_collection, {"_id": _id}, db_cluster)
1272 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1273
garciadeblas28bff0f2024-09-16 12:53:07 +02001274 # Clean items used in the workflow, no matter if the workflow succeeded
1275 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001276 "update_cluster", op_id, op_params, workflow_content
garciadeblas28bff0f2024-09-16 12:53:07 +02001277 )
1278 self.logger.info(
1279 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1280 )
garciadeblas96b94f52024-07-08 16:18:21 +02001281 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001282 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001283 "update_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001284 )
1285 self.logger.info(
1286 "Resource Status: {} Resource Message: {}".format(
1287 resource_status, resource_msg
1288 )
1289 )
yshah771dea82024-07-05 15:11:49 +00001290
1291 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001292 db_cluster["resourceState"] = "READY"
yshah771dea82024-07-05 15:11:49 +00001293 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001294 db_cluster["resourceState"] = "ERROR"
yshah771dea82024-07-05 15:11:49 +00001295
yshah0defcd52024-11-18 07:41:35 +00001296 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001297 db_cluster, op_id, workflow_status, resource_status
yshah0defcd52024-11-18 07:41:35 +00001298 )
1299
garciadeblas96b94f52024-07-08 16:18:21 +02001300 db_cluster["operatingState"] = "IDLE"
garciadeblas96b94f52024-07-08 16:18:21 +02001301 # self.logger.info("db_cluster: {}".format(db_cluster))
garciadeblas7cf480d2025-01-27 16:53:45 +01001302 # TODO: verify condition
garciadeblas96b94f52024-07-08 16:18:21 +02001303 # For the moment, if the workflow completed successfully, then we update the db accordingly.
1304 if workflow_status:
1305 if "k8s_version" in op_params:
1306 db_cluster["k8s_version"] = op_params["k8s_version"]
yshah0defcd52024-11-18 07:41:35 +00001307 if "node_count" in op_params:
garciadeblas96b94f52024-07-08 16:18:21 +02001308 db_cluster["node_count"] = op_params["node_count"]
yshah0defcd52024-11-18 07:41:35 +00001309 if "node_size" in op_params:
1310 db_cluster["node_count"] = op_params["node_size"]
garciadeblas96b94f52024-07-08 16:18:21 +02001311 # self.db.set_one(self.db_collection, {"_id": _id}, db_content)
shahithya70a3fc92024-11-12 11:01:05 +00001312 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +02001313 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
yshah771dea82024-07-05 15:11:49 +00001314 return
1315
    async def check_update_cluster(self, op_id, op_params, content):
        """Build and run the readiness checks for an 'update cluster' operation.

        Depending on the update parameters, the checks cover:
        - the Flux kustomization that reconciles the cluster (always),
        - the node count reported by the provider (scale operation),
        - the Kubernetes version reported by the provider (upgrade operation).

        The actual evaluation of the checks is delegated to
        ``self.common_check_list``.

        :param op_id: identifier of the operation being checked
        :param op_params: operation parameters; may contain "node_count"
            (scale) and/or "k8s_version" (upgrade)
        :param content: dict with at least "cluster" (DB record) and
            "vim_account" (DB record) entries
        :return: result of ``common_check_list`` (status, message), or
            (False, error message) for unsupported VIM types
        """
        self.logger.info(
            f"check_update_cluster Operation {op_id}. Params: {op_params}."
        )
        self.logger.debug(f"Content: {content}")
        # return await self.check_dummy_operation(op_id, op_params, content)
        db_cluster = content["cluster"]
        cluster_name = db_cluster["git_name"].lower()
        # The kustomization keeps the plain (lower-cased) git name even when
        # the provider-side cluster name gets a suffix below.
        cluster_kustomization_name = cluster_name
        db_vim_account = content["vim_account"]
        cloud_type = db_vim_account["vim_type"]
        if cloud_type == "aws":
            # AWS cluster objects are named "<name>-cluster" on the provider side.
            cluster_name = f"{cluster_name}-cluster"
        if cloud_type in ("azure", "gcp", "aws"):
            # Base check: the cluster kustomization must report Ready=True.
            checkings_list = [
                {
                    "item": "kustomization",
                    "name": cluster_kustomization_name,
                    "namespace": "managed-resources",
                    "condition": {
                        "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
                        "value": "True",
                    },
                    "timeout": self._checkloop_kustomization_timeout,
                    "enable": True,
                    "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
                },
            ]
        else:
            return False, "Not suitable VIM account to check cluster status"
        # Scale operation
        if "node_count" in op_params:
            checkings_list.append(
                {
                    "item": f"cluster_{cloud_type}",
                    "name": cluster_name,
                    "namespace": "",
                    "condition": {
                        "jsonpath_filter": "status.atProvider.defaultNodePool[0].nodeCount",
                        "value": f"{op_params['node_count']}",
                    },
                    # Scaling may take much longer than a plain resource check.
                    "timeout": self._checkloop_resource_timeout * 3,
                    "enable": True,
                    "resourceState": "IN_PROGRESS.RESOURCE_READY.NODE_COUNT.CLUSTER",
                }
            )
        # Upgrade operation
        if "k8s_version" in op_params:
            checkings_list.append(
                {
                    "item": f"cluster_{cloud_type}",
                    "name": cluster_name,
                    "namespace": "",
                    "condition": {
                        "jsonpath_filter": "status.atProvider.defaultNodePool[0].orchestratorVersion",
                        "value": op_params["k8s_version"],
                    },
                    # Upgrades are slower than ordinary resource readiness.
                    "timeout": self._checkloop_resource_timeout * 2,
                    "enable": True,
                    "resourceState": "IN_PROGRESS.RESOURCE_READY.K8S_VERSION.CLUSTER",
                }
            )
        return await self.common_check_list(
            op_id, checkings_list, "clusters", db_cluster
        )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001381
yshah771dea82024-07-05 15:11:49 +00001382
class CloudCredentialsLcm(GitOpsLcm):
    """LCM handler for cloud credentials (stored as VIM accounts).

    Each operation launches the corresponding ODU workflow, waits for its
    result, cleans up workflow items and, when the workflow succeeded,
    checks the resulting resource status before updating the database.
    """

    db_collection = "vim_accounts"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def add(self, params, order_id):
        """Create cloud credentials by running the 'create_cloud_credentials' workflow.

        :param params: kafka message payload; "_id" is the VIM account id
        :param order_id: message order id (unused, kept for handler signature)
        :return: None
        """
        self.logger.info("Cloud Credentials create")
        vim_id = params["_id"]
        op_id = vim_id
        op_params = params
        db_content = self.db.get_one(self.db_collection, {"_id": vim_id})
        vim_config = db_content.get("config", {})
        # Decrypt credentials in place so the workflow receives usable values.
        self.db.encrypt_decrypt_fields(
            vim_config.get("credentials"),
            "decrypt",
            ["password", "secret"],
            schema_version=db_content["schema_version"],
            salt=vim_id,
        )

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_cloud_credentials", op_id, op_params, db_content
        )

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            op_id, workflow_name
        )

        self.logger.info(
            "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "create_cloud_credentials", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, resource_msg = await self.check_resource_status(
                "create_cloud_credentials", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )

        db_content["_admin"]["operationalState"] = "ENABLED"
        for operation in db_content["_admin"]["operations"]:
            if operation["lcmOperationType"] == "create":
                operation["operationState"] = "ENABLED"
        # Do not log the full record: "config.credentials" was decrypted above,
        # so dumping db_content would leak secrets into the logs.
        self.logger.info("Content _id: {}".format(db_content["_id"]))
        # NOTE(review): db_content still holds the decrypted credentials here;
        # persisting it as-is stores them in plaintext — confirm whether
        # re-encryption is expected before this set_one.
        self.db.set_one("vim_accounts", {"_id": db_content["_id"]}, db_content)
        return

    async def edit(self, params, order_id):
        """Update cloud credentials by running the 'update_cloud_credentials' workflow.

        :param params: kafka message payload; "_id" is the VIM account id
        :param order_id: message order id (unused, kept for handler signature)
        :return: None
        """
        self.logger.info("Cloud Credentials Update")
        vim_id = params["_id"]
        op_id = vim_id
        op_params = params
        db_content = self.db.get_one("vim_accounts", {"_id": vim_id})
        vim_config = db_content.get("config", {})
        # Decrypt credentials in place so the workflow receives usable values.
        self.db.encrypt_decrypt_fields(
            vim_config.get("credentials"),
            "decrypt",
            ["password", "secret"],
            schema_version=db_content["schema_version"],
            salt=vim_id,
        )

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "update_cloud_credentials", op_id, op_params, db_content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            op_id, workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "update_cloud_credentials", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, resource_msg = await self.check_resource_status(
                "update_cloud_credentials", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )
        return

    async def remove(self, params, order_id):
        """Delete cloud credentials by running the 'delete_cloud_credentials' workflow.

        The DB record is removed regardless of the workflow outcome.

        :param params: kafka message payload; "_id" is the VIM account id
        :param order_id: message order id (unused, kept for handler signature)
        :return: None
        """
        self.logger.info("Cloud Credentials remove")
        vim_id = params["_id"]
        op_id = vim_id
        op_params = params
        db_content = self.db.get_one("vim_accounts", {"_id": vim_id})

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_cloud_credentials", op_id, op_params, db_content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            op_id, workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
        )

        if workflow_status:
            resource_status, resource_msg = await self.check_resource_status(
                "delete_cloud_credentials", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )
        self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
        return
1519
rshri932105f2024-07-05 15:11:55 +00001520
class K8sAppLcm(GitOpsLcm):
    """LCM handler for Kubernetes application profiles ('k8sapp' collection)."""

    db_collection = "k8sapp"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an application profile via the 'create_profile' workflow.

        :param params: payload with "operation_id" and "profile_id"
        :param order_id: message order id (unused, kept for handler signature)
        :return: None
        """
        self.logger.info("App Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sapp", {"_id": profile_id})
        content["profile_type"] = "applications"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one("k8sapp", {"_id": content["_id"]}, content)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default so the exit log below cannot raise NameError when the
        # workflow fails (bare names do not resolve to class attributes).
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
        self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(f"App Create Exit with resource status: {resource_status}")
        return

    async def delete(self, params, order_id):
        """Delete an application profile via the 'delete_profile' workflow.

        Supports forced deletion; otherwise the record is removed only when
        the resource check succeeds.

        :param params: payload with "operation_id", "profile_id" and
            optionally "force"
        :param order_id: message order id (unused, kept for handler signature)
        :return: None
        """
        self.logger.info("App delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sapp", {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default so the force-delete path and the checks below cannot raise
        # NameError when the workflow fails.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        force = params.get("force", False)
        if force:
            force_delete_status = self.check_force_delete_and_delete_from_db(
                profile_id, workflow_status, resource_status, force
            )
            if force_delete_status:
                return

        self.logger.info(f"Resource status: {resource_status}")
        if resource_status:
            content["state"] = "DELETED"
            profile_type = self.profile_type_mapping[content["profile_type"]]
            self.delete_profile_ksu(profile_id, profile_type)
            # NOTE(review): the record is updated and then immediately
            # deleted; the set_one looks redundant — confirm intent.
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(f"App Delete Exit with resource status: {resource_status}")
        return
1606
1607
class K8sResourceLcm(GitOpsLcm):
    """LCM handler for managed-resource profiles ('k8sresource' collection)."""

    db_collection = "k8sresource"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create a managed-resource profile via the 'create_profile' workflow.

        :param params: payload with "operation_id" and "profile_id"
        :param order_id: message order id (unused, kept for handler signature)
        :return: None
        """
        self.logger.info("Resource Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sresource", {"_id": profile_id})
        content["profile_type"] = "managed-resources"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one("k8sresource", {"_id": content["_id"]}, content)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default so the exit log below cannot raise NameError when the
        # workflow fails (bare names do not resolve to class attributes).
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
        self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(
            f"Resource Create Exit with resource status: {resource_status}"
        )
        return

    async def delete(self, params, order_id):
        """Delete a managed-resource profile via the 'delete_profile' workflow.

        Supports forced deletion; otherwise the record is removed only when
        the resource check succeeds.

        :param params: payload with "operation_id", "profile_id" and
            optionally "force"
        :param order_id: message order id (unused, kept for handler signature)
        :return: None
        """
        self.logger.info("Resource delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sresource", {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default so the force-delete path and the checks below cannot raise
        # NameError when the workflow fails.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        force = params.get("force", False)
        if force:
            force_delete_status = self.check_force_delete_and_delete_from_db(
                profile_id, workflow_status, resource_status, force
            )
            if force_delete_status:
                return

        if resource_status:
            content["state"] = "DELETED"
            profile_type = self.profile_type_mapping[content["profile_type"]]
            self.delete_profile_ksu(profile_id, profile_type)
            # NOTE(review): the record is updated and then immediately
            # deleted; the set_one looks redundant — confirm intent.
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(
            f"Resource Delete Exit with resource status: {resource_status}"
        )
        return
1696
1697
class K8sInfraControllerLcm(GitOpsLcm):
    """LCM handler for infra-controller profiles ('k8sinfra_controller' collection)."""

    db_collection = "k8sinfra_controller"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an infra-controller profile via the 'create_profile' workflow.

        :param params: payload with "operation_id" and "profile_id"
        :param order_id: message order id (unused, kept for handler signature)
        :return: None
        """
        self.logger.info("Infra controller Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_controller", {"_id": profile_id})
        content["profile_type"] = "infra-controllers"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, content)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default so the exit log below cannot raise NameError when the
        # workflow fails (bare names do not resolve to class attributes).
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
        self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(
            f"Infra Controller Create Exit with resource status: {resource_status}"
        )
        return

    async def delete(self, params, order_id):
        """Delete an infra-controller profile via the 'delete_profile' workflow.

        Supports forced deletion; otherwise the record is removed only when
        the resource check succeeds.

        :param params: payload with "operation_id", "profile_id" and
            optionally "force"
        :param order_id: message order id (unused, kept for handler signature)
        :return: None
        """
        self.logger.info("Infra controller delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_controller", {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default so the force-delete path and the checks below cannot raise
        # NameError when the workflow fails.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        force = params.get("force", False)
        if force:
            force_delete_status = self.check_force_delete_and_delete_from_db(
                profile_id, workflow_status, resource_status, force
            )
            if force_delete_status:
                return

        if resource_status:
            content["state"] = "DELETED"
            profile_type = self.profile_type_mapping[content["profile_type"]]
            self.delete_profile_ksu(profile_id, profile_type)
            # NOTE(review): the record is updated and then immediately
            # deleted; the set_one looks redundant — confirm intent.
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(
            f"Infra Controller Delete Exit with resource status: {resource_status}"
        )
        return
1786
1787
class K8sInfraConfigLcm(GitOpsLcm):
    """LCM handler for infra-config profiles ('k8sinfra_config' collection)."""

    db_collection = "k8sinfra_config"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an infra-config profile via the 'create_profile' workflow.

        :param params: payload with "operation_id" and "profile_id"
        :param order_id: message order id (unused, kept for handler signature)
        :return: None
        """
        self.logger.info("Infra config Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_config", {"_id": profile_id})
        content["profile_type"] = "infra-configs"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, content)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default so the exit log below cannot raise NameError when the
        # workflow fails (bare names do not resolve to class attributes).
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
        self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(
            f"Infra Config Create Exit with resource status: {resource_status}"
        )
        return

    async def delete(self, params, order_id):
        """Delete an infra-config profile via the 'delete_profile' workflow.

        Supports forced deletion; otherwise the record is removed only when
        the resource check succeeds.

        :param params: payload with "operation_id", "profile_id" and
            optionally "force"
        :param order_id: message order id (unused, kept for handler signature)
        :return: None
        """
        self.logger.info("Infra config delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_config", {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default so the force-delete path and the checks below cannot raise
        # NameError when the workflow fails.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        force = params.get("force", False)
        if force:
            force_delete_status = self.check_force_delete_and_delete_from_db(
                profile_id, workflow_status, resource_status, force
            )
            if force_delete_status:
                return

        if resource_status:
            content["state"] = "DELETED"
            profile_type = self.profile_type_mapping[content["profile_type"]]
            self.delete_profile_ksu(profile_id, profile_type)
            # NOTE(review): the record is updated and then immediately
            # deleted; the set_one looks redundant — confirm intent.
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(
            f"Infra Config Delete Exit with resource status: {resource_status}"
        )

        return
yshah771dea82024-07-05 15:11:49 +00001877
1878
garciadeblas72412282024-11-07 12:41:54 +01001879class OkaLcm(GitOpsLcm):
yshah771dea82024-07-05 15:11:49 +00001880 db_collection = "okas"
1881
    def __init__(self, msg, lcm_tasks, config):
        """Initialize the OKA LCM handler.

        Delegates to GitOpsLcm.__init__, which connects to the database,
        filesystem storage and messaging.

        :param msg: message bus instance used by the LCM
        :param lcm_tasks: registry of running LCM tasks
        :param config: two-level dictionary with configuration; top level
            should contain 'database', 'storage', etc.
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)
yshah771dea82024-07-05 15:11:49 +00001889
yshah564ec9c2024-11-29 07:33:32 +00001890 async def create(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001891 self.logger.info("OKA Create Enter")
yshah564ec9c2024-11-29 07:33:32 +00001892 op_id = params["operation_id"]
1893 oka_id = params["oka_id"]
1894 self.initialize_operation(oka_id, op_id)
1895 db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
1896 op_params = self.get_operation_params(db_content, op_id)
yshah771dea82024-07-05 15:11:49 +00001897
garciadeblas41859ce2025-02-04 16:08:51 +01001898 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02001899 "create_oka", op_id, op_params, db_content
1900 )
yshah564ec9c2024-11-29 07:33:32 +00001901
garciadeblas33b36e72025-01-17 12:49:19 +01001902 workflow_status = await self.check_workflow_and_update_db(
1903 op_id, workflow_name, db_content
1904 )
yshah771dea82024-07-05 15:11:49 +00001905
1906 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001907 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02001908 "create_oka", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001909 )
garciadeblas96b94f52024-07-08 16:18:21 +02001910 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
garciadeblasb23d2dc2025-02-21 10:15:49 +01001911
1912 # Clean items used in the workflow, no matter if the workflow succeeded
1913 clean_status, clean_msg = await self.odu.clean_items_workflow(
1914 "create_oka", op_id, op_params, db_content
1915 )
1916 self.logger.info(
1917 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1918 )
yshah564ec9c2024-11-29 07:33:32 +00001919 self.logger.info(f"OKA Create Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00001920 return
1921
yshah564ec9c2024-11-29 07:33:32 +00001922 async def edit(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001923 self.logger.info("OKA Edit Enter")
yshah564ec9c2024-11-29 07:33:32 +00001924 op_id = params["operation_id"]
1925 oka_id = params["oka_id"]
1926 self.initialize_operation(oka_id, op_id)
1927 db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
1928 op_params = self.get_operation_params(db_content, op_id)
yshah771dea82024-07-05 15:11:49 +00001929
garciadeblas41859ce2025-02-04 16:08:51 +01001930 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001931 "update_oka", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02001932 )
garciadeblas33b36e72025-01-17 12:49:19 +01001933 workflow_status = await self.check_workflow_and_update_db(
1934 op_id, workflow_name, db_content
1935 )
yshah771dea82024-07-05 15:11:49 +00001936
1937 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001938 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02001939 "update_oka", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001940 )
garciadeblas96b94f52024-07-08 16:18:21 +02001941 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
garciadeblasb23d2dc2025-02-21 10:15:49 +01001942 # Clean items used in the workflow, no matter if the workflow succeeded
1943 clean_status, clean_msg = await self.odu.clean_items_workflow(
1944 "update_oka", op_id, op_params, db_content
1945 )
1946 self.logger.info(
1947 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1948 )
yshah564ec9c2024-11-29 07:33:32 +00001949 self.logger.info(f"OKA Update Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00001950 return
1951
yshah564ec9c2024-11-29 07:33:32 +00001952 async def delete(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001953 self.logger.info("OKA delete Enter")
yshah564ec9c2024-11-29 07:33:32 +00001954 op_id = params["operation_id"]
1955 oka_id = params["oka_id"]
1956 self.initialize_operation(oka_id, op_id)
1957 db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
1958 op_params = self.get_operation_params(db_content, op_id)
yshah771dea82024-07-05 15:11:49 +00001959
garciadeblas41859ce2025-02-04 16:08:51 +01001960 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001961 "delete_oka", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02001962 )
garciadeblas33b36e72025-01-17 12:49:19 +01001963 workflow_status = await self.check_workflow_and_update_db(
1964 op_id, workflow_name, db_content
1965 )
yshah771dea82024-07-05 15:11:49 +00001966
1967 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001968 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02001969 "delete_oka", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001970 )
yshah771dea82024-07-05 15:11:49 +00001971
yshahb36649f2025-02-28 09:01:51 +00001972 force = params.get("force", False)
1973 if force:
1974 force_delete_status = self.check_force_delete_and_delete_from_db(
1975 oka_id, workflow_status, resource_status, force
1976 )
1977 if force_delete_status:
1978 return
1979
yshah564ec9c2024-11-29 07:33:32 +00001980 if resource_status:
1981 db_content["state"] == "DELETED"
1982 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
garciadeblas96b94f52024-07-08 16:18:21 +02001983 self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
garciadeblasb23d2dc2025-02-21 10:15:49 +01001984 # Clean items used in the workflow, no matter if the workflow succeeded
1985 clean_status, clean_msg = await self.odu.clean_items_workflow(
1986 "delete_oka", op_id, op_params, db_content
1987 )
1988 self.logger.info(
1989 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1990 )
yshah564ec9c2024-11-29 07:33:32 +00001991 self.logger.info(f"OKA Delete Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00001992 return
1993
1994
garciadeblas72412282024-11-07 12:41:54 +01001995class KsuLcm(GitOpsLcm):
yshah771dea82024-07-05 15:11:49 +00001996 db_collection = "ksus"
1997
1998 def __init__(self, msg, lcm_tasks, config):
1999 """
2000 Init, Connect to database, filesystem storage, and messaging
2001 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
2002 :return: None
2003 """
garciadeblas72412282024-11-07 12:41:54 +01002004 super().__init__(msg, lcm_tasks, config)
garciadeblasad6d1ba2025-01-22 16:02:18 +01002005 self._workflows = {
2006 "create_ksus": {
2007 "check_resource_function": self.check_create_ksus,
2008 },
2009 "delete_ksus": {
2010 "check_resource_function": self.check_delete_ksus,
2011 },
2012 }
2013
2014 def get_dbclusters_from_profile(self, profile_id, profile_type):
2015 cluster_list = []
2016 db_clusters = self.db.get_list("clusters")
2017 self.logger.info(f"Getting list of clusters for {profile_type} {profile_id}")
2018 for db_cluster in db_clusters:
2019 if profile_id in db_cluster.get(profile_type, []):
2020 self.logger.info(
2021 f"Profile {profile_id} found in cluster {db_cluster['name']}"
2022 )
2023 cluster_list.append(db_cluster)
2024 return cluster_list
yshah771dea82024-07-05 15:11:49 +00002025
yshah564ec9c2024-11-29 07:33:32 +00002026 async def create(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02002027 self.logger.info("ksu Create Enter")
yshah564ec9c2024-11-29 07:33:32 +00002028 db_content = []
2029 op_params = []
2030 op_id = params["operation_id"]
2031 for ksu_id in params["ksus_list"]:
2032 self.logger.info("Ksu ID: {}".format(ksu_id))
2033 self.initialize_operation(ksu_id, op_id)
2034 db_ksu = self.db.get_one(self.db_collection, {"_id": ksu_id})
2035 self.logger.info("Db KSU: {}".format(db_ksu))
2036 db_content.append(db_ksu)
2037 ksu_params = {}
2038 ksu_params = self.get_operation_params(db_ksu, op_id)
2039 self.logger.info("Operation Params: {}".format(ksu_params))
2040 # Update ksu_params["profile"] with profile name and age-pubkey
2041 profile_type = ksu_params["profile"]["profile_type"]
2042 profile_id = ksu_params["profile"]["_id"]
2043 profile_collection = self.profile_collection_mapping[profile_type]
2044 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
garciadeblasd41e9292025-03-11 15:44:25 +01002045 # db_profile is decrypted inline
2046 # No need to use decrypted_copy because db_profile won't be updated.
2047 self.decrypt_age_keys(db_profile)
yshah564ec9c2024-11-29 07:33:32 +00002048 ksu_params["profile"]["name"] = db_profile["name"]
2049 ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
2050 # Update ksu_params["oka"] with sw_catalog_path (when missing)
garciadeblas9f7c9c52025-01-17 01:06:05 +01002051 # TODO: remove this in favor of doing it in ODU workflow
yshah564ec9c2024-11-29 07:33:32 +00002052 for oka in ksu_params["oka"]:
2053 if "sw_catalog_path" not in oka:
2054 oka_id = oka["_id"]
2055 db_oka = self.db.get_one("okas", {"_id": oka_id})
yshah2f39b8a2024-12-19 11:06:24 +00002056 oka_type = MAP_PROFILE[
2057 db_oka.get("profile_type", "infra_controller_profiles")
2058 ]
garciadeblas9f7c9c52025-01-17 01:06:05 +01002059 oka[
2060 "sw_catalog_path"
garciadeblas29f8bcf2025-01-24 14:24:41 +01002061 ] = f"{oka_type}/{db_oka['git_name'].lower()}/templates"
yshah564ec9c2024-11-29 07:33:32 +00002062 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00002063
garciadeblasad6d1ba2025-01-22 16:02:18 +01002064 # A single workflow is launched for all KSUs
garciadeblas41859ce2025-02-04 16:08:51 +01002065 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002066 "create_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002067 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01002068 # Update workflow status in all KSUs
2069 wf_status_list = []
yshah564ec9c2024-11-29 07:33:32 +00002070 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01002071 workflow_status = await self.check_workflow_and_update_db(
2072 op_id, workflow_name, db_ksu
2073 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01002074 wf_status_list.append(workflow_status)
2075 # Update resource status in all KSUs
2076 # TODO: Is an operation correct if n KSUs are right and 1 is not OK?
2077 res_status_list = []
2078 for db_ksu, ksu_params, wf_status in zip(db_content, op_params, wf_status_list):
2079 if wf_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002080 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00002081 "create_ksus", op_id, ksu_params, db_ksu
2082 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01002083 else:
2084 resource_status = False
2085 res_status_list.append(resource_status)
garciadeblas96b94f52024-07-08 16:18:21 +02002086 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
2087
garciadeblasd8429852024-10-17 15:30:30 +02002088 # Clean items used in the workflow, no matter if the workflow succeeded
2089 clean_status, clean_msg = await self.odu.clean_items_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002090 "create_ksus", op_id, op_params, db_content
garciadeblasd8429852024-10-17 15:30:30 +02002091 )
2092 self.logger.info(
2093 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
2094 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01002095 self.logger.info(f"KSU Create EXIT with Resource Status {res_status_list}")
yshah771dea82024-07-05 15:11:49 +00002096 return
2097
yshah564ec9c2024-11-29 07:33:32 +00002098 async def edit(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02002099 self.logger.info("ksu edit Enter")
yshah564ec9c2024-11-29 07:33:32 +00002100 db_content = []
2101 op_params = []
2102 op_id = params["operation_id"]
2103 for ksu_id in params["ksus_list"]:
2104 self.initialize_operation(ksu_id, op_id)
2105 db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
2106 db_content.append(db_ksu)
2107 ksu_params = {}
2108 ksu_params = self.get_operation_params(db_ksu, op_id)
2109 # Update ksu_params["profile"] with profile name and age-pubkey
2110 profile_type = ksu_params["profile"]["profile_type"]
2111 profile_id = ksu_params["profile"]["_id"]
2112 profile_collection = self.profile_collection_mapping[profile_type]
2113 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
garciadeblasd41e9292025-03-11 15:44:25 +01002114 # db_profile is decrypted inline
2115 # No need to use decrypted_copy because db_profile won't be updated.
2116 self.decrypt_age_keys(db_profile)
yshah564ec9c2024-11-29 07:33:32 +00002117 ksu_params["profile"]["name"] = db_profile["name"]
2118 ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
2119 # Update ksu_params["oka"] with sw_catalog_path (when missing)
garciadeblas9f7c9c52025-01-17 01:06:05 +01002120 # TODO: remove this in favor of doing it in ODU workflow
yshah564ec9c2024-11-29 07:33:32 +00002121 for oka in ksu_params["oka"]:
2122 if "sw_catalog_path" not in oka:
2123 oka_id = oka["_id"]
2124 db_oka = self.db.get_one("okas", {"_id": oka_id})
yshah2f39b8a2024-12-19 11:06:24 +00002125 oka_type = MAP_PROFILE[
2126 db_oka.get("profile_type", "infra_controller_profiles")
2127 ]
garciadeblas9f7c9c52025-01-17 01:06:05 +01002128 oka[
2129 "sw_catalog_path"
2130 ] = f"{oka_type}/{db_oka['git_name']}/templates"
yshah564ec9c2024-11-29 07:33:32 +00002131 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00002132
garciadeblas41859ce2025-02-04 16:08:51 +01002133 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002134 "update_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002135 )
yshah771dea82024-07-05 15:11:49 +00002136
yshah564ec9c2024-11-29 07:33:32 +00002137 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01002138 workflow_status = await self.check_workflow_and_update_db(
2139 op_id, workflow_name, db_ksu
2140 )
yshah564ec9c2024-11-29 07:33:32 +00002141
garciadeblas96b94f52024-07-08 16:18:21 +02002142 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002143 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00002144 "update_ksus", op_id, ksu_params, db_ksu
garciadeblas96b94f52024-07-08 16:18:21 +02002145 )
garciadeblas96b94f52024-07-08 16:18:21 +02002146 db_ksu["name"] = ksu_params["name"]
2147 db_ksu["description"] = ksu_params["description"]
2148 db_ksu["profile"]["profile_type"] = ksu_params["profile"][
2149 "profile_type"
2150 ]
2151 db_ksu["profile"]["_id"] = ksu_params["profile"]["_id"]
2152 db_ksu["oka"] = ksu_params["oka"]
2153 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
2154
yshah564ec9c2024-11-29 07:33:32 +00002155 # Clean items used in the workflow, no matter if the workflow succeeded
2156 clean_status, clean_msg = await self.odu.clean_items_workflow(
2157 "create_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002158 )
2159 self.logger.info(
yshah564ec9c2024-11-29 07:33:32 +00002160 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
garciadeblas96b94f52024-07-08 16:18:21 +02002161 )
yshah564ec9c2024-11-29 07:33:32 +00002162 self.logger.info(f"KSU Update EXIT with Resource Status {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002163 return
2164
yshah564ec9c2024-11-29 07:33:32 +00002165 async def delete(self, params, order_id):
2166 self.logger.info("ksu delete Enter")
2167 db_content = []
2168 op_params = []
2169 op_id = params["operation_id"]
2170 for ksu_id in params["ksus_list"]:
2171 self.initialize_operation(ksu_id, op_id)
2172 db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
2173 db_content.append(db_ksu)
2174 ksu_params = {}
2175 ksu_params["profile"] = {}
2176 ksu_params["profile"]["profile_type"] = db_ksu["profile"]["profile_type"]
2177 ksu_params["profile"]["_id"] = db_ksu["profile"]["_id"]
garciadeblasd41e9292025-03-11 15:44:25 +01002178 # Update ksu_params["profile"] with profile name
yshah564ec9c2024-11-29 07:33:32 +00002179 profile_type = ksu_params["profile"]["profile_type"]
2180 profile_id = ksu_params["profile"]["_id"]
2181 profile_collection = self.profile_collection_mapping[profile_type]
2182 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
2183 ksu_params["profile"]["name"] = db_profile["name"]
yshah564ec9c2024-11-29 07:33:32 +00002184 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00002185
garciadeblas41859ce2025-02-04 16:08:51 +01002186 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002187 "delete_ksus", op_id, op_params, db_content
2188 )
2189
2190 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01002191 workflow_status = await self.check_workflow_and_update_db(
2192 op_id, workflow_name, db_ksu
2193 )
yshah564ec9c2024-11-29 07:33:32 +00002194
2195 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002196 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00002197 "delete_ksus", op_id, ksu_params, db_ksu
2198 )
2199
yshahb36649f2025-02-28 09:01:51 +00002200 force = params.get("force", False)
2201 if force:
2202 force_delete_status = self.check_force_delete_and_delete_from_db(
2203 db_ksu["_id"], workflow_status, resource_status, force
2204 )
2205 if force_delete_status:
2206 return
2207
yshah564ec9c2024-11-29 07:33:32 +00002208 if resource_status:
2209 db_ksu["state"] == "DELETED"
2210 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
2211 self.db.del_one(self.db_collection, {"_id": db_ksu["_id"]})
2212
2213 self.logger.info(f"KSU Delete Exit with resource status: {resource_status}")
2214 return
2215
2216 async def clone(self, params, order_id):
2217 self.logger.info("ksu clone Enter")
2218 op_id = params["operation_id"]
2219 ksus_id = params["ksus_list"][0]
2220 self.initialize_operation(ksus_id, op_id)
2221 db_content = self.db.get_one(self.db_collection, {"_id": ksus_id})
2222 op_params = self.get_operation_params(db_content, op_id)
garciadeblas41859ce2025-02-04 16:08:51 +01002223 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002224 "clone_ksus", op_id, op_params, db_content
2225 )
yshah564ec9c2024-11-29 07:33:32 +00002226
garciadeblas33b36e72025-01-17 12:49:19 +01002227 workflow_status = await self.check_workflow_and_update_db(
2228 op_id, workflow_name, db_content
2229 )
yshah771dea82024-07-05 15:11:49 +00002230
2231 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002232 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002233 "clone_ksus", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002234 )
garciadeblas96b94f52024-07-08 16:18:21 +02002235 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
yshah564ec9c2024-11-29 07:33:32 +00002236
2237 self.logger.info(f"KSU Clone Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002238 return
2239
yshah564ec9c2024-11-29 07:33:32 +00002240 async def move(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02002241 self.logger.info("ksu move Enter")
yshah564ec9c2024-11-29 07:33:32 +00002242 op_id = params["operation_id"]
2243 ksus_id = params["ksus_list"][0]
2244 self.initialize_operation(ksus_id, op_id)
2245 db_content = self.db.get_one(self.db_collection, {"_id": ksus_id})
2246 op_params = self.get_operation_params(db_content, op_id)
garciadeblas41859ce2025-02-04 16:08:51 +01002247 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002248 "move_ksus", op_id, op_params, db_content
2249 )
yshah564ec9c2024-11-29 07:33:32 +00002250
garciadeblas33b36e72025-01-17 12:49:19 +01002251 workflow_status = await self.check_workflow_and_update_db(
2252 op_id, workflow_name, db_content
2253 )
yshah771dea82024-07-05 15:11:49 +00002254
2255 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002256 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002257 "move_ksus", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002258 )
garciadeblas96b94f52024-07-08 16:18:21 +02002259 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
yshah564ec9c2024-11-29 07:33:32 +00002260
2261 self.logger.info(f"KSU Move Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002262 return
garciadeblasad6d1ba2025-01-22 16:02:18 +01002263
2264 async def check_create_ksus(self, op_id, op_params, content):
2265 self.logger.info(f"check_create_ksus Operation {op_id}. Params: {op_params}.")
2266 self.logger.debug(f"Content: {content}")
2267 db_ksu = content
2268 kustomization_name = db_ksu["git_name"].lower()
2269 oka_list = op_params["oka"]
2270 oka_item = oka_list[0]
2271 oka_params = oka_item.get("transformation", {})
garciadeblas167dde32025-02-14 00:44:58 +01002272 kustomization_ns = oka_params.get("kustomization_namespace", "flux-system")
garciadeblasad6d1ba2025-01-22 16:02:18 +01002273 profile_id = op_params.get("profile", {}).get("_id")
2274 profile_type = op_params.get("profile", {}).get("profile_type")
2275 self.logger.info(
2276 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2277 )
2278 dbcluster_list = self.get_dbclusters_from_profile(profile_id, profile_type)
2279 if not dbcluster_list:
2280 self.logger.info(f"No clusters found for profile {profile_id}.")
2281 for db_cluster in dbcluster_list:
2282 try:
2283 self.logger.info(
garciadeblasae238482025-02-03 08:44:19 +01002284 f"Checking status of KSU {db_ksu['name']} in cluster {db_cluster['name']}."
garciadeblasad6d1ba2025-01-22 16:02:18 +01002285 )
2286 cluster_kubectl = self.cluster_kubectl(db_cluster)
2287 checkings_list = [
2288 {
2289 "item": "kustomization",
2290 "name": kustomization_name,
garciadeblas167dde32025-02-14 00:44:58 +01002291 "namespace": kustomization_ns,
garciadeblas7cf480d2025-01-27 16:53:45 +01002292 "condition": {
2293 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
2294 "value": "True",
2295 },
garciadeblasad6d1ba2025-01-22 16:02:18 +01002296 "timeout": self._checkloop_kustomization_timeout,
2297 "enable": True,
2298 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
2299 },
2300 ]
2301 self.logger.info(
2302 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2303 )
2304 result, message = await self.common_check_list(
garciadeblas6d8acf32025-02-06 13:34:37 +01002305 op_id, checkings_list, "ksus", db_ksu, kubectl_obj=cluster_kubectl
garciadeblasad6d1ba2025-01-22 16:02:18 +01002306 )
2307 if not result:
2308 return False, message
2309 except Exception as e:
2310 self.logger.error(
2311 f"Error checking KSU in cluster {db_cluster['name']}."
2312 )
2313 self.logger.error(e)
2314 return False, f"Error checking KSU in cluster {db_cluster['name']}."
2315 return True, "OK"
2316
2317 async def check_delete_ksus(self, op_id, op_params, content):
2318 self.logger.info(f"check_delete_ksus Operation {op_id}. Params: {op_params}.")
2319 self.logger.debug(f"Content: {content}")
2320 db_ksu = content
2321 kustomization_name = db_ksu["git_name"].lower()
2322 oka_list = db_ksu["oka"]
2323 oka_item = oka_list[0]
2324 oka_params = oka_item.get("transformation", {})
garciadeblas167dde32025-02-14 00:44:58 +01002325 kustomization_ns = oka_params.get("kustomization_namespace", "flux-system")
garciadeblasad6d1ba2025-01-22 16:02:18 +01002326 profile_id = op_params.get("profile", {}).get("_id")
2327 profile_type = op_params.get("profile", {}).get("profile_type")
2328 self.logger.info(
2329 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2330 )
2331 dbcluster_list = self.get_dbclusters_from_profile(profile_id, profile_type)
2332 if not dbcluster_list:
2333 self.logger.info(f"No clusters found for profile {profile_id}.")
2334 for db_cluster in dbcluster_list:
2335 try:
2336 self.logger.info(
2337 f"Checking status of KSU in cluster {db_cluster['name']}."
2338 )
2339 cluster_kubectl = self.cluster_kubectl(db_cluster)
2340 checkings_list = [
2341 {
2342 "item": "kustomization",
2343 "name": kustomization_name,
garciadeblas167dde32025-02-14 00:44:58 +01002344 "namespace": kustomization_ns,
garciadeblasad6d1ba2025-01-22 16:02:18 +01002345 "deleted": True,
2346 "timeout": self._checkloop_kustomization_timeout,
2347 "enable": True,
2348 "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
2349 },
2350 ]
2351 self.logger.info(
2352 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2353 )
2354 result, message = await self.common_check_list(
garciadeblas6d8acf32025-02-06 13:34:37 +01002355 op_id, checkings_list, "ksus", db_ksu, kubectl_obj=cluster_kubectl
garciadeblasad6d1ba2025-01-22 16:02:18 +01002356 )
2357 if not result:
2358 return False, message
2359 except Exception as e:
2360 self.logger.error(
2361 f"Error checking KSU in cluster {db_cluster['name']}."
2362 )
2363 self.logger.error(e)
2364 return False, f"Error checking KSU in cluster {db_cluster['name']}."
2365 return True, "OK"