blob: dda07fbb51c92bccf83676f7692aed6b3578956e [file] [log] [blame]
rshri932105f2024-07-05 15:11:55 +00001# -*- coding: utf-8 -*-
2
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12# implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15
16__author__ = (
17 "Shrinithi R <shrinithi.r@tataelxsi.co.in>",
18 "Shahithya Y <shahithya.y@tataelxsi.co.in>",
19)
20
rshric3564942024-11-12 18:12:38 +000021import copy
rshri932105f2024-07-05 15:11:55 +000022import logging
yshahd940c652024-10-17 06:11:12 +000023from time import time
garciadeblas72412282024-11-07 12:41:54 +010024import traceback
rshri932105f2024-07-05 15:11:55 +000025from osm_lcm.lcm_utils import LcmBase
26from copy import deepcopy
27from osm_lcm import odu_workflows
28from osm_lcm import vim_sdn
rshri948f7de2024-12-02 03:42:35 +000029from osm_lcm.data_utils.list_utils import find_in_list
garciadeblasad6d1ba2025-01-22 16:02:18 +010030from osm_lcm.n2vc.kubectl import Kubectl
31import yaml
rshri932105f2024-07-05 15:11:55 +000032
# Maps profile field names (as stored in item documents) to the folder names
# used on the Git side by the GitOps workflows.
# NOTE(review): values mix dash and underscore styles ("infra-controllers"
# vs "managed_resources") — presumably matching the repo layout; confirm.
MAP_PROFILE = {
    "infra_controller_profiles": "infra-controllers",
    "infra_config_profiles": "infra-configs",
    "resource_profiles": "managed_resources",
    "app_profiles": "apps",
}
39
rshri932105f2024-07-05 15:11:55 +000040
class GitOpsLcm(LcmBase):
    """Common base for GitOps-driven LCM handlers (clusters, profiles, apps).

    Provides the shared plumbing: keeping an item document and its
    ``operationHistory`` entries in sync with workflow/resource progress,
    launching and checking ODU workflows, polling resource readiness, and
    building a per-cluster ``Kubectl`` client.

    Subclasses override ``db_collection`` and populate ``self._workflows``
    with per-workflow hooks of the form
    ``{"<workflow>": {"check_resource_function": coroutine}}``.
    """

    # Mongo collection holding the items this handler manages; overridden by
    # subclasses (e.g. ClusterLcm uses "clusters").
    db_collection = "gitops"
    # NOTE(review): these class attributes look vestigial — methods bind
    # locals of the same name and never read the class attributes; confirm
    # before removing.
    workflow_status = None
    resource_status = None

    # Profile field name (in the item document) -> Mongo collection name.
    profile_collection_mapping = {
        "infra_controller_profiles": "k8sinfra_controller",
        "infra_config_profiles": "k8sinfra_config",
        "resource_profiles": "k8sresource",
        "app_profiles": "k8sapp",
    }

    def __init__(self, msg, lcm_tasks, config):
        """Connect to database, filesystem storage and messaging.

        :param msg: message bus handler
        :param lcm_tasks: LCM task registry
        :param config: LCM configuration
        """
        self.logger = logging.getLogger("lcm.gitops")
        self.lcm_tasks = lcm_tasks
        self.odu = odu_workflows.OduWorkflow(msg, self.lcm_tasks, config)
        # Timeouts (seconds) for the readiness loops used by subclasses.
        self._checkloop_kustomization_timeout = 900
        self._checkloop_resource_timeout = 900
        # Per-workflow hooks; filled in by subclasses.
        self._workflows = {}
        super().__init__(msg, self.logger)

    async def check_dummy_operation(self, op_id, op_params, content):
        """Fallback resource check used when a workflow registers no checker.

        Always succeeds.

        :return: (True, "OK")
        """
        self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
        return True, "OK"

    def initialize_operation(self, item_id, op_id):
        """Mark operation ``op_id`` of item ``item_id`` as started in the DB.

        Sets the matching operationHistory entry to
        PROCESSING/NOT_READY/IN_PROGRESS, clears its gitOperationInfo and
        records the operation as the item's ``current_operation``.

        NOTE(review): if ``op_id`` is not found, ``operation`` is None and
        the first subscript below raises TypeError — confirm callers always
        pass an op_id present in operationHistory.
        """
        db_item = self.db.get_one(self.db_collection, {"_id": item_id})
        operation = next(
            (op for op in db_item.get("operationHistory", []) if op["op_id"] == op_id),
            None,
        )
        operation["workflowState"] = "PROCESSING"
        operation["resourceState"] = "NOT_READY"
        operation["operationState"] = "IN_PROGRESS"
        operation["gitOperationInfo"] = None
        db_item["current_operation"] = operation["op_id"]
        self.db.set_one(self.db_collection, {"_id": item_id}, db_item)

    def get_operation_params(self, item, operation_id):
        """Return the operationParams of operation ``operation_id`` in ``item``."""
        operation_history = item.get("operationHistory", [])
        operation = find_in_list(
            operation_history, lambda op: op["op_id"] == operation_id
        )
        return operation.get("operationParams", {})

    def get_operation_type(self, item, operation_id):
        """Return the operationType of operation ``operation_id`` in ``item``."""
        operation_history = item.get("operationHistory", [])
        operation = find_in_list(
            operation_history, lambda op: op["op_id"] == operation_id
        )
        return operation.get("operationType", {})

    def update_state_operation_history(
        self, content, op_id, workflow_state=None, resource_state=None
    ):
        """Set raw state strings on one operationHistory entry (in memory).

        Unlike :meth:`update_operation_history`, states are stored verbatim
        and no result/endDate is recorded. The caller persists ``content``.

        :param content: item document to mutate
        :param op_id: operation to update
        :param workflow_state: literal workflowState to store, if not None
        :param resource_state: literal resourceState to store, if not None
        :return: the (mutated) ``content``
        """
        self.logger.info(
            f"Update state of operation {op_id} in Operation History in DB"
        )
        self.logger.info(
            f"Workflow state: {workflow_state}. Resource state: {resource_state}"
        )
        self.logger.debug(f"Content: {content}")

        op_num = 0
        for operation in content["operationHistory"]:
            self.logger.debug("Operations: {}".format(operation))
            if operation["op_id"] == op_id:
                self.logger.debug("Found operation number: {}".format(op_num))
                if workflow_state is not None:
                    operation["workflowState"] = workflow_state

                if resource_state is not None:
                    operation["resourceState"] = resource_state
                break
            op_num += 1
        self.logger.debug("content: {}".format(content))

        return content

    def update_operation_history(
        self, content, op_id, workflow_status=None, resource_status=None, op_end=True
    ):
        """Translate boolean outcomes into operationHistory states (in memory).

        ``workflow_status`` True/False maps to COMPLETED/ERROR(+FAILED);
        ``resource_status`` True/False maps to READY(+COMPLETED)/NOT_READY
        (+FAILED). ``None`` leaves the corresponding fields untouched.

        :param content: item document to mutate
        :param op_id: operation to update
        :param workflow_status: workflow outcome, or None to skip
        :param resource_status: resource outcome, or None to skip
        :param op_end: when True, stamp ``endDate`` with the current time
        :return: the (mutated) ``content``
        """
        self.logger.info(
            f"Update Operation History in DB. Workflow status: {workflow_status}. Resource status: {resource_status}"
        )
        self.logger.debug(f"Content: {content}")

        op_num = 0
        for operation in content["operationHistory"]:
            self.logger.debug("Operations: {}".format(operation))
            if operation["op_id"] == op_id:
                self.logger.debug("Found operation number: {}".format(op_num))
                if workflow_status is not None:
                    if workflow_status:
                        operation["workflowState"] = "COMPLETED"
                        operation["result"] = True
                    else:
                        operation["workflowState"] = "ERROR"
                        operation["operationState"] = "FAILED"
                        operation["result"] = False

                if resource_status is not None:
                    if resource_status:
                        operation["resourceState"] = "READY"
                        operation["operationState"] = "COMPLETED"
                        operation["result"] = True
                    else:
                        operation["resourceState"] = "NOT_READY"
                        operation["operationState"] = "FAILED"
                        operation["result"] = False

                if op_end:
                    now = time()
                    operation["endDate"] = now
                break
            op_num += 1
        self.logger.debug("content: {}".format(content))

        return content

    async def check_workflow_and_update_db(self, op_id, workflow_name, db_content):
        """Check an ODU workflow and persist the outcome on the item.

        Sets ``state`` according to the operation type (create/delete) and
        the workflow result, sets ``resourceState`` to GIT_SYNCED or ERROR,
        updates the operation history and writes the item back to the DB.

        :return: the boolean workflow status
        """
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )
        operation_type = self.get_operation_type(db_content, op_id)
        if operation_type == "create" and workflow_status:
            db_content["state"] = "CREATED"
        elif operation_type == "create" and not workflow_status:
            db_content["state"] = "FAILED_CREATION"
        elif operation_type == "delete" and workflow_status:
            db_content["state"] = "DELETED"
        elif operation_type == "delete" and not workflow_status:
            db_content["state"] = "FAILED_DELETION"

        if workflow_status:
            db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(
            db_content, op_id, workflow_status, None
        )
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        return workflow_status

    async def check_resource_and_update_db(
        self, resource_name, op_id, op_params, db_content
    ):
        """Run the resource check for ``resource_name`` and update the item.

        Assumes the workflow already succeeded (``workflow_status = True``).
        Mutates ``db_content`` (resourceState, operation history,
        operatingState, current_operation) but does NOT persist it; the
        caller is responsible for writing it back.

        :return: (resource_status, db_content)
        """
        workflow_status = True

        resource_status, resource_msg = await self.check_resource_status(
            resource_name, op_id, op_params, db_content
        )
        self.logger.info(
            "Resource Status: {} Resource Message: {}".format(
                resource_status, resource_msg
            )
        )

        if resource_status:
            db_content["resourceState"] = "READY"
        else:
            db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(
            db_content, op_id, workflow_status, resource_status
        )
        db_content["operatingState"] = "IDLE"
        db_content["current_operation"] = None
        return resource_status, db_content

    async def common_check_list(
        self, op_id, checkings_list, db_collection, db_item, kubectl=None
    ):
        """Run a sequence of readiness checks, persisting progress per step.

        Each enabled entry is passed to ``odu.readiness_loop``; on success
        the entry's ``resourceState`` is recorded on the item and in its
        operation history, and the item is written to ``db_collection``.
        Stops at the first failing check.

        :param op_id: operation whose history is updated per step
        :param checkings_list: list of dicts with item/name/namespace,
            optional condition/deleted, timeout, enable and resourceState
        :param db_collection: collection to persist ``db_item`` into
        :param db_item: item document being tracked
        :param kubectl: optional Kubectl client forwarded to readiness_loop
        :return: (True, "OK") on success, else (False, error message)
        """
        try:
            for checking in checkings_list:
                if checking["enable"]:
                    status, message = await self.odu.readiness_loop(
                        item=checking["item"],
                        name=checking["name"],
                        namespace=checking["namespace"],
                        condition=checking.get("condition"),
                        deleted=checking.get("deleted", False),
                        timeout=checking["timeout"],
                        kubectl=kubectl,
                    )
                    if not status:
                        error_message = "Resources not ready: "
                        error_message += checking.get("error_message", "")
                        return status, f"{error_message}: {message}"
                    else:
                        db_item["resourceState"] = checking["resourceState"]
                        db_item = self.update_state_operation_history(
                            db_item, op_id, None, checking["resourceState"]
                        )
                        self.db.set_one(db_collection, {"_id": db_item["_id"]}, db_item)
        except Exception as e:
            # Best-effort loop: any unexpected error is reported to the
            # caller as a failed check instead of propagating.
            self.logger.debug(traceback.format_exc())
            self.logger.debug(f"Exception: {e}", exc_info=True)
            return False, f"Unexpected exception: {e}"
        return True, "OK"

    async def check_resource_status(self, key, op_id, op_params, content):
        """Dispatch to the per-workflow resource checker registered for ``key``.

        Falls back to :meth:`check_dummy_operation` (always OK) when the
        workflow has no ``check_resource_function`` registered.

        :return: (status, message) from the selected checker
        """
        self.logger.info(
            f"Check resource status. Key: {key}. Operation: {op_id}. Params: {op_params}."
        )
        self.logger.debug(f"Check resource status. Content: {content}")
        check_resource_function = self._workflows.get(key, {}).get(
            "check_resource_function"
        )
        self.logger.info("check_resource function : {}".format(check_resource_function))
        if check_resource_function:
            return await check_resource_function(op_id, op_params, content)
        else:
            return await self.check_dummy_operation(op_id, op_params, content)

    def decrypted_copy(self, content, fields=None):
        """Return a deep copy of ``content`` with ``fields`` decrypted.

        The copy is intended to be handed to ODU workflows, leaving the
        original (encrypted) document untouched.

        :param content: DB document; ``content["_id"]`` is used as salt
        :param fields: field names to decrypt; defaults to the age key pair
        """
        # Fix: avoid a mutable default argument (shared list across calls);
        # build the default per call instead.
        if fields is None:
            fields = ["age_pubkey", "age_privkey"]
        # This deep copy is intended to be passed to ODU workflows.
        content_copy = copy.deepcopy(content)

        # decrypting the key
        self.db.encrypt_decrypt_fields(
            content_copy,
            "decrypt",
            fields,
            schema_version="1.11",
            salt=content_copy["_id"],
        )
        return content_copy

    def cluster_kubectl(self, db_cluster):
        """Build a ``Kubectl`` client for ``db_cluster``.

        Dumps the cluster's kubeconfig (``credentials``) to a file under
        /tmp and instantiates Kubectl against it.

        NOTE(review): the path is predictable and created with default
        permissions — consider tempfile with restrictive mode for a file
        holding cluster credentials.
        """
        cluster_kubeconfig = db_cluster["credentials"]
        kubeconfig_path = f"/tmp/{db_cluster['_id']}_kubeconfig.yaml"
        with open(kubeconfig_path, "w") as kubeconfig_file:
            yaml.safe_dump(cluster_kubeconfig, kubeconfig_file)
        return Kubectl(config_file=kubeconfig_path)
282
garciadeblas72412282024-11-07 12:41:54 +0100283
284class ClusterLcm(GitOpsLcm):
garciadeblas96b94f52024-07-08 16:18:21 +0200285 db_collection = "clusters"
rshri932105f2024-07-05 15:11:55 +0000286
287 def __init__(self, msg, lcm_tasks, config):
288 """
289 Init, Connect to database, filesystem storage, and messaging
290 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
291 :return: None
292 """
garciadeblas72412282024-11-07 12:41:54 +0100293 super().__init__(msg, lcm_tasks, config)
294 self._workflows = {
295 "create_cluster": {
296 "check_resource_function": self.check_create_cluster,
297 },
garciadeblasb0a42c22024-11-13 16:00:10 +0100298 "register_cluster": {
299 "check_resource_function": self.check_register_cluster,
300 },
301 "update_cluster": {
302 "check_resource_function": self.check_update_cluster,
303 },
garciadeblasad6d1ba2025-01-22 16:02:18 +0100304 "delete_cluster": {
305 "check_resource_function": self.check_delete_cluster,
306 },
garciadeblas72412282024-11-07 12:41:54 +0100307 }
rshri932105f2024-07-05 15:11:55 +0000308 self.regist = vim_sdn.K8sClusterLcm(msg, self.lcm_tasks, config)
309
rshri948f7de2024-12-02 03:42:35 +0000310 async def create(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000311 self.logger.info("cluster Create Enter")
312
garciadeblas995cbf32024-12-18 12:54:00 +0100313 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000314 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000315 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000316
317 # To initialize the operation states
318 self.initialize_operation(cluster_id, op_id)
319
garciadeblas995cbf32024-12-18 12:54:00 +0100320 # To get the cluster
321 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
322
323 # To get the operation params details
324 op_params = self.get_operation_params(db_cluster, op_id)
325
326 # To copy the cluster content and decrypting fields to use in workflows
327 workflow_content = {
328 "cluster": self.decrypted_copy(db_cluster),
329 }
rshric3564942024-11-12 18:12:38 +0000330
rshri948f7de2024-12-02 03:42:35 +0000331 # To get the vim account details
rshric3564942024-11-12 18:12:38 +0000332 db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
garciadeblas995cbf32024-12-18 12:54:00 +0100333 workflow_content["vim_account"] = db_vim
rshric3564942024-11-12 18:12:38 +0000334
garciadeblasadb81e82024-11-08 01:11:46 +0100335 _, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100336 "create_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200337 )
rshri932105f2024-07-05 15:11:55 +0000338 self.logger.info("workflow_name is :{}".format(workflow_name))
339
garciadeblas96b94f52024-07-08 16:18:21 +0200340 workflow_status, workflow_msg = await self.odu.check_workflow_status(
341 workflow_name
342 )
rshri932105f2024-07-05 15:11:55 +0000343 self.logger.info(
344 "workflow_status is :{} and workflow_msg is :{}".format(
345 workflow_status, workflow_msg
346 )
347 )
348 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200349 db_cluster["state"] = "CREATED"
350 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +0000351 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200352 db_cluster["state"] = "FAILED_CREATION"
353 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000354 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000355 db_cluster = self.update_operation_history(
356 db_cluster, op_id, workflow_status, None
357 )
garciadeblas96b94f52024-07-08 16:18:21 +0200358 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000359
garciadeblas28bff0f2024-09-16 12:53:07 +0200360 # Clean items used in the workflow, no matter if the workflow succeeded
361 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100362 "create_cluster", op_id, op_params, workflow_content
garciadeblas28bff0f2024-09-16 12:53:07 +0200363 )
364 self.logger.info(
365 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
366 )
367
rshri932105f2024-07-05 15:11:55 +0000368 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100369 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100370 "create_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000371 )
372 self.logger.info(
373 "resource_status is :{} and resource_msg is :{}".format(
374 resource_status, resource_msg
375 )
376 )
377 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200378 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +0000379 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200380 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000381
garciadeblas96b94f52024-07-08 16:18:21 +0200382 db_cluster["operatingState"] = "IDLE"
383 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000384 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000385 )
shahithya70a3fc92024-11-12 11:01:05 +0000386 db_cluster["current_operation"] = None
garciadeblas41a600e2025-01-21 11:49:38 +0100387
388 # Retrieve credentials
389 cluster_creds = None
390 if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
391 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
392 # TODO: manage the case where the credentials are not available
393 if result:
394 db_cluster["credentials"] = cluster_creds
395
396 # Update db_cluster
garciadeblas96b94f52024-07-08 16:18:21 +0200397 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
398 self.update_profile_state(db_cluster, workflow_status, resource_status)
rshri948f7de2024-12-02 03:42:35 +0000399
garciadeblas41a600e2025-01-21 11:49:38 +0100400 # Register the cluster in k8sclusters collection
rshri948f7de2024-12-02 03:42:35 +0000401 db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
garciadeblas41a600e2025-01-21 11:49:38 +0100402 if cluster_creds:
rshri948f7de2024-12-02 03:42:35 +0000403 db_register["credentials"] = cluster_creds
garciadeblas41a600e2025-01-21 11:49:38 +0100404 # To call the lcm.py for registering the cluster in k8scluster lcm.
rshri948f7de2024-12-02 03:42:35 +0000405 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
406 register = await self.regist.create(db_register, order_id)
407 self.logger.debug(f"Register is : {register}")
408 else:
409 db_register["_admin"]["operationalState"] = "ERROR"
410 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
411 # To call the lcm.py for registering the cluster in k8scluster lcm.
412 db_register["credentials"] = cluster_creds
413 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
414
rshri932105f2024-07-05 15:11:55 +0000415 return
416
garciadeblas72412282024-11-07 12:41:54 +0100417 async def check_create_cluster(self, op_id, op_params, content):
garciadeblas7eae6f42024-11-08 10:41:38 +0100418 self.logger.info(
419 f"check_create_cluster Operation {op_id}. Params: {op_params}."
420 )
421 # self.logger.debug(f"Content: {content}")
garciadeblas72412282024-11-07 12:41:54 +0100422 db_cluster = content["cluster"]
423 cluster_name = db_cluster["git_name"].lower()
424 cluster_kustomization_name = cluster_name
425 db_vim_account = content["vim_account"]
426 cloud_type = db_vim_account["vim_type"]
427 nodepool_name = ""
428 if cloud_type == "aws":
429 nodepool_name = f"{cluster_name}-nodegroup"
430 cluster_name = f"{cluster_name}-cluster"
431 elif cloud_type == "gcp":
432 nodepool_name = f"nodepool-{cluster_name}"
433 bootstrap = op_params.get("bootstrap", True)
434 if cloud_type in ("azure", "gcp", "aws"):
435 checkings_list = [
436 {
437 "item": "kustomization",
438 "name": cluster_kustomization_name,
439 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +0100440 "condition": {
441 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
442 "value": "True",
443 },
yshahcb9075f2024-11-22 12:08:57 +0000444 "timeout": 1500,
garciadeblas72412282024-11-07 12:41:54 +0100445 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100446 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
garciadeblas72412282024-11-07 12:41:54 +0100447 },
448 {
449 "item": f"cluster_{cloud_type}",
450 "name": cluster_name,
451 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100452 "condition": {
453 "jsonpath_filter": "status.conditions[?(@.type=='Synced')].status",
454 "value": "True",
455 },
garciadeblas72412282024-11-07 12:41:54 +0100456 "timeout": self._checkloop_resource_timeout,
457 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100458 "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.CLUSTER",
garciadeblas72412282024-11-07 12:41:54 +0100459 },
460 {
461 "item": f"cluster_{cloud_type}",
462 "name": cluster_name,
463 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100464 "condition": {
465 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
466 "value": "True",
467 },
garciadeblas72412282024-11-07 12:41:54 +0100468 "timeout": self._checkloop_resource_timeout,
469 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100470 "resourceState": "IN_PROGRESS.RESOURCE_READY.CLUSTER",
garciadeblas72412282024-11-07 12:41:54 +0100471 },
472 {
473 "item": "kustomization",
474 "name": f"{cluster_kustomization_name}-bstrp-fluxctrl",
475 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +0100476 "condition": {
477 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
478 "value": "True",
479 },
yshahcb9075f2024-11-22 12:08:57 +0000480 "timeout": self._checkloop_resource_timeout,
garciadeblas72412282024-11-07 12:41:54 +0100481 "enable": bootstrap,
garciadeblas7eae6f42024-11-08 10:41:38 +0100482 "resourceState": "IN_PROGRESS.BOOTSTRAP_OK",
garciadeblas72412282024-11-07 12:41:54 +0100483 },
484 ]
485 else:
486 return False, "Not suitable VIM account to check cluster status"
487 if nodepool_name:
488 nodepool_check = {
489 "item": f"nodepool_{cloud_type}",
490 "name": nodepool_name,
491 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100492 "condition": {
493 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
494 "value": "True",
495 },
garciadeblas72412282024-11-07 12:41:54 +0100496 "timeout": self._checkloop_resource_timeout,
497 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100498 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEPOOL",
garciadeblas72412282024-11-07 12:41:54 +0100499 }
500 checkings_list.insert(3, nodepool_check)
yshahcb9075f2024-11-22 12:08:57 +0000501 return await self.common_check_list(
502 op_id, checkings_list, "clusters", db_cluster
503 )
garciadeblas72412282024-11-07 12:41:54 +0100504
garciadeblas96b94f52024-07-08 16:18:21 +0200505 def update_profile_state(self, db_cluster, workflow_status, resource_status):
rshri932105f2024-07-05 15:11:55 +0000506 profiles = [
507 "infra_controller_profiles",
508 "infra_config_profiles",
509 "app_profiles",
510 "resource_profiles",
511 ]
rshri948f7de2024-12-02 03:42:35 +0000512 """
rshri932105f2024-07-05 15:11:55 +0000513 profiles_collection = {
514 "infra_controller_profiles": "k8sinfra_controller",
515 "infra_config_profiles": "k8sinfra_config",
516 "app_profiles": "k8sapp",
517 "resource_profiles": "k8sresource",
518 }
rshri948f7de2024-12-02 03:42:35 +0000519 """
Your Name86149632024-11-14 16:17:16 +0000520 self.logger.info("the db_cluster is :{}".format(db_cluster))
rshri932105f2024-07-05 15:11:55 +0000521 for profile_type in profiles:
garciadeblas96b94f52024-07-08 16:18:21 +0200522 profile_id = db_cluster[profile_type]
rshri948f7de2024-12-02 03:42:35 +0000523 # db_collection = profiles_collection[profile_type]
524 db_collection = self.profile_collection_mapping[profile_type]
rshri932105f2024-07-05 15:11:55 +0000525 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
yshahcb9075f2024-11-22 12:08:57 +0000526 op_id = db_profile["operationHistory"][-1].get("op_id")
garciadeblas96b94f52024-07-08 16:18:21 +0200527 db_profile["state"] = db_cluster["state"]
528 db_profile["resourceState"] = db_cluster["resourceState"]
529 db_profile["operatingState"] = db_cluster["operatingState"]
rshric3564942024-11-12 18:12:38 +0000530 db_profile["age_pubkey"] = db_cluster["age_pubkey"]
Your Name86149632024-11-14 16:17:16 +0000531 db_profile["age_privkey"] = db_cluster["age_privkey"]
rshri932105f2024-07-05 15:11:55 +0000532 db_profile = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000533 db_profile, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000534 )
rshri932105f2024-07-05 15:11:55 +0000535 self.db.set_one(db_collection, {"_id": db_profile["_id"]}, db_profile)
536
rshri948f7de2024-12-02 03:42:35 +0000537 async def delete(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000538 self.logger.info("cluster delete Enter")
rshri948f7de2024-12-02 03:42:35 +0000539
garciadeblas995cbf32024-12-18 12:54:00 +0100540 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000541 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000542 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000543
544 # To initialize the operation states
545 self.initialize_operation(cluster_id, op_id)
546
garciadeblas995cbf32024-12-18 12:54:00 +0100547 # To get the cluster
548 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
549
550 # To get the operation params details
551 op_params = self.get_operation_params(db_cluster, op_id)
552
553 # To copy the cluster content and decrypting fields to use in workflows
554 workflow_content = {
555 "cluster": self.decrypted_copy(db_cluster),
556 }
rshri948f7de2024-12-02 03:42:35 +0000557
garciadeblasad6d1ba2025-01-22 16:02:18 +0100558 # To get the vim account details
559 db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
560 workflow_content["vim_account"] = db_vim
561
garciadeblas6b2112c2024-12-20 10:35:13 +0100562 # TODO: workaround until NBI rejects cluster deletion requests for registered clusters
563 # This if clause will be removed
garciadeblas12470812024-11-18 10:33:12 +0100564 if db_cluster["created"] == "false":
rshri948f7de2024-12-02 03:42:35 +0000565 return await self.deregister(params, order_id)
rshri932105f2024-07-05 15:11:55 +0000566
garciadeblasadb81e82024-11-08 01:11:46 +0100567 _, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100568 "delete_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200569 )
rshri932105f2024-07-05 15:11:55 +0000570 self.logger.info("workflow_name is :{}".format(workflow_name))
571
garciadeblas96b94f52024-07-08 16:18:21 +0200572 workflow_status, workflow_msg = await self.odu.check_workflow_status(
573 workflow_name
574 )
rshri932105f2024-07-05 15:11:55 +0000575 self.logger.info(
576 "workflow_status is :{} and workflow_msg is :{}".format(
577 workflow_status, workflow_msg
578 )
579 )
580 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200581 db_cluster["state"] = "DELETED"
582 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +0000583 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200584 db_cluster["state"] = "FAILED_DELETION"
585 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000586 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000587 db_cluster = self.update_operation_history(
588 db_cluster, op_id, workflow_status, None
589 )
garciadeblas96b94f52024-07-08 16:18:21 +0200590 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000591
garciadeblas98f9a3d2024-12-10 13:42:47 +0100592 # Clean items used in the workflow or in the cluster, no matter if the workflow succeeded
593 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100594 "delete_cluster", op_id, op_params, workflow_content
garciadeblas98f9a3d2024-12-10 13:42:47 +0100595 )
596 self.logger.info(
597 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
598 )
599
rshri932105f2024-07-05 15:11:55 +0000600 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100601 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100602 "delete_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000603 )
604 self.logger.info(
605 "resource_status is :{} and resource_msg is :{}".format(
606 resource_status, resource_msg
607 )
608 )
609 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200610 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +0000611 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200612 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000613
garciadeblas96b94f52024-07-08 16:18:21 +0200614 db_cluster["operatingState"] = "IDLE"
615 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000616 db_cluster, op_id, workflow_status, resource_status
garciadeblas96b94f52024-07-08 16:18:21 +0200617 )
shahithya70a3fc92024-11-12 11:01:05 +0000618 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +0200619 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000620
garciadeblas96b94f52024-07-08 16:18:21 +0200621 # To delete it from DB
622 if db_cluster["state"] == "DELETED":
623 self.delete_cluster(db_cluster)
rshri948f7de2024-12-02 03:42:35 +0000624
625 # To delete it from k8scluster collection
626 self.db.del_one("k8sclusters", {"name": db_cluster["name"]})
627
rshri932105f2024-07-05 15:11:55 +0000628 return
629
garciadeblasad6d1ba2025-01-22 16:02:18 +0100630 async def check_delete_cluster(self, op_id, op_params, content):
631 self.logger.info(
632 f"check_delete_cluster Operation {op_id}. Params: {op_params}."
633 )
634 self.logger.debug(f"Content: {content}")
635 db_cluster = content["cluster"]
636 cluster_name = db_cluster["git_name"].lower()
637 cluster_kustomization_name = cluster_name
638 db_vim_account = content["vim_account"]
639 cloud_type = db_vim_account["vim_type"]
640 if cloud_type == "aws":
641 cluster_name = f"{cluster_name}-cluster"
642 if cloud_type in ("azure", "gcp", "aws"):
643 checkings_list = [
644 {
645 "item": "kustomization",
646 "name": cluster_kustomization_name,
647 "namespace": "managed-resources",
648 "deleted": True,
649 "timeout": self._checkloop_kustomization_timeout,
650 "enable": True,
651 "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
652 },
653 {
654 "item": f"cluster_{cloud_type}",
655 "name": cluster_name,
656 "namespace": "",
657 "deleted": True,
658 "timeout": self._checkloop_resource_timeout,
659 "enable": True,
660 "resourceState": "IN_PROGRESS.RESOURCE_DELETED.CLUSTER",
661 },
662 ]
663 else:
664 return False, "Not suitable VIM account to check cluster status"
665 return await self.common_check_list(
666 op_id, checkings_list, "clusters", db_cluster
667 )
668
garciadeblas96b94f52024-07-08 16:18:21 +0200669 def delete_cluster(self, db_cluster):
670 # Actually, item_content is equal to db_cluster
671 # item_content = self.db.get_one("clusters", {"_id": db_cluster["_id"]})
672 # self.logger.debug("item_content is : {}".format(item_content))
rshri932105f2024-07-05 15:11:55 +0000673
rshri932105f2024-07-05 15:11:55 +0000674 # detach profiles
675 update_dict = None
676 profiles_to_detach = [
677 "infra_controller_profiles",
678 "infra_config_profiles",
679 "app_profiles",
680 "resource_profiles",
681 ]
rshri948f7de2024-12-02 03:42:35 +0000682 """
rshri932105f2024-07-05 15:11:55 +0000683 profiles_collection = {
684 "infra_controller_profiles": "k8sinfra_controller",
685 "infra_config_profiles": "k8sinfra_config",
686 "app_profiles": "k8sapp",
687 "resource_profiles": "k8sresource",
688 }
rshri948f7de2024-12-02 03:42:35 +0000689 """
rshri932105f2024-07-05 15:11:55 +0000690 for profile_type in profiles_to_detach:
garciadeblas96b94f52024-07-08 16:18:21 +0200691 if db_cluster.get(profile_type):
garciadeblas96b94f52024-07-08 16:18:21 +0200692 profile_ids = db_cluster[profile_type]
rshri932105f2024-07-05 15:11:55 +0000693 profile_ids_copy = deepcopy(profile_ids)
rshri932105f2024-07-05 15:11:55 +0000694 for profile_id in profile_ids_copy:
rshri948f7de2024-12-02 03:42:35 +0000695 # db_collection = profiles_collection[profile_type]
696 db_collection = self.profile_collection_mapping[profile_type]
rshri932105f2024-07-05 15:11:55 +0000697 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
garciadeblasc2552852024-10-22 12:39:32 +0200698 self.logger.debug("the db_profile is :{}".format(db_profile))
699 self.logger.debug(
garciadeblas96b94f52024-07-08 16:18:21 +0200700 "the item_content name is :{}".format(db_cluster["name"])
rshri932105f2024-07-05 15:11:55 +0000701 )
garciadeblasc2552852024-10-22 12:39:32 +0200702 self.logger.debug(
rshri932105f2024-07-05 15:11:55 +0000703 "the db_profile name is :{}".format(db_profile["name"])
704 )
garciadeblas96b94f52024-07-08 16:18:21 +0200705 if db_cluster["name"] == db_profile["name"]:
rshri932105f2024-07-05 15:11:55 +0000706 self.db.del_one(db_collection, {"_id": profile_id})
707 else:
rshri932105f2024-07-05 15:11:55 +0000708 profile_ids.remove(profile_id)
709 update_dict = {profile_type: profile_ids}
rshri932105f2024-07-05 15:11:55 +0000710 self.db.set_one(
garciadeblas96b94f52024-07-08 16:18:21 +0200711 "clusters", {"_id": db_cluster["_id"]}, update_dict
rshri932105f2024-07-05 15:11:55 +0000712 )
garciadeblas96b94f52024-07-08 16:18:21 +0200713 self.db.del_one("clusters", {"_id": db_cluster["_id"]})
rshri932105f2024-07-05 15:11:55 +0000714
rshri948f7de2024-12-02 03:42:35 +0000715 async def attach_profile(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000716 self.logger.info("profile attach Enter")
rshri948f7de2024-12-02 03:42:35 +0000717
garciadeblas995cbf32024-12-18 12:54:00 +0100718 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000719 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000720 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000721
722 # To initialize the operation states
723 self.initialize_operation(cluster_id, op_id)
724
garciadeblas995cbf32024-12-18 12:54:00 +0100725 # To get the cluster
726 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
727
728 # To get the operation params details
729 op_params = self.get_operation_params(db_cluster, op_id)
730
731 # To copy the cluster content and decrypting fields to use in workflows
732 workflow_content = {
733 "cluster": self.decrypted_copy(db_cluster),
734 }
rshri948f7de2024-12-02 03:42:35 +0000735
736 # To get the profile details
737 profile_id = params["profile_id"]
738 profile_type = params["profile_type"]
739 profile_collection = self.profile_collection_mapping[profile_type]
740 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
741 db_profile["profile_type"] = profile_type
742 # content["profile"] = db_profile
garciadeblas995cbf32024-12-18 12:54:00 +0100743 workflow_content["profile"] = db_profile
rshri932105f2024-07-05 15:11:55 +0000744
garciadeblasadb81e82024-11-08 01:11:46 +0100745 _, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100746 "attach_profile_to_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200747 )
rshri932105f2024-07-05 15:11:55 +0000748 self.logger.info("workflow_name is :{}".format(workflow_name))
749
garciadeblas96b94f52024-07-08 16:18:21 +0200750 workflow_status, workflow_msg = await self.odu.check_workflow_status(
751 workflow_name
752 )
rshri932105f2024-07-05 15:11:55 +0000753 self.logger.info(
754 "workflow_status is :{} and workflow_msg is :{}".format(
755 workflow_status, workflow_msg
756 )
757 )
758 if workflow_status:
759 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
760 else:
761 db_cluster["resourceState"] = "ERROR"
762 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000763 db_cluster = self.update_operation_history(
764 db_cluster, op_id, workflow_status, None
765 )
rshri932105f2024-07-05 15:11:55 +0000766 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
767
768 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100769 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100770 "attach_profile_to_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000771 )
772 self.logger.info(
773 "resource_status is :{} and resource_msg is :{}".format(
774 resource_status, resource_msg
775 )
776 )
777 if resource_status:
778 db_cluster["resourceState"] = "READY"
779 else:
780 db_cluster["resourceState"] = "ERROR"
781
782 db_cluster["operatingState"] = "IDLE"
783 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000784 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000785 )
rshri932105f2024-07-05 15:11:55 +0000786 profile_list = db_cluster[profile_type]
rshri932105f2024-07-05 15:11:55 +0000787 if resource_status:
rshri932105f2024-07-05 15:11:55 +0000788 profile_list.append(profile_id)
rshri932105f2024-07-05 15:11:55 +0000789 db_cluster[profile_type] = profile_list
shahithya70a3fc92024-11-12 11:01:05 +0000790 db_cluster["current_operation"] = None
rshri932105f2024-07-05 15:11:55 +0000791 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
792
793 return
794
rshri948f7de2024-12-02 03:42:35 +0000795 async def detach_profile(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000796 self.logger.info("profile dettach Enter")
rshri948f7de2024-12-02 03:42:35 +0000797
garciadeblas995cbf32024-12-18 12:54:00 +0100798 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000799 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000800 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000801
802 # To initialize the operation states
803 self.initialize_operation(cluster_id, op_id)
804
garciadeblas995cbf32024-12-18 12:54:00 +0100805 # To get the cluster
806 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
807
808 # To get the operation params details
809 op_params = self.get_operation_params(db_cluster, op_id)
810
811 # To copy the cluster content and decrypting fields to use in workflows
812 workflow_content = {
813 "cluster": self.decrypted_copy(db_cluster),
814 }
rshri948f7de2024-12-02 03:42:35 +0000815
816 # To get the profile details
817 profile_id = params["profile_id"]
818 profile_type = params["profile_type"]
819 profile_collection = self.profile_collection_mapping[profile_type]
820 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
821 db_profile["profile_type"] = profile_type
822 # content["profile"] = db_profile
garciadeblas995cbf32024-12-18 12:54:00 +0100823 workflow_content["profile"] = db_profile
rshri932105f2024-07-05 15:11:55 +0000824
garciadeblasadb81e82024-11-08 01:11:46 +0100825 _, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100826 "detach_profile_from_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200827 )
rshri932105f2024-07-05 15:11:55 +0000828 self.logger.info("workflow_name is :{}".format(workflow_name))
829
garciadeblas96b94f52024-07-08 16:18:21 +0200830 workflow_status, workflow_msg = await self.odu.check_workflow_status(
831 workflow_name
832 )
rshri932105f2024-07-05 15:11:55 +0000833 self.logger.info(
834 "workflow_status is :{} and workflow_msg is :{}".format(
835 workflow_status, workflow_msg
836 )
837 )
838 if workflow_status:
839 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
840 else:
841 db_cluster["resourceState"] = "ERROR"
842 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000843 db_cluster = self.update_operation_history(
844 db_cluster, op_id, workflow_status, None
845 )
rshri932105f2024-07-05 15:11:55 +0000846 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
847
848 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100849 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100850 "detach_profile_from_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000851 )
852 self.logger.info(
853 "resource_status is :{} and resource_msg is :{}".format(
854 resource_status, resource_msg
855 )
856 )
857 if resource_status:
858 db_cluster["resourceState"] = "READY"
859 else:
860 db_cluster["resourceState"] = "ERROR"
861
862 db_cluster["operatingState"] = "IDLE"
863 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000864 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000865 )
rshri932105f2024-07-05 15:11:55 +0000866 profile_list = db_cluster[profile_type]
867 self.logger.info("profile list is : {}".format(profile_list))
868 if resource_status:
rshri932105f2024-07-05 15:11:55 +0000869 profile_list.remove(profile_id)
rshri932105f2024-07-05 15:11:55 +0000870 db_cluster[profile_type] = profile_list
shahithya70a3fc92024-11-12 11:01:05 +0000871 db_cluster["current_operation"] = None
rshri932105f2024-07-05 15:11:55 +0000872 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
873
874 return
875
    async def register(self, params, order_id):
        """Register an existing (bring-your-own) cluster via ODU workflows.

        Launches the "register_cluster" workflow, records the workflow and
        resource status in the "clusters" document, syncs the cluster
        credentials into the matching "k8sclusters" document and, on full
        success, triggers the legacy k8scluster registration
        (``self.regist.create``).

        :param params: must contain "cluster_id" and "operation_id"
        :param order_id: identifier of the originating LCM order
        """
        self.logger.info("cluster register enter")

        # To get the cluster and op ids
        cluster_id = params["cluster_id"]
        op_id = params["operation_id"]

        # To initialize the operation states
        self.initialize_operation(cluster_id, op_id)

        # To get the cluster
        db_cluster = self.db.get_one("clusters", {"_id": cluster_id})

        # To get the operation params details
        op_params = self.get_operation_params(db_cluster, op_id)

        # To copy the cluster content and decrypting fields to use in workflows
        workflow_content = {
            "cluster": self.decrypted_copy(db_cluster),
        }

        _, workflow_name = await self.odu.launch_workflow(
            "register_cluster", op_id, op_params, workflow_content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            db_cluster["state"] = "CREATED"
            db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_cluster["state"] = "FAILED_CREATION"
            db_cluster["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        db_cluster = self.update_operation_history(
            db_cluster, op_id, workflow_status, None
        )
        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "register_cluster", op_id, op_params, workflow_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, resource_msg = await self.check_resource_status(
                "register_cluster", op_id, op_params, workflow_content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                db_cluster["resourceState"] = "READY"
            else:
                db_cluster["resourceState"] = "ERROR"

            db_cluster["operatingState"] = "IDLE"
            db_cluster = self.update_operation_history(
                db_cluster, op_id, workflow_status, resource_status
            )
            db_cluster["current_operation"] = None
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        # NOTE(review): this sync runs even when the workflow failed, so it
        # relies on db_cluster already carrying "credentials" (registered
        # clusters are created with user-provided credentials) — confirm.
        db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
        db_register["credentials"] = db_cluster["credentials"]
        self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)

        if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
            # To call the lcm.py for registering the cluster in k8scluster lcm.
            register = await self.regist.create(db_register, order_id)
            self.logger.debug(f"Register is : {register}")
        else:
            # Registration did not complete: mark the k8scluster record in error
            db_register["_admin"]["operationalState"] = "ERROR"
            self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)

        return
964
garciadeblasad6d1ba2025-01-22 16:02:18 +0100965 async def check_register_cluster(self, op_id, op_params, content):
966 self.logger.info(
967 f"check_register_cluster Operation {op_id}. Params: {op_params}."
968 )
969 # self.logger.debug(f"Content: {content}")
970 db_cluster = content["cluster"]
971 cluster_name = db_cluster["git_name"].lower()
972 cluster_kustomization_name = cluster_name
973 bootstrap = op_params.get("bootstrap", True)
974 checkings_list = [
975 {
976 "item": "kustomization",
977 "name": f"{cluster_kustomization_name}-bstrp-fluxctrl",
978 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +0100979 "condition": {
980 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
981 "value": "True",
982 },
garciadeblasad6d1ba2025-01-22 16:02:18 +0100983 "timeout": self._checkloop_kustomization_timeout,
984 "enable": bootstrap,
985 "resourceState": "IN_PROGRESS.BOOTSTRAP_OK",
986 },
987 ]
988 return await self.common_check_list(
989 op_id, checkings_list, "clusters", db_cluster
990 )
991
    async def deregister(self, params, order_id):
        """Deregister a cluster via the "deregister_cluster" ODU workflow.

        Records the workflow/resource status in the cluster document,
        cleans up workflow items and finally delegates to ``self.delete``
        so the cluster records are removed from the databases.

        :param params: must contain "cluster_id" and "operation_id"
        :param order_id: identifier of the originating LCM order
        """
        self.logger.info("cluster deregister enter")

        # To get the cluster and op ids
        cluster_id = params["cluster_id"]
        op_id = params["operation_id"]

        # To initialize the operation states
        self.initialize_operation(cluster_id, op_id)

        # To get the cluster
        db_cluster = self.db.get_one("clusters", {"_id": cluster_id})

        # To get the operation params details
        op_params = self.get_operation_params(db_cluster, op_id)

        # To copy the cluster content and decrypting fields to use in workflows
        workflow_content = {
            "cluster": self.decrypted_copy(db_cluster),
        }

        _, workflow_name = await self.odu.launch_workflow(
            "deregister_cluster", op_id, op_params, workflow_content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "workflow_status is :{} and workflow_msg is :{}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_cluster["state"] = "FAILED_DELETION"
            db_cluster["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        db_cluster = self.update_operation_history(
            db_cluster, op_id, workflow_status, None
        )
        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        # Clean items used in the workflow or in the cluster, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "deregister_cluster", op_id, op_params, workflow_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, resource_msg = await self.check_resource_status(
                "deregister_cluster", op_id, op_params, workflow_content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                db_cluster["resourceState"] = "READY"
            else:
                db_cluster["resourceState"] = "ERROR"

            db_cluster = self.update_operation_history(
                db_cluster, op_id, workflow_status, resource_status
            )
            # TODO: workaround until NBI rejects cluster deletion requests for registered clusters
            # Setting created flag to true avoids infinite loops when deregistering a cluster
            db_cluster["created"] = "true"
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        # Deregistration always finishes by deleting the cluster records
        return await self.delete(params, order_id)
rshri932105f2024-07-05 15:11:55 +00001068
rshri948f7de2024-12-02 03:42:35 +00001069 async def get_creds(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001070 self.logger.info("Cluster get creds Enter")
rshri948f7de2024-12-02 03:42:35 +00001071 cluster_id = params["cluster_id"]
1072 op_id = params["operation_id"]
1073 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
garciadeblas96b94f52024-07-08 16:18:21 +02001074 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
1075 if result:
1076 db_cluster["credentials"] = cluster_creds
yshahd940c652024-10-17 06:11:12 +00001077 op_len = 0
1078 for operations in db_cluster["operationHistory"]:
1079 if operations["op_id"] == op_id:
1080 db_cluster["operationHistory"][op_len]["result"] = result
1081 db_cluster["operationHistory"][op_len]["endDate"] = time()
1082 op_len += 1
shahithya70a3fc92024-11-12 11:01:05 +00001083 db_cluster["current_operation"] = None
yshahd940c652024-10-17 06:11:12 +00001084 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri948f7de2024-12-02 03:42:35 +00001085 self.logger.info("Cluster Get Creds Exit")
yshah771dea82024-07-05 15:11:49 +00001086 return
1087
rshri948f7de2024-12-02 03:42:35 +00001088 async def update(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001089 self.logger.info("Cluster update Enter")
rshri948f7de2024-12-02 03:42:35 +00001090 # To get the cluster details
1091 cluster_id = params["cluster_id"]
1092 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1093
1094 # To get the operation params details
1095 op_id = params["operation_id"]
1096 op_params = self.get_operation_params(db_cluster, op_id)
yshah771dea82024-07-05 15:11:49 +00001097
garciadeblas995cbf32024-12-18 12:54:00 +01001098 # To copy the cluster content and decrypting fields to use in workflows
1099 workflow_content = {
1100 "cluster": self.decrypted_copy(db_cluster),
1101 }
rshric3564942024-11-12 18:12:38 +00001102
1103 # vim account details
1104 db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
garciadeblas995cbf32024-12-18 12:54:00 +01001105 workflow_content["vim_account"] = db_vim
rshric3564942024-11-12 18:12:38 +00001106
garciadeblasadb81e82024-11-08 01:11:46 +01001107 _, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001108 "update_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001109 )
1110 workflow_status, workflow_msg = await self.odu.check_workflow_status(
1111 workflow_name
1112 )
1113 self.logger.info(
1114 "Workflow Status: {} Workflow Message: {}".format(
1115 workflow_status, workflow_msg
yshah771dea82024-07-05 15:11:49 +00001116 )
garciadeblas96b94f52024-07-08 16:18:21 +02001117 )
1118
1119 if workflow_status:
1120 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
1121 else:
1122 db_cluster["resourceState"] = "ERROR"
1123
yshahcb9075f2024-11-22 12:08:57 +00001124 db_cluster = self.update_operation_history(
1125 db_cluster, op_id, workflow_status, None
1126 )
garciadeblas96b94f52024-07-08 16:18:21 +02001127 # self.logger.info("Db content: {}".format(db_content))
1128 # self.db.set_one(self.db_collection, {"_id": _id}, db_cluster)
1129 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1130
garciadeblas28bff0f2024-09-16 12:53:07 +02001131 # Clean items used in the workflow, no matter if the workflow succeeded
1132 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001133 "update_cluster", op_id, op_params, workflow_content
garciadeblas28bff0f2024-09-16 12:53:07 +02001134 )
1135 self.logger.info(
1136 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1137 )
garciadeblas96b94f52024-07-08 16:18:21 +02001138 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001139 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001140 "update_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001141 )
1142 self.logger.info(
1143 "Resource Status: {} Resource Message: {}".format(
1144 resource_status, resource_msg
1145 )
1146 )
yshah771dea82024-07-05 15:11:49 +00001147
1148 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001149 db_cluster["resourceState"] = "READY"
yshah771dea82024-07-05 15:11:49 +00001150 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001151 db_cluster["resourceState"] = "ERROR"
yshah771dea82024-07-05 15:11:49 +00001152
yshah0defcd52024-11-18 07:41:35 +00001153 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001154 db_cluster, op_id, workflow_status, resource_status
yshah0defcd52024-11-18 07:41:35 +00001155 )
1156
garciadeblas96b94f52024-07-08 16:18:21 +02001157 db_cluster["operatingState"] = "IDLE"
garciadeblas96b94f52024-07-08 16:18:21 +02001158 # self.logger.info("db_cluster: {}".format(db_cluster))
garciadeblas7cf480d2025-01-27 16:53:45 +01001159 # TODO: verify condition
garciadeblas96b94f52024-07-08 16:18:21 +02001160 # For the moment, if the workflow completed successfully, then we update the db accordingly.
1161 if workflow_status:
1162 if "k8s_version" in op_params:
1163 db_cluster["k8s_version"] = op_params["k8s_version"]
yshah0defcd52024-11-18 07:41:35 +00001164 if "node_count" in op_params:
garciadeblas96b94f52024-07-08 16:18:21 +02001165 db_cluster["node_count"] = op_params["node_count"]
yshah0defcd52024-11-18 07:41:35 +00001166 if "node_size" in op_params:
1167 db_cluster["node_count"] = op_params["node_size"]
garciadeblas96b94f52024-07-08 16:18:21 +02001168 # self.db.set_one(self.db_collection, {"_id": _id}, db_content)
shahithya70a3fc92024-11-12 11:01:05 +00001169 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +02001170 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
yshah771dea82024-07-05 15:11:49 +00001171 return
1172
garciadeblasad6d1ba2025-01-22 16:02:18 +01001173 async def check_update_cluster(self, op_id, op_params, content):
1174 self.logger.info(
1175 f"check_update_cluster Operation {op_id}. Params: {op_params}."
1176 )
1177 self.logger.debug(f"Content: {content}")
garciadeblasd7d8bde2025-01-27 18:31:06 +01001178 # return await self.check_dummy_operation(op_id, op_params, content)
1179 db_cluster = content["cluster"]
1180 cluster_name = db_cluster["git_name"].lower()
1181 cluster_kustomization_name = cluster_name
1182 db_vim_account = content["vim_account"]
1183 cloud_type = db_vim_account["vim_type"]
1184 if cloud_type == "aws":
1185 cluster_name = f"{cluster_name}-cluster"
1186 if cloud_type in ("azure", "gcp", "aws"):
1187 checkings_list = [
1188 {
1189 "item": "kustomization",
1190 "name": cluster_kustomization_name,
1191 "namespace": "managed-resources",
1192 "condition": {
1193 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
1194 "value": "True",
1195 },
1196 "timeout": self._checkloop_kustomization_timeout,
1197 "enable": True,
1198 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
1199 },
1200 ]
1201 else:
1202 return False, "Not suitable VIM account to check cluster status"
1203 # Scale operation
1204 if "node_count" in op_params:
1205 checkings_list.append(
1206 {
1207 "item": f"cluster_{cloud_type}",
1208 "name": cluster_name,
1209 "namespace": "",
1210 "condition": {
1211 "jsonpath_filter": "status.atProvider.defaultNodePool[0].nodeCount",
1212 "value": f"{op_params['node_count']}",
1213 },
1214 "timeout": self._checkloop_resource_timeout * 2,
1215 "enable": True,
1216 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODE_COUNT.CLUSTER",
1217 }
1218 )
1219 # Upgrade operation
1220 if "k8s_version" in op_params:
1221 checkings_list.append(
1222 {
1223 "item": f"cluster_{cloud_type}",
1224 "name": cluster_name,
1225 "namespace": "",
1226 "condition": {
1227 "jsonpath_filter": "status.atProvider.defaultNodePool[0].orchestratorVersion",
1228 "value": op_params["k8s_version"],
1229 },
1230 "timeout": self._checkloop_resource_timeout * 2,
1231 "enable": True,
1232 "resourceState": "IN_PROGRESS.RESOURCE_READY.K8S_VERSION.CLUSTER",
1233 }
1234 )
1235 return await self.common_check_list(
1236 op_id, checkings_list, "clusters", db_cluster
1237 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001238
yshah771dea82024-07-05 15:11:49 +00001239
garciadeblas72412282024-11-07 12:41:54 +01001240class CloudCredentialsLcm(GitOpsLcm):
yshah771dea82024-07-05 15:11:49 +00001241 db_collection = "vim_accounts"
1242
    def __init__(self, msg, lcm_tasks, config):
        """Initialize the cloud-credentials LCM handler.

        Delegates to the GitOpsLcm base, which connects to the database,
        the filesystem storage and the messaging system.

        :param msg: message-bus client used for LCM events
        :param lcm_tasks: task registry shared across LCM handlers
        :param config: two-level dictionary with the LCM configuration;
            top level should contain 'database', 'storage', etc.
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)
yshah771dea82024-07-05 15:11:49 +00001250
    async def add(self, params, order_id):
        """Create cloud credentials for a VIM account via an ODU workflow.

        Decrypts the stored VIM credentials, launches the
        "create_cloud_credentials" workflow, cleans up workflow items and,
        if the workflow succeeded, marks the vim_account record and its
        "create" operation as ENABLED.

        :param params: vim_account document; must contain "_id"
        :param order_id: identifier of the originating LCM order
        """
        self.logger.info("Cloud Credentials create")
        vim_id = params["_id"]
        # The vim account id doubles as the operation id for these workflows
        op_id = vim_id
        op_params = params
        db_content = self.db.get_one(self.db_collection, {"_id": vim_id})
        vim_config = db_content.get("config", {})
        # Workflows need the plain-text secrets, so decrypt them in place
        self.db.encrypt_decrypt_fields(
            vim_config.get("credentials"),
            "decrypt",
            ["password", "secret"],
            schema_version=db_content["schema_version"],
            salt=vim_id,
        )

        _, workflow_name = await self.odu.launch_workflow(
            "create_cloud_credentials", op_id, op_params, db_content
        )

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )

        self.logger.info(
            "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "create_cloud_credentials", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            # NOTE(review): resource_status is only logged; the record is
            # marked ENABLED regardless of it — confirm this is intended
            resource_status, resource_msg = await self.check_resource_status(
                "create_cloud_credentials", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )

            db_content["_admin"]["operationalState"] = "ENABLED"
            for operation in db_content["_admin"]["operations"]:
                if operation["lcmOperationType"] == "create":
                    operation["operationState"] = "ENABLED"
            # NOTE(review): db_content was decrypted above; this logs and
            # persists plain-text secrets back to the DB — confirm intended
            self.logger.info("Content : {}".format(db_content))
            self.db.set_one("vim_accounts", {"_id": db_content["_id"]}, db_content)
        return
1303
yshah564ec9c2024-11-29 07:33:32 +00001304 async def edit(self, params, order_id):
1305 self.logger.info("Cloud Credentials Update")
1306 vim_id = params["_id"]
1307 op_id = vim_id
1308 op_params = params
1309 db_content = self.db.get_one("vim_accounts", {"_id": vim_id})
1310 vim_config = db_content.get("config", {})
1311 self.db.encrypt_decrypt_fields(
1312 vim_config.get("credentials"),
1313 "decrypt",
1314 ["password", "secret"],
1315 schema_version=db_content["schema_version"],
1316 salt=vim_id,
1317 )
1318
garciadeblasadb81e82024-11-08 01:11:46 +01001319 _, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001320 "update_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001321 )
1322 workflow_status, workflow_msg = await self.odu.check_workflow_status(
1323 workflow_name
1324 )
1325 self.logger.info(
1326 "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
1327 )
1328
garciadeblas28bff0f2024-09-16 12:53:07 +02001329 # Clean items used in the workflow, no matter if the workflow succeeded
1330 clean_status, clean_msg = await self.odu.clean_items_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001331 "update_cloud_credentials", op_id, op_params, db_content
garciadeblas28bff0f2024-09-16 12:53:07 +02001332 )
1333 self.logger.info(
1334 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1335 )
1336
yshah771dea82024-07-05 15:11:49 +00001337 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001338 resource_status, resource_msg = await self.check_resource_status(
yshah564ec9c2024-11-29 07:33:32 +00001339 "update_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001340 )
1341 self.logger.info(
1342 "Resource Status: {} Resource Message: {}".format(
1343 resource_status, resource_msg
1344 )
1345 )
1346 return
1347
yshah564ec9c2024-11-29 07:33:32 +00001348 async def remove(self, params, order_id):
1349 self.logger.info("Cloud Credentials remove")
1350 vim_id = params["_id"]
1351 op_id = vim_id
1352 op_params = params
1353 db_content = self.db.get_one("vim_accounts", {"_id": vim_id})
1354
garciadeblasadb81e82024-11-08 01:11:46 +01001355 _, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001356 "delete_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001357 )
1358 workflow_status, workflow_msg = await self.odu.check_workflow_status(
1359 workflow_name
1360 )
1361 self.logger.info(
1362 "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
1363 )
1364
1365 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001366 resource_status, resource_msg = await self.check_resource_status(
yshah564ec9c2024-11-29 07:33:32 +00001367 "delete_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001368 )
1369 self.logger.info(
1370 "Resource Status: {} Resource Message: {}".format(
1371 resource_status, resource_msg
1372 )
1373 )
yshah564ec9c2024-11-29 07:33:32 +00001374 self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
yshah771dea82024-07-05 15:11:49 +00001375 return
1376
rshri932105f2024-07-05 15:11:55 +00001377
class K8sAppLcm(GitOpsLcm):
    """LCM handler for K8s application profiles, driven by ODU GitOps workflows."""

    db_collection = "k8sapp"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an app profile: launch the "create_profile" workflow and
        persist its outcome in the DB.

        :param params: dict with "operation_id" and "profile_id"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("App Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sapp", {"_id": profile_id})
        content["profile_type"] = "applications"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one("k8sapp", {"_id": content["_id"]}, content)

        _, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Fix: default to False so the exit log below cannot raise NameError
        # when the workflow failed (resource_status was previously unbound).
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(f"App Create Exit with resource status: {resource_status}")
        return

    async def delete(self, params, order_id):
        """Delete an app profile: launch the "delete_profile" workflow and,
        on success, remove the profile document from the DB.

        :param params: dict with "operation_id" and "profile_id"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("App delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sapp", {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        _, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Fix: default to False — previously resource_status was read while
        # unbound whenever the workflow failed, raising NameError.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        if resource_status:
            content["state"] = "DELETED"
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(f"App Delete Exit with resource status: {resource_status}")
        return
1452
1453
class K8sResourceLcm(GitOpsLcm):
    """LCM handler for K8s managed-resource profiles (GitOps/ODU based)."""

    db_collection = "k8sresource"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create a managed-resource profile via the "create_profile" workflow.

        :param params: dict with "operation_id" and "profile_id"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("Resource Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sresource", {"_id": profile_id})
        content["profile_type"] = "managed-resources"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one("k8sresource", {"_id": content["_id"]}, content)

        _, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False so the exit status is always logged and the name
        # is always bound, even when the workflow failed (consistent with
        # K8sAppLcm).
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(
            f"Resource Create Exit with resource status: {resource_status}"
        )
        return

    async def delete(self, params, order_id):
        """Delete a managed-resource profile via the "delete_profile" workflow
        and, on success, remove the profile document from the DB.

        :param params: dict with "operation_id" and "profile_id"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("Resource delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sresource", {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        _, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Fix: default to False — previously resource_status was read while
        # unbound whenever the workflow failed, raising NameError.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        if resource_status:
            content["state"] = "DELETED"
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(
            f"Resource Delete Exit with resource status: {resource_status}"
        )
        return
1532
1533
class K8sInfraControllerLcm(GitOpsLcm):
    """LCM handler for K8s infra-controller profiles (GitOps/ODU based)."""

    db_collection = "k8sinfra_controller"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an infra-controller profile via the "create_profile" workflow.

        :param params: dict with "operation_id" and "profile_id"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("Infra controller Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_controller", {"_id": profile_id})
        content["profile_type"] = "infra-controllers"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, content)

        _, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False so the exit status is always logged and the name
        # is always bound, even when the workflow failed (consistent with
        # K8sAppLcm).
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(
            f"Infra Controller Create Exit with resource status: {resource_status}"
        )
        return

    async def delete(self, params, order_id):
        """Delete an infra-controller profile via the "delete_profile"
        workflow and, on success, remove the profile document from the DB.

        :param params: dict with "operation_id" and "profile_id"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("Infra controller delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_controller", {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        _, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Fix: default to False — previously resource_status was read while
        # unbound whenever the workflow failed, raising NameError.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        if resource_status:
            content["state"] = "DELETED"
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(
            f"Infra Controller Delete Exit with resource status: {resource_status}"
        )
        return
1612
1613
class K8sInfraConfigLcm(GitOpsLcm):
    """LCM handler for K8s infra-config profiles (GitOps/ODU based)."""

    db_collection = "k8sinfra_config"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an infra-config profile via the "create_profile" workflow.

        :param params: dict with "operation_id" and "profile_id"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("Infra config Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_config", {"_id": profile_id})
        content["profile_type"] = "infra-configs"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, content)

        _, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False so the exit status is always logged and the name
        # is always bound, even when the workflow failed (consistent with
        # K8sAppLcm).
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(
            f"Infra Config Create Exit with resource status: {resource_status}"
        )
        return

    async def delete(self, params, order_id):
        """Delete an infra-config profile via the "delete_profile" workflow
        and, on success, remove the profile document from the DB.

        :param params: dict with "operation_id" and "profile_id"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("Infra config delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one("k8sinfra_config", {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        _, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is :{}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Fix: default to False — the exit log below referenced
        # resource_status even when the workflow failed, raising NameError.
        # The resource check stays effectively gated on workflow success
        # (resource_status can only become True inside the branch).
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        if resource_status:
            content["state"] = "DELETED"
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(
            f"Infra Config Delete Exit with resource status: {resource_status}"
        )

        return
yshah771dea82024-07-05 15:11:49 +00001693
1694
class OkaLcm(GitOpsLcm):
    """LCM handler for OSM Kubernetes Application (OKA) packages."""

    db_collection = "okas"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an OKA via the "create_oka" ODU workflow.

        :param params: dict with "operation_id" and "oka_id"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("OKA Create Enter")
        op_id = params["operation_id"]
        oka_id = params["oka_id"]
        self.initialize_operation(oka_id, op_id)
        db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
        op_params = self.get_operation_params(db_content, op_id)

        _, workflow_name = await self.odu.launch_workflow(
            "create_oka", op_id, op_params, db_content
        )

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_content
        )

        # Fix: default to False so the exit log cannot raise NameError when
        # the workflow failed (resource_status was previously unbound).
        resource_status = False
        if workflow_status:
            resource_status, db_content = await self.check_resource_and_update_db(
                "create_oka", op_id, op_params, db_content
            )
            self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        self.logger.info(f"OKA Create Exit with resource status: {resource_status}")
        return

    async def edit(self, params, order_id):
        """Update an OKA via the "update_oka" ODU workflow.

        :param params: dict with "operation_id" and "oka_id"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("OKA Edit Enter")
        op_id = params["operation_id"]
        oka_id = params["oka_id"]
        self.initialize_operation(oka_id, op_id)
        db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
        op_params = self.get_operation_params(db_content, op_id)

        _, workflow_name = await self.odu.launch_workflow(
            "update_oka", op_id, op_params, db_content
        )
        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_content
        )

        # Fix: default to False so the exit log cannot raise NameError when
        # the workflow failed (resource_status was previously unbound).
        resource_status = False
        if workflow_status:
            resource_status, db_content = await self.check_resource_and_update_db(
                "update_oka", op_id, op_params, db_content
            )
            self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        self.logger.info(f"OKA Update Exit with resource status: {resource_status}")
        return

    async def delete(self, params, order_id):
        """Delete an OKA via the "delete_oka" ODU workflow and, on success,
        remove its document from the DB.

        :param params: dict with "operation_id" and "oka_id"
        :param order_id: kafka order identifier (unused here)
        :return: None
        """
        self.logger.info("OKA delete Enter")
        op_id = params["operation_id"]
        oka_id = params["oka_id"]
        self.initialize_operation(oka_id, op_id)
        db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
        op_params = self.get_operation_params(db_content, op_id)

        _, workflow_name = await self.odu.launch_workflow(
            "delete_oka", op_id, op_params, db_content
        )
        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_content
        )

        # Fix: default to False — previously resource_status was read while
        # unbound whenever the workflow failed, raising NameError.
        resource_status = False
        if workflow_status:
            resource_status, db_content = await self.check_resource_and_update_db(
                "delete_oka", op_id, op_params, db_content
            )

        if resource_status:
            # Fix: was `db_content["state"] == "DELETED"`, a no-op comparison
            # instead of an assignment, so the state was never recorded.
            db_content["state"] = "DELETED"
            self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
            self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
        self.logger.info(f"OKA Delete Exit with resource status: {resource_status}")
        return
1779
1780
garciadeblas72412282024-11-07 12:41:54 +01001781class KsuLcm(GitOpsLcm):
yshah771dea82024-07-05 15:11:49 +00001782 db_collection = "ksus"
1783
1784 def __init__(self, msg, lcm_tasks, config):
1785 """
1786 Init, Connect to database, filesystem storage, and messaging
1787 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
1788 :return: None
1789 """
garciadeblas72412282024-11-07 12:41:54 +01001790 super().__init__(msg, lcm_tasks, config)
garciadeblasad6d1ba2025-01-22 16:02:18 +01001791 self._workflows = {
1792 "create_ksus": {
1793 "check_resource_function": self.check_create_ksus,
1794 },
1795 "delete_ksus": {
1796 "check_resource_function": self.check_delete_ksus,
1797 },
1798 }
1799
1800 def get_dbclusters_from_profile(self, profile_id, profile_type):
1801 cluster_list = []
1802 db_clusters = self.db.get_list("clusters")
1803 self.logger.info(f"Getting list of clusters for {profile_type} {profile_id}")
1804 for db_cluster in db_clusters:
1805 if profile_id in db_cluster.get(profile_type, []):
1806 self.logger.info(
1807 f"Profile {profile_id} found in cluster {db_cluster['name']}"
1808 )
1809 cluster_list.append(db_cluster)
1810 return cluster_list
yshah771dea82024-07-05 15:11:49 +00001811
yshah564ec9c2024-11-29 07:33:32 +00001812 async def create(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001813 self.logger.info("ksu Create Enter")
yshah564ec9c2024-11-29 07:33:32 +00001814 db_content = []
1815 op_params = []
1816 op_id = params["operation_id"]
1817 for ksu_id in params["ksus_list"]:
1818 self.logger.info("Ksu ID: {}".format(ksu_id))
1819 self.initialize_operation(ksu_id, op_id)
1820 db_ksu = self.db.get_one(self.db_collection, {"_id": ksu_id})
1821 self.logger.info("Db KSU: {}".format(db_ksu))
1822 db_content.append(db_ksu)
1823 ksu_params = {}
1824 ksu_params = self.get_operation_params(db_ksu, op_id)
1825 self.logger.info("Operation Params: {}".format(ksu_params))
1826 # Update ksu_params["profile"] with profile name and age-pubkey
1827 profile_type = ksu_params["profile"]["profile_type"]
1828 profile_id = ksu_params["profile"]["_id"]
1829 profile_collection = self.profile_collection_mapping[profile_type]
1830 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
1831 ksu_params["profile"]["name"] = db_profile["name"]
1832 ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
1833 # Update ksu_params["oka"] with sw_catalog_path (when missing)
garciadeblas9f7c9c52025-01-17 01:06:05 +01001834 # TODO: remove this in favor of doing it in ODU workflow
yshah564ec9c2024-11-29 07:33:32 +00001835 for oka in ksu_params["oka"]:
1836 if "sw_catalog_path" not in oka:
1837 oka_id = oka["_id"]
1838 db_oka = self.db.get_one("okas", {"_id": oka_id})
yshah2f39b8a2024-12-19 11:06:24 +00001839 oka_type = MAP_PROFILE[
1840 db_oka.get("profile_type", "infra_controller_profiles")
1841 ]
garciadeblas9f7c9c52025-01-17 01:06:05 +01001842 oka[
1843 "sw_catalog_path"
garciadeblas29f8bcf2025-01-24 14:24:41 +01001844 ] = f"{oka_type}/{db_oka['git_name'].lower()}/templates"
yshah564ec9c2024-11-29 07:33:32 +00001845 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00001846
garciadeblasad6d1ba2025-01-22 16:02:18 +01001847 # A single workflow is launched for all KSUs
garciadeblasadb81e82024-11-08 01:11:46 +01001848 _, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001849 "create_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02001850 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001851 # Update workflow status in all KSUs
1852 wf_status_list = []
yshah564ec9c2024-11-29 07:33:32 +00001853 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01001854 workflow_status = await self.check_workflow_and_update_db(
1855 op_id, workflow_name, db_ksu
1856 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001857 wf_status_list.append(workflow_status)
1858 # Update resource status in all KSUs
1859 # TODO: Is an operation correct if n KSUs are right and 1 is not OK?
1860 res_status_list = []
1861 for db_ksu, ksu_params, wf_status in zip(db_content, op_params, wf_status_list):
1862 if wf_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001863 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00001864 "create_ksus", op_id, ksu_params, db_ksu
1865 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001866 else:
1867 resource_status = False
1868 res_status_list.append(resource_status)
garciadeblas96b94f52024-07-08 16:18:21 +02001869 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
1870
garciadeblasd8429852024-10-17 15:30:30 +02001871 # Clean items used in the workflow, no matter if the workflow succeeded
1872 clean_status, clean_msg = await self.odu.clean_items_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001873 "create_ksus", op_id, op_params, db_content
garciadeblasd8429852024-10-17 15:30:30 +02001874 )
1875 self.logger.info(
1876 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1877 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001878 self.logger.info(f"KSU Create EXIT with Resource Status {res_status_list}")
yshah771dea82024-07-05 15:11:49 +00001879 return
1880
yshah564ec9c2024-11-29 07:33:32 +00001881 async def edit(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001882 self.logger.info("ksu edit Enter")
yshah564ec9c2024-11-29 07:33:32 +00001883 db_content = []
1884 op_params = []
1885 op_id = params["operation_id"]
1886 for ksu_id in params["ksus_list"]:
1887 self.initialize_operation(ksu_id, op_id)
1888 db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
1889 db_content.append(db_ksu)
1890 ksu_params = {}
1891 ksu_params = self.get_operation_params(db_ksu, op_id)
1892 # Update ksu_params["profile"] with profile name and age-pubkey
1893 profile_type = ksu_params["profile"]["profile_type"]
1894 profile_id = ksu_params["profile"]["_id"]
1895 profile_collection = self.profile_collection_mapping[profile_type]
1896 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
1897 ksu_params["profile"]["name"] = db_profile["name"]
1898 ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
1899 # Update ksu_params["oka"] with sw_catalog_path (when missing)
garciadeblas9f7c9c52025-01-17 01:06:05 +01001900 # TODO: remove this in favor of doing it in ODU workflow
yshah564ec9c2024-11-29 07:33:32 +00001901 for oka in ksu_params["oka"]:
1902 if "sw_catalog_path" not in oka:
1903 oka_id = oka["_id"]
1904 db_oka = self.db.get_one("okas", {"_id": oka_id})
yshah2f39b8a2024-12-19 11:06:24 +00001905 oka_type = MAP_PROFILE[
1906 db_oka.get("profile_type", "infra_controller_profiles")
1907 ]
garciadeblas9f7c9c52025-01-17 01:06:05 +01001908 oka[
1909 "sw_catalog_path"
1910 ] = f"{oka_type}/{db_oka['git_name']}/templates"
yshah564ec9c2024-11-29 07:33:32 +00001911 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00001912
garciadeblasadb81e82024-11-08 01:11:46 +01001913 _, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001914 "update_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02001915 )
yshah771dea82024-07-05 15:11:49 +00001916
yshah564ec9c2024-11-29 07:33:32 +00001917 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01001918 workflow_status = await self.check_workflow_and_update_db(
1919 op_id, workflow_name, db_ksu
1920 )
yshah564ec9c2024-11-29 07:33:32 +00001921
garciadeblas96b94f52024-07-08 16:18:21 +02001922 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001923 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00001924 "update_ksus", op_id, ksu_params, db_ksu
garciadeblas96b94f52024-07-08 16:18:21 +02001925 )
garciadeblas96b94f52024-07-08 16:18:21 +02001926 db_ksu["name"] = ksu_params["name"]
1927 db_ksu["description"] = ksu_params["description"]
1928 db_ksu["profile"]["profile_type"] = ksu_params["profile"][
1929 "profile_type"
1930 ]
1931 db_ksu["profile"]["_id"] = ksu_params["profile"]["_id"]
1932 db_ksu["oka"] = ksu_params["oka"]
1933 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
1934
yshah564ec9c2024-11-29 07:33:32 +00001935 # Clean items used in the workflow, no matter if the workflow succeeded
1936 clean_status, clean_msg = await self.odu.clean_items_workflow(
1937 "create_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02001938 )
1939 self.logger.info(
yshah564ec9c2024-11-29 07:33:32 +00001940 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
garciadeblas96b94f52024-07-08 16:18:21 +02001941 )
yshah564ec9c2024-11-29 07:33:32 +00001942 self.logger.info(f"KSU Update EXIT with Resource Status {resource_status}")
yshah771dea82024-07-05 15:11:49 +00001943 return
1944
yshah564ec9c2024-11-29 07:33:32 +00001945 async def delete(self, params, order_id):
1946 self.logger.info("ksu delete Enter")
1947 db_content = []
1948 op_params = []
1949 op_id = params["operation_id"]
1950 for ksu_id in params["ksus_list"]:
1951 self.initialize_operation(ksu_id, op_id)
1952 db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
1953 db_content.append(db_ksu)
1954 ksu_params = {}
1955 ksu_params["profile"] = {}
1956 ksu_params["profile"]["profile_type"] = db_ksu["profile"]["profile_type"]
1957 ksu_params["profile"]["_id"] = db_ksu["profile"]["_id"]
1958 # Update ksu_params["profile"] with profile name and age-pubkey
1959 profile_type = ksu_params["profile"]["profile_type"]
1960 profile_id = ksu_params["profile"]["_id"]
1961 profile_collection = self.profile_collection_mapping[profile_type]
1962 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
1963 ksu_params["profile"]["name"] = db_profile["name"]
1964 ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
1965 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00001966
garciadeblasadb81e82024-11-08 01:11:46 +01001967 _, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001968 "delete_ksus", op_id, op_params, db_content
1969 )
1970
1971 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01001972 workflow_status = await self.check_workflow_and_update_db(
1973 op_id, workflow_name, db_ksu
1974 )
yshah564ec9c2024-11-29 07:33:32 +00001975
1976 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001977 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00001978 "delete_ksus", op_id, ksu_params, db_ksu
1979 )
1980
1981 if resource_status:
1982 db_ksu["state"] == "DELETED"
1983 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
1984 self.db.del_one(self.db_collection, {"_id": db_ksu["_id"]})
1985
1986 self.logger.info(f"KSU Delete Exit with resource status: {resource_status}")
1987 return
1988
1989 async def clone(self, params, order_id):
1990 self.logger.info("ksu clone Enter")
1991 op_id = params["operation_id"]
1992 ksus_id = params["ksus_list"][0]
1993 self.initialize_operation(ksus_id, op_id)
1994 db_content = self.db.get_one(self.db_collection, {"_id": ksus_id})
1995 op_params = self.get_operation_params(db_content, op_id)
1996 _, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02001997 "clone_ksus", op_id, op_params, db_content
1998 )
yshah564ec9c2024-11-29 07:33:32 +00001999
garciadeblas33b36e72025-01-17 12:49:19 +01002000 workflow_status = await self.check_workflow_and_update_db(
2001 op_id, workflow_name, db_content
2002 )
yshah771dea82024-07-05 15:11:49 +00002003
2004 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002005 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002006 "clone_ksus", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002007 )
garciadeblas96b94f52024-07-08 16:18:21 +02002008 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
yshah564ec9c2024-11-29 07:33:32 +00002009
2010 self.logger.info(f"KSU Clone Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002011 return
2012
yshah564ec9c2024-11-29 07:33:32 +00002013 async def move(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02002014 self.logger.info("ksu move Enter")
yshah564ec9c2024-11-29 07:33:32 +00002015 op_id = params["operation_id"]
2016 ksus_id = params["ksus_list"][0]
2017 self.initialize_operation(ksus_id, op_id)
2018 db_content = self.db.get_one(self.db_collection, {"_id": ksus_id})
2019 op_params = self.get_operation_params(db_content, op_id)
garciadeblasadb81e82024-11-08 01:11:46 +01002020 _, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002021 "move_ksus", op_id, op_params, db_content
2022 )
yshah564ec9c2024-11-29 07:33:32 +00002023
garciadeblas33b36e72025-01-17 12:49:19 +01002024 workflow_status = await self.check_workflow_and_update_db(
2025 op_id, workflow_name, db_content
2026 )
yshah771dea82024-07-05 15:11:49 +00002027
2028 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002029 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002030 "move_ksus", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002031 )
garciadeblas96b94f52024-07-08 16:18:21 +02002032 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
yshah564ec9c2024-11-29 07:33:32 +00002033
2034 self.logger.info(f"KSU Move Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002035 return
garciadeblasad6d1ba2025-01-22 16:02:18 +01002036
2037 async def check_create_ksus(self, op_id, op_params, content):
2038 self.logger.info(f"check_create_ksus Operation {op_id}. Params: {op_params}.")
2039 self.logger.debug(f"Content: {content}")
2040 db_ksu = content
2041 kustomization_name = db_ksu["git_name"].lower()
2042 oka_list = op_params["oka"]
2043 oka_item = oka_list[0]
2044 oka_params = oka_item.get("transformation", {})
2045 target_ns = oka_params.get("namespace", "default")
2046 profile_id = op_params.get("profile", {}).get("_id")
2047 profile_type = op_params.get("profile", {}).get("profile_type")
2048 self.logger.info(
2049 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2050 )
2051 dbcluster_list = self.get_dbclusters_from_profile(profile_id, profile_type)
2052 if not dbcluster_list:
2053 self.logger.info(f"No clusters found for profile {profile_id}.")
2054 for db_cluster in dbcluster_list:
2055 try:
2056 self.logger.info(
2057 f"Checking status of KSU in cluster {db_cluster['name']}."
2058 )
2059 cluster_kubectl = self.cluster_kubectl(db_cluster)
2060 checkings_list = [
2061 {
2062 "item": "kustomization",
2063 "name": kustomization_name,
2064 "namespace": target_ns,
garciadeblas7cf480d2025-01-27 16:53:45 +01002065 "condition": {
2066 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
2067 "value": "True",
2068 },
garciadeblasad6d1ba2025-01-22 16:02:18 +01002069 "timeout": self._checkloop_kustomization_timeout,
2070 "enable": True,
2071 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
2072 },
2073 ]
2074 self.logger.info(
2075 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2076 )
2077 result, message = await self.common_check_list(
2078 op_id, checkings_list, "ksus", db_ksu, kubectl=cluster_kubectl
2079 )
2080 if not result:
2081 return False, message
2082 except Exception as e:
2083 self.logger.error(
2084 f"Error checking KSU in cluster {db_cluster['name']}."
2085 )
2086 self.logger.error(e)
2087 return False, f"Error checking KSU in cluster {db_cluster['name']}."
2088 return True, "OK"
2089
2090 async def check_delete_ksus(self, op_id, op_params, content):
2091 self.logger.info(f"check_delete_ksus Operation {op_id}. Params: {op_params}.")
2092 self.logger.debug(f"Content: {content}")
2093 db_ksu = content
2094 kustomization_name = db_ksu["git_name"].lower()
2095 oka_list = db_ksu["oka"]
2096 oka_item = oka_list[0]
2097 oka_params = oka_item.get("transformation", {})
2098 target_ns = oka_params.get("namespace", "default")
2099 profile_id = op_params.get("profile", {}).get("_id")
2100 profile_type = op_params.get("profile", {}).get("profile_type")
2101 self.logger.info(
2102 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2103 )
2104 dbcluster_list = self.get_dbclusters_from_profile(profile_id, profile_type)
2105 if not dbcluster_list:
2106 self.logger.info(f"No clusters found for profile {profile_id}.")
2107 for db_cluster in dbcluster_list:
2108 try:
2109 self.logger.info(
2110 f"Checking status of KSU in cluster {db_cluster['name']}."
2111 )
2112 cluster_kubectl = self.cluster_kubectl(db_cluster)
2113 checkings_list = [
2114 {
2115 "item": "kustomization",
2116 "name": kustomization_name,
2117 "namespace": target_ns,
2118 "deleted": True,
2119 "timeout": self._checkloop_kustomization_timeout,
2120 "enable": True,
2121 "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
2122 },
2123 ]
2124 self.logger.info(
2125 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2126 )
2127 result, message = await self.common_check_list(
2128 op_id, checkings_list, "ksus", db_ksu, kubectl=cluster_kubectl
2129 )
2130 if not result:
2131 return False, message
2132 except Exception as e:
2133 self.logger.error(
2134 f"Error checking KSU in cluster {db_cluster['name']}."
2135 )
2136 self.logger.error(e)
2137 return False, f"Error checking KSU in cluster {db_cluster['name']}."
2138 return True, "OK"