blob: 952760e95724cde1badea7950fc42c43f223b8e9 [file] [log] [blame]
rshri932105f2024-07-05 15:11:55 +00001# -*- coding: utf-8 -*-
2
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12# implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15
16__author__ = (
17 "Shrinithi R <shrinithi.r@tataelxsi.co.in>",
18 "Shahithya Y <shahithya.y@tataelxsi.co.in>",
19)
20
rshric3564942024-11-12 18:12:38 +000021import copy
rshri932105f2024-07-05 15:11:55 +000022import logging
yshahd940c652024-10-17 06:11:12 +000023from time import time
garciadeblas72412282024-11-07 12:41:54 +010024import traceback
rshri932105f2024-07-05 15:11:55 +000025from osm_lcm.lcm_utils import LcmBase
26from copy import deepcopy
27from osm_lcm import odu_workflows
28from osm_lcm import vim_sdn
rshri948f7de2024-12-02 03:42:35 +000029from osm_lcm.data_utils.list_utils import find_in_list
garciadeblasad6d1ba2025-01-22 16:02:18 +010030from osm_lcm.n2vc.kubectl import Kubectl
31import yaml
rshri932105f2024-07-05 15:11:55 +000032
# Maps profile-type attribute names (as used in item documents) to their
# short GitOps names — presumably the folder/target names used by the ODU
# workflow templates (TODO confirm). Note the inconsistent naming style:
# "resource_profiles" maps to snake_case "managed_resources" while the
# others use kebab-case.
MAP_PROFILE = {
    "infra_controller_profiles": "infra-controllers",
    "infra_config_profiles": "infra-configs",
    "resource_profiles": "managed_resources",
    "app_profiles": "apps",
}
39
rshri932105f2024-07-05 15:11:55 +000040
class GitOpsLcm(LcmBase):
    """Base LCM driver for GitOps-managed items.

    Provides the plumbing shared by the concrete subclasses (e.g.
    ``ClusterLcm``): operation-history bookkeeping in the DB, ODU workflow
    status tracking, resource readiness loops and helpers to hand decrypted
    item content to workflows.
    """

    # DB collection holding the managed items; subclasses override it
    # (e.g. "clusters").
    db_collection = "gitops"
    workflow_status = None
    resource_status = None

    # Maps profile attribute names stored in a cluster document to the DB
    # collection that holds the corresponding profile documents.
    profile_collection_mapping = {
        "infra_controller_profiles": "k8sinfra_controller",
        "infra_config_profiles": "k8sinfra_config",
        "resource_profiles": "k8sresource",
        "app_profiles": "k8sapp",
    }

    def __init__(self, msg, lcm_tasks, config):
        """Set up logging, the task registry and the ODU workflow engine.

        :param msg: message-bus client
        :param lcm_tasks: registry of LCM tasks shared across LCM modules
        :param config: LCM configuration dictionary
        """
        self.logger = logging.getLogger("lcm.gitops")
        self.lcm_tasks = lcm_tasks
        self.odu = odu_workflows.OduWorkflow(msg, self.lcm_tasks, config)
        # Timeouts (seconds) used by the readiness-check loops.
        self._checkloop_kustomization_timeout = 900
        self._checkloop_resource_timeout = 900
        # Filled in by subclasses: workflow name -> hooks such as
        # "check_resource_function" (consulted by check_resource_status).
        self._workflows = {}
        super().__init__(msg, self.logger)

    async def check_dummy_operation(self, op_id, op_params, content):
        """Fallback resource check for operations without a specific checker.

        Always reports success.

        :return: (True, "OK")
        """
        self.logger.info(f"Operation {op_id}. Params: {op_params}. Content: {content}")
        return True, "OK"

    def initialize_operation(self, item_id, op_id):
        """Mark operation ``op_id`` of item ``item_id`` as started in the DB.

        Sets the operation to PROCESSING/NOT_READY/IN_PROGRESS, records it
        as the item's current operation and persists the item document.

        :raises ValueError: if ``op_id`` is not in the item's history.
        """
        db_item = self.db.get_one(self.db_collection, {"_id": item_id})
        operation = next(
            (op for op in db_item.get("operationHistory", []) if op["op_id"] == op_id),
            None,
        )
        if operation is None:
            # Fail fast with a clear message instead of a TypeError on None.
            raise ValueError(
                f"Operation {op_id} not found in operationHistory of item {item_id}"
            )
        operation["workflowState"] = "PROCESSING"
        operation["resourceState"] = "NOT_READY"
        operation["operationState"] = "IN_PROGRESS"
        operation["gitOperationInfo"] = None
        db_item["current_operation"] = operation["op_id"]
        self.db.set_one(self.db_collection, {"_id": item_id}, db_item)

    def get_operation_params(self, item, operation_id):
        """Return the operationParams of ``operation_id`` in ``item``.

        Returns an empty dict when the operation or its params are absent.
        """
        operation_history = item.get("operationHistory", [])
        operation = find_in_list(
            operation_history, lambda op: op["op_id"] == operation_id
        )
        # find_in_list returns None when no entry matches; degrade to {}.
        return operation.get("operationParams", {}) if operation else {}

    def get_operation_type(self, item, operation_id):
        """Return the operationType of ``operation_id`` in ``item``.

        Returns an empty dict when the operation or its type is absent
        (kept for backward compatibility with the previous default).
        """
        operation_history = item.get("operationHistory", [])
        operation = find_in_list(
            operation_history, lambda op: op["op_id"] == operation_id
        )
        return operation.get("operationType", {}) if operation else {}

    def update_state_operation_history(
        self, content, op_id, workflow_state=None, resource_state=None
    ):
        """Set the workflow/resource state strings of operation ``op_id``.

        Only states explicitly provided (not None) are updated. The
        (possibly modified) ``content`` document is returned; persisting
        it is the caller's responsibility.
        """
        self.logger.info(
            f"Update state of operation {op_id} in Operation History in DB"
        )
        self.logger.info(
            f"Workflow state: {workflow_state}. Resource state: {resource_state}"
        )
        self.logger.debug(f"Content: {content}")

        for op_num, operation in enumerate(content["operationHistory"]):
            self.logger.debug("Operations: {}".format(operation))
            if operation["op_id"] == op_id:
                self.logger.debug("Found operation number: {}".format(op_num))
                if workflow_state is not None:
                    operation["workflowState"] = workflow_state

                if resource_state is not None:
                    operation["resourceState"] = resource_state
                break
        self.logger.debug("content: {}".format(content))

        return content

    def update_operation_history(
        self, content, op_id, workflow_status=None, resource_status=None, op_end=True
    ):
        """Record the outcome of operation ``op_id`` in the operation history.

        ``workflow_status`` and ``resource_status`` are tri-state: None
        means "no news", True success, False failure. When ``op_end`` is
        True the operation endDate is stamped. Returns the modified
        ``content``; persisting it is the caller's responsibility.
        """
        self.logger.info(
            f"Update Operation History in DB. Workflow status: {workflow_status}. Resource status: {resource_status}"
        )
        self.logger.debug(f"Content: {content}")

        for op_num, operation in enumerate(content["operationHistory"]):
            self.logger.debug("Operations: {}".format(operation))
            if operation["op_id"] == op_id:
                self.logger.debug("Found operation number: {}".format(op_num))
                if workflow_status is not None:
                    if workflow_status:
                        operation["workflowState"] = "COMPLETED"
                        operation["result"] = True
                    else:
                        operation["workflowState"] = "ERROR"
                        operation["operationState"] = "FAILED"
                        operation["result"] = False

                if resource_status is not None:
                    if resource_status:
                        operation["resourceState"] = "READY"
                        operation["operationState"] = "COMPLETED"
                        operation["result"] = True
                    else:
                        operation["resourceState"] = "NOT_READY"
                        operation["operationState"] = "FAILED"
                        operation["result"] = False

                if op_end:
                    operation["endDate"] = time()
                break
        self.logger.debug("content: {}".format(content))

        return content

    async def check_workflow_and_update_db(self, op_id, workflow_name, db_content):
        """Wait for workflow completion and reflect the result in the item.

        Updates state according to the operation type (create/delete),
        updates resourceState, records the outcome in the operation
        history and persists the item.

        :return: the workflow status (bool)
        """
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Message: {}".format(
                workflow_status, workflow_msg
            )
        )
        operation_type = self.get_operation_type(db_content, op_id)
        if operation_type == "create" and workflow_status:
            db_content["state"] = "CREATED"
        elif operation_type == "create" and not workflow_status:
            db_content["state"] = "FAILED_CREATION"
        elif operation_type == "delete" and workflow_status:
            db_content["state"] = "DELETED"
        elif operation_type == "delete" and not workflow_status:
            db_content["state"] = "FAILED_DELETION"

        if workflow_status:
            db_content["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(
            db_content, op_id, workflow_status, None
        )
        self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        return workflow_status

    async def check_resource_and_update_db(
        self, resource_name, op_id, op_params, db_content
    ):
        """Check resource readiness and update the item document accordingly.

        This is only called after a successful workflow, hence the
        workflow status passed to the operation history is fixed to True.

        :return: (resource_status, modified db_content) — NOT persisted here.
        """
        workflow_status = True

        resource_status, resource_msg = await self.check_resource_status(
            resource_name, op_id, op_params, db_content
        )
        self.logger.info(
            "Resource Status: {} Resource Message: {}".format(
                resource_status, resource_msg
            )
        )

        if resource_status:
            db_content["resourceState"] = "READY"
        else:
            db_content["resourceState"] = "ERROR"

        db_content = self.update_operation_history(
            db_content, op_id, workflow_status, resource_status
        )
        db_content["operatingState"] = "IDLE"
        db_content["current_operation"] = None
        return resource_status, db_content

    async def common_check_list(
        self, op_id, checkings_list, db_collection, db_item, kubectl=None
    ):
        """Run a list of readiness checks, updating progress in the DB.

        Each enabled check drives one odu.readiness_loop call. On success
        the item's resourceState advances to the check's state and is
        persisted; on the first failure a (False, message) pair is
        returned. Any exception is caught and reported as a failure.
        """
        try:
            for checking in checkings_list:
                if checking["enable"]:
                    status, message = await self.odu.readiness_loop(
                        item=checking["item"],
                        name=checking["name"],
                        namespace=checking["namespace"],
                        condition=checking.get("condition"),
                        deleted=checking.get("deleted", False),
                        timeout=checking["timeout"],
                        kubectl=kubectl,
                    )
                    if not status:
                        error_message = "Resources not ready: "
                        error_message += checking.get("error_message", "")
                        return status, f"{error_message}: {message}"
                    else:
                        db_item["resourceState"] = checking["resourceState"]
                        db_item = self.update_state_operation_history(
                            db_item, op_id, None, checking["resourceState"]
                        )
                        self.db.set_one(db_collection, {"_id": db_item["_id"]}, db_item)
        except Exception as e:
            self.logger.debug(traceback.format_exc())
            self.logger.debug(f"Exception: {e}", exc_info=True)
            return False, f"Unexpected exception: {e}"
        return True, "OK"

    async def check_resource_status(self, key, op_id, op_params, content):
        """Dispatch to the resource checker registered for workflow ``key``.

        Falls back to check_dummy_operation (always-OK) when the subclass
        registered no "check_resource_function" for this workflow.
        """
        self.logger.info(
            f"Check resource status. Key: {key}. Operation: {op_id}. Params: {op_params}."
        )
        self.logger.debug(f"Check resource status. Content: {content}")
        check_resource_function = self._workflows.get(key, {}).get(
            "check_resource_function"
        )
        self.logger.info("check_resource function : {}".format(check_resource_function))
        if check_resource_function:
            return await check_resource_function(op_id, op_params, content)
        else:
            return await self.check_dummy_operation(op_id, op_params, content)

    def decrypted_copy(self, content, fields=("age_pubkey", "age_privkey")):
        """Return a deep copy of ``content`` with ``fields`` decrypted.

        The copy is intended to be handed to ODU workflows; the original
        document (with encrypted fields) is left untouched. The default is
        a tuple (not a list) to avoid the shared-mutable-default pitfall.
        """
        content_copy = copy.deepcopy(content)

        # Decrypt the selected fields in place on the copy.
        self.db.encrypt_decrypt_fields(
            content_copy,
            "decrypt",
            list(fields),
            schema_version="1.11",
            salt=content_copy["_id"],
        )
        return content_copy

    def cluster_kubectl(self, db_cluster):
        """Build a Kubectl client from the cluster's stored kubeconfig.

        NOTE(review): the kubeconfig (cluster credentials) is written to a
        predictable path under /tmp and never removed — consider tempfile
        with restrictive permissions plus explicit cleanup.
        """
        cluster_kubeconfig = db_cluster["credentials"]
        kubeconfig_path = f"/tmp/{db_cluster['_id']}_kubeconfig.yaml"
        with open(kubeconfig_path, "w") as kubeconfig_file:
            yaml.safe_dump(cluster_kubeconfig, kubeconfig_file)
        return Kubectl(config_file=kubeconfig_path)
282
garciadeblas72412282024-11-07 12:41:54 +0100283
284class ClusterLcm(GitOpsLcm):
garciadeblas96b94f52024-07-08 16:18:21 +0200285 db_collection = "clusters"
rshri932105f2024-07-05 15:11:55 +0000286
    def __init__(self, msg, lcm_tasks, config):
        """Initialize the cluster LCM driver.

        Connects to database, filesystem storage and messaging via the
        GitOpsLcm base class, registers the per-workflow resource-check
        hooks, and instantiates the K8sClusterLcm used elsewhere in this
        module to register created clusters.

        :param msg: message-bus client
        :param lcm_tasks: registry of LCM tasks shared across modules
        :param config: two-level configuration dictionary; top level should
            contain 'database', 'storage', ...
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)
        # Hooks consulted by check_resource_status() for each workflow type.
        self._workflows = {
            "create_cluster": {
                "check_resource_function": self.check_create_cluster,
            },
            "register_cluster": {
                "check_resource_function": self.check_register_cluster,
            },
            "update_cluster": {
                "check_resource_function": self.check_update_cluster,
            },
            "delete_cluster": {
                "check_resource_function": self.check_delete_cluster,
            },
        }
        self.regist = vim_sdn.K8sClusterLcm(msg, self.lcm_tasks, config)
309
rshri948f7de2024-12-02 03:42:35 +0000310 async def create(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000311 self.logger.info("cluster Create Enter")
312
garciadeblas995cbf32024-12-18 12:54:00 +0100313 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000314 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000315 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000316
317 # To initialize the operation states
318 self.initialize_operation(cluster_id, op_id)
319
garciadeblas995cbf32024-12-18 12:54:00 +0100320 # To get the cluster
321 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
322
323 # To get the operation params details
324 op_params = self.get_operation_params(db_cluster, op_id)
325
326 # To copy the cluster content and decrypting fields to use in workflows
327 workflow_content = {
328 "cluster": self.decrypted_copy(db_cluster),
329 }
rshric3564942024-11-12 18:12:38 +0000330
rshri948f7de2024-12-02 03:42:35 +0000331 # To get the vim account details
rshric3564942024-11-12 18:12:38 +0000332 db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
garciadeblas995cbf32024-12-18 12:54:00 +0100333 workflow_content["vim_account"] = db_vim
rshric3564942024-11-12 18:12:38 +0000334
garciadeblas41859ce2025-02-04 16:08:51 +0100335 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100336 "create_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200337 )
garciadeblas41859ce2025-02-04 16:08:51 +0100338 if not workflow_res:
339 self.logger.error(f"Failed to launch workflow: {workflow_name}")
340 db_cluster["state"] = "FAILED_CREATION"
341 db_cluster["resourceState"] = "ERROR"
342 db_cluster = self.update_operation_history(
343 db_cluster, op_id, workflow_status=False, resource_status=None
344 )
345 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
346 # Clean items used in the workflow, no matter if the workflow succeeded
347 clean_status, clean_msg = await self.odu.clean_items_workflow(
348 "create_cluster", op_id, op_params, workflow_content
349 )
350 self.logger.info(
351 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
352 )
353 return
rshri932105f2024-07-05 15:11:55 +0000354
garciadeblas26d733c2025-02-03 16:12:43 +0100355 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200356 workflow_status, workflow_msg = await self.odu.check_workflow_status(
357 workflow_name
358 )
rshri932105f2024-07-05 15:11:55 +0000359 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100360 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000361 workflow_status, workflow_msg
362 )
363 )
364 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200365 db_cluster["state"] = "CREATED"
366 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +0000367 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200368 db_cluster["state"] = "FAILED_CREATION"
369 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000370 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000371 db_cluster = self.update_operation_history(
372 db_cluster, op_id, workflow_status, None
373 )
garciadeblas96b94f52024-07-08 16:18:21 +0200374 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000375
garciadeblas28bff0f2024-09-16 12:53:07 +0200376 # Clean items used in the workflow, no matter if the workflow succeeded
377 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100378 "create_cluster", op_id, op_params, workflow_content
garciadeblas28bff0f2024-09-16 12:53:07 +0200379 )
380 self.logger.info(
381 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
382 )
383
rshri932105f2024-07-05 15:11:55 +0000384 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100385 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100386 "create_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000387 )
388 self.logger.info(
389 "resource_status is :{} and resource_msg is :{}".format(
390 resource_status, resource_msg
391 )
392 )
393 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200394 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +0000395 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200396 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000397
garciadeblas96b94f52024-07-08 16:18:21 +0200398 db_cluster["operatingState"] = "IDLE"
399 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000400 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000401 )
shahithya70a3fc92024-11-12 11:01:05 +0000402 db_cluster["current_operation"] = None
garciadeblas41a600e2025-01-21 11:49:38 +0100403
404 # Retrieve credentials
405 cluster_creds = None
406 if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
407 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
408 # TODO: manage the case where the credentials are not available
409 if result:
410 db_cluster["credentials"] = cluster_creds
411
412 # Update db_cluster
garciadeblas96b94f52024-07-08 16:18:21 +0200413 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
414 self.update_profile_state(db_cluster, workflow_status, resource_status)
rshri948f7de2024-12-02 03:42:35 +0000415
garciadeblas41a600e2025-01-21 11:49:38 +0100416 # Register the cluster in k8sclusters collection
rshri948f7de2024-12-02 03:42:35 +0000417 db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
garciadeblas41a600e2025-01-21 11:49:38 +0100418 if cluster_creds:
rshri948f7de2024-12-02 03:42:35 +0000419 db_register["credentials"] = cluster_creds
garciadeblas41a600e2025-01-21 11:49:38 +0100420 # To call the lcm.py for registering the cluster in k8scluster lcm.
rshri948f7de2024-12-02 03:42:35 +0000421 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
422 register = await self.regist.create(db_register, order_id)
423 self.logger.debug(f"Register is : {register}")
424 else:
425 db_register["_admin"]["operationalState"] = "ERROR"
426 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
427 # To call the lcm.py for registering the cluster in k8scluster lcm.
428 db_register["credentials"] = cluster_creds
429 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
430
rshri932105f2024-07-05 15:11:55 +0000431 return
432
garciadeblas72412282024-11-07 12:41:54 +0100433 async def check_create_cluster(self, op_id, op_params, content):
garciadeblas7eae6f42024-11-08 10:41:38 +0100434 self.logger.info(
435 f"check_create_cluster Operation {op_id}. Params: {op_params}."
436 )
437 # self.logger.debug(f"Content: {content}")
garciadeblas72412282024-11-07 12:41:54 +0100438 db_cluster = content["cluster"]
439 cluster_name = db_cluster["git_name"].lower()
440 cluster_kustomization_name = cluster_name
441 db_vim_account = content["vim_account"]
442 cloud_type = db_vim_account["vim_type"]
443 nodepool_name = ""
444 if cloud_type == "aws":
445 nodepool_name = f"{cluster_name}-nodegroup"
446 cluster_name = f"{cluster_name}-cluster"
447 elif cloud_type == "gcp":
448 nodepool_name = f"nodepool-{cluster_name}"
449 bootstrap = op_params.get("bootstrap", True)
450 if cloud_type in ("azure", "gcp", "aws"):
451 checkings_list = [
452 {
453 "item": "kustomization",
454 "name": cluster_kustomization_name,
455 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +0100456 "condition": {
457 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
458 "value": "True",
459 },
yshahcb9075f2024-11-22 12:08:57 +0000460 "timeout": 1500,
garciadeblas72412282024-11-07 12:41:54 +0100461 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100462 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
garciadeblas72412282024-11-07 12:41:54 +0100463 },
464 {
465 "item": f"cluster_{cloud_type}",
466 "name": cluster_name,
467 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100468 "condition": {
469 "jsonpath_filter": "status.conditions[?(@.type=='Synced')].status",
470 "value": "True",
471 },
garciadeblas72412282024-11-07 12:41:54 +0100472 "timeout": self._checkloop_resource_timeout,
473 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100474 "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.CLUSTER",
garciadeblas72412282024-11-07 12:41:54 +0100475 },
476 {
477 "item": f"cluster_{cloud_type}",
478 "name": cluster_name,
479 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100480 "condition": {
481 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
482 "value": "True",
483 },
garciadeblas72412282024-11-07 12:41:54 +0100484 "timeout": self._checkloop_resource_timeout,
485 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100486 "resourceState": "IN_PROGRESS.RESOURCE_READY.CLUSTER",
garciadeblas72412282024-11-07 12:41:54 +0100487 },
488 {
489 "item": "kustomization",
490 "name": f"{cluster_kustomization_name}-bstrp-fluxctrl",
491 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +0100492 "condition": {
493 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
494 "value": "True",
495 },
yshahcb9075f2024-11-22 12:08:57 +0000496 "timeout": self._checkloop_resource_timeout,
garciadeblas72412282024-11-07 12:41:54 +0100497 "enable": bootstrap,
garciadeblas7eae6f42024-11-08 10:41:38 +0100498 "resourceState": "IN_PROGRESS.BOOTSTRAP_OK",
garciadeblas72412282024-11-07 12:41:54 +0100499 },
500 ]
501 else:
502 return False, "Not suitable VIM account to check cluster status"
503 if nodepool_name:
504 nodepool_check = {
505 "item": f"nodepool_{cloud_type}",
506 "name": nodepool_name,
507 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100508 "condition": {
509 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
510 "value": "True",
511 },
garciadeblas72412282024-11-07 12:41:54 +0100512 "timeout": self._checkloop_resource_timeout,
513 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100514 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEPOOL",
garciadeblas72412282024-11-07 12:41:54 +0100515 }
516 checkings_list.insert(3, nodepool_check)
yshahcb9075f2024-11-22 12:08:57 +0000517 return await self.common_check_list(
518 op_id, checkings_list, "clusters", db_cluster
519 )
garciadeblas72412282024-11-07 12:41:54 +0100520
garciadeblas96b94f52024-07-08 16:18:21 +0200521 def update_profile_state(self, db_cluster, workflow_status, resource_status):
rshri932105f2024-07-05 15:11:55 +0000522 profiles = [
523 "infra_controller_profiles",
524 "infra_config_profiles",
525 "app_profiles",
526 "resource_profiles",
527 ]
rshri948f7de2024-12-02 03:42:35 +0000528 """
rshri932105f2024-07-05 15:11:55 +0000529 profiles_collection = {
530 "infra_controller_profiles": "k8sinfra_controller",
531 "infra_config_profiles": "k8sinfra_config",
532 "app_profiles": "k8sapp",
533 "resource_profiles": "k8sresource",
534 }
rshri948f7de2024-12-02 03:42:35 +0000535 """
Your Name86149632024-11-14 16:17:16 +0000536 self.logger.info("the db_cluster is :{}".format(db_cluster))
rshri932105f2024-07-05 15:11:55 +0000537 for profile_type in profiles:
garciadeblas96b94f52024-07-08 16:18:21 +0200538 profile_id = db_cluster[profile_type]
rshri948f7de2024-12-02 03:42:35 +0000539 # db_collection = profiles_collection[profile_type]
540 db_collection = self.profile_collection_mapping[profile_type]
rshri932105f2024-07-05 15:11:55 +0000541 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
yshahcb9075f2024-11-22 12:08:57 +0000542 op_id = db_profile["operationHistory"][-1].get("op_id")
garciadeblas96b94f52024-07-08 16:18:21 +0200543 db_profile["state"] = db_cluster["state"]
544 db_profile["resourceState"] = db_cluster["resourceState"]
545 db_profile["operatingState"] = db_cluster["operatingState"]
rshric3564942024-11-12 18:12:38 +0000546 db_profile["age_pubkey"] = db_cluster["age_pubkey"]
Your Name86149632024-11-14 16:17:16 +0000547 db_profile["age_privkey"] = db_cluster["age_privkey"]
rshri932105f2024-07-05 15:11:55 +0000548 db_profile = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000549 db_profile, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000550 )
rshri932105f2024-07-05 15:11:55 +0000551 self.db.set_one(db_collection, {"_id": db_profile["_id"]}, db_profile)
552
rshri948f7de2024-12-02 03:42:35 +0000553 async def delete(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000554 self.logger.info("cluster delete Enter")
rshri948f7de2024-12-02 03:42:35 +0000555
garciadeblas995cbf32024-12-18 12:54:00 +0100556 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000557 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000558 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000559
560 # To initialize the operation states
561 self.initialize_operation(cluster_id, op_id)
562
garciadeblas995cbf32024-12-18 12:54:00 +0100563 # To get the cluster
564 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
565
566 # To get the operation params details
567 op_params = self.get_operation_params(db_cluster, op_id)
568
569 # To copy the cluster content and decrypting fields to use in workflows
570 workflow_content = {
571 "cluster": self.decrypted_copy(db_cluster),
572 }
rshri948f7de2024-12-02 03:42:35 +0000573
garciadeblasad6d1ba2025-01-22 16:02:18 +0100574 # To get the vim account details
575 db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
576 workflow_content["vim_account"] = db_vim
577
garciadeblas6b2112c2024-12-20 10:35:13 +0100578 # TODO: workaround until NBI rejects cluster deletion requests for registered clusters
579 # This if clause will be removed
garciadeblas12470812024-11-18 10:33:12 +0100580 if db_cluster["created"] == "false":
rshri948f7de2024-12-02 03:42:35 +0000581 return await self.deregister(params, order_id)
rshri932105f2024-07-05 15:11:55 +0000582
garciadeblas41859ce2025-02-04 16:08:51 +0100583 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100584 "delete_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200585 )
garciadeblas41859ce2025-02-04 16:08:51 +0100586 if not workflow_res:
587 self.logger.error(f"Failed to launch workflow: {workflow_name}")
588 db_cluster["state"] = "FAILED_DELETION"
589 db_cluster["resourceState"] = "ERROR"
590 db_cluster = self.update_operation_history(
591 db_cluster, op_id, workflow_status=False, resource_status=None
592 )
593 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
594 # Clean items used in the workflow, no matter if the workflow succeeded
595 clean_status, clean_msg = await self.odu.clean_items_workflow(
596 "delete_cluster", op_id, op_params, workflow_content
597 )
598 self.logger.info(
599 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
600 )
601 return
rshri932105f2024-07-05 15:11:55 +0000602
garciadeblas26d733c2025-02-03 16:12:43 +0100603 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200604 workflow_status, workflow_msg = await self.odu.check_workflow_status(
605 workflow_name
606 )
rshri932105f2024-07-05 15:11:55 +0000607 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100608 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000609 workflow_status, workflow_msg
610 )
611 )
612 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200613 db_cluster["state"] = "DELETED"
614 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +0000615 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200616 db_cluster["state"] = "FAILED_DELETION"
617 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000618 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000619 db_cluster = self.update_operation_history(
620 db_cluster, op_id, workflow_status, None
621 )
garciadeblas96b94f52024-07-08 16:18:21 +0200622 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000623
garciadeblas98f9a3d2024-12-10 13:42:47 +0100624 # Clean items used in the workflow or in the cluster, no matter if the workflow succeeded
625 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100626 "delete_cluster", op_id, op_params, workflow_content
garciadeblas98f9a3d2024-12-10 13:42:47 +0100627 )
628 self.logger.info(
629 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
630 )
631
rshri932105f2024-07-05 15:11:55 +0000632 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100633 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100634 "delete_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000635 )
636 self.logger.info(
637 "resource_status is :{} and resource_msg is :{}".format(
638 resource_status, resource_msg
639 )
640 )
641 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200642 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +0000643 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200644 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000645
garciadeblas96b94f52024-07-08 16:18:21 +0200646 db_cluster["operatingState"] = "IDLE"
647 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000648 db_cluster, op_id, workflow_status, resource_status
garciadeblas96b94f52024-07-08 16:18:21 +0200649 )
shahithya70a3fc92024-11-12 11:01:05 +0000650 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +0200651 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000652
garciadeblas96b94f52024-07-08 16:18:21 +0200653 # To delete it from DB
654 if db_cluster["state"] == "DELETED":
655 self.delete_cluster(db_cluster)
rshri948f7de2024-12-02 03:42:35 +0000656
657 # To delete it from k8scluster collection
658 self.db.del_one("k8sclusters", {"name": db_cluster["name"]})
659
rshri932105f2024-07-05 15:11:55 +0000660 return
661
garciadeblasad6d1ba2025-01-22 16:02:18 +0100662 async def check_delete_cluster(self, op_id, op_params, content):
663 self.logger.info(
664 f"check_delete_cluster Operation {op_id}. Params: {op_params}."
665 )
666 self.logger.debug(f"Content: {content}")
667 db_cluster = content["cluster"]
668 cluster_name = db_cluster["git_name"].lower()
669 cluster_kustomization_name = cluster_name
670 db_vim_account = content["vim_account"]
671 cloud_type = db_vim_account["vim_type"]
672 if cloud_type == "aws":
673 cluster_name = f"{cluster_name}-cluster"
674 if cloud_type in ("azure", "gcp", "aws"):
675 checkings_list = [
676 {
677 "item": "kustomization",
678 "name": cluster_kustomization_name,
679 "namespace": "managed-resources",
680 "deleted": True,
681 "timeout": self._checkloop_kustomization_timeout,
682 "enable": True,
683 "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
684 },
685 {
686 "item": f"cluster_{cloud_type}",
687 "name": cluster_name,
688 "namespace": "",
689 "deleted": True,
690 "timeout": self._checkloop_resource_timeout,
691 "enable": True,
692 "resourceState": "IN_PROGRESS.RESOURCE_DELETED.CLUSTER",
693 },
694 ]
695 else:
696 return False, "Not suitable VIM account to check cluster status"
697 return await self.common_check_list(
698 op_id, checkings_list, "clusters", db_cluster
699 )
700
garciadeblas96b94f52024-07-08 16:18:21 +0200701 def delete_cluster(self, db_cluster):
702 # Actually, item_content is equal to db_cluster
703 # item_content = self.db.get_one("clusters", {"_id": db_cluster["_id"]})
704 # self.logger.debug("item_content is : {}".format(item_content))
rshri932105f2024-07-05 15:11:55 +0000705
rshri932105f2024-07-05 15:11:55 +0000706 # detach profiles
707 update_dict = None
708 profiles_to_detach = [
709 "infra_controller_profiles",
710 "infra_config_profiles",
711 "app_profiles",
712 "resource_profiles",
713 ]
rshri948f7de2024-12-02 03:42:35 +0000714 """
rshri932105f2024-07-05 15:11:55 +0000715 profiles_collection = {
716 "infra_controller_profiles": "k8sinfra_controller",
717 "infra_config_profiles": "k8sinfra_config",
718 "app_profiles": "k8sapp",
719 "resource_profiles": "k8sresource",
720 }
rshri948f7de2024-12-02 03:42:35 +0000721 """
rshri932105f2024-07-05 15:11:55 +0000722 for profile_type in profiles_to_detach:
garciadeblas96b94f52024-07-08 16:18:21 +0200723 if db_cluster.get(profile_type):
garciadeblas96b94f52024-07-08 16:18:21 +0200724 profile_ids = db_cluster[profile_type]
rshri932105f2024-07-05 15:11:55 +0000725 profile_ids_copy = deepcopy(profile_ids)
rshri932105f2024-07-05 15:11:55 +0000726 for profile_id in profile_ids_copy:
rshri948f7de2024-12-02 03:42:35 +0000727 # db_collection = profiles_collection[profile_type]
728 db_collection = self.profile_collection_mapping[profile_type]
rshri932105f2024-07-05 15:11:55 +0000729 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
garciadeblasc2552852024-10-22 12:39:32 +0200730 self.logger.debug("the db_profile is :{}".format(db_profile))
731 self.logger.debug(
garciadeblas96b94f52024-07-08 16:18:21 +0200732 "the item_content name is :{}".format(db_cluster["name"])
rshri932105f2024-07-05 15:11:55 +0000733 )
garciadeblasc2552852024-10-22 12:39:32 +0200734 self.logger.debug(
rshri932105f2024-07-05 15:11:55 +0000735 "the db_profile name is :{}".format(db_profile["name"])
736 )
garciadeblas96b94f52024-07-08 16:18:21 +0200737 if db_cluster["name"] == db_profile["name"]:
rshri932105f2024-07-05 15:11:55 +0000738 self.db.del_one(db_collection, {"_id": profile_id})
739 else:
rshri932105f2024-07-05 15:11:55 +0000740 profile_ids.remove(profile_id)
741 update_dict = {profile_type: profile_ids}
rshri932105f2024-07-05 15:11:55 +0000742 self.db.set_one(
garciadeblas96b94f52024-07-08 16:18:21 +0200743 "clusters", {"_id": db_cluster["_id"]}, update_dict
rshri932105f2024-07-05 15:11:55 +0000744 )
garciadeblas96b94f52024-07-08 16:18:21 +0200745 self.db.del_one("clusters", {"_id": db_cluster["_id"]})
rshri932105f2024-07-05 15:11:55 +0000746
rshri948f7de2024-12-02 03:42:35 +0000747 async def attach_profile(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000748 self.logger.info("profile attach Enter")
rshri948f7de2024-12-02 03:42:35 +0000749
garciadeblas995cbf32024-12-18 12:54:00 +0100750 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000751 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000752 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000753
754 # To initialize the operation states
755 self.initialize_operation(cluster_id, op_id)
756
garciadeblas995cbf32024-12-18 12:54:00 +0100757 # To get the cluster
758 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
759
760 # To get the operation params details
761 op_params = self.get_operation_params(db_cluster, op_id)
762
763 # To copy the cluster content and decrypting fields to use in workflows
764 workflow_content = {
765 "cluster": self.decrypted_copy(db_cluster),
766 }
rshri948f7de2024-12-02 03:42:35 +0000767
768 # To get the profile details
769 profile_id = params["profile_id"]
770 profile_type = params["profile_type"]
771 profile_collection = self.profile_collection_mapping[profile_type]
772 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
773 db_profile["profile_type"] = profile_type
774 # content["profile"] = db_profile
garciadeblas995cbf32024-12-18 12:54:00 +0100775 workflow_content["profile"] = db_profile
rshri932105f2024-07-05 15:11:55 +0000776
garciadeblas41859ce2025-02-04 16:08:51 +0100777 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100778 "attach_profile_to_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200779 )
garciadeblas41859ce2025-02-04 16:08:51 +0100780 if not workflow_res:
781 self.logger.error(f"Failed to launch workflow: {workflow_name}")
782 db_cluster["resourceState"] = "ERROR"
783 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
784 db_cluster = self.update_operation_history(
785 db_cluster, op_id, workflow_status=False, resource_status=None
786 )
787 return
rshri932105f2024-07-05 15:11:55 +0000788
garciadeblas26d733c2025-02-03 16:12:43 +0100789 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200790 workflow_status, workflow_msg = await self.odu.check_workflow_status(
791 workflow_name
792 )
rshri932105f2024-07-05 15:11:55 +0000793 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100794 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000795 workflow_status, workflow_msg
796 )
797 )
798 if workflow_status:
799 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
800 else:
801 db_cluster["resourceState"] = "ERROR"
802 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000803 db_cluster = self.update_operation_history(
804 db_cluster, op_id, workflow_status, None
805 )
rshri932105f2024-07-05 15:11:55 +0000806 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
807
808 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100809 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100810 "attach_profile_to_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000811 )
812 self.logger.info(
813 "resource_status is :{} and resource_msg is :{}".format(
814 resource_status, resource_msg
815 )
816 )
817 if resource_status:
818 db_cluster["resourceState"] = "READY"
819 else:
820 db_cluster["resourceState"] = "ERROR"
821
822 db_cluster["operatingState"] = "IDLE"
823 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000824 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000825 )
rshri932105f2024-07-05 15:11:55 +0000826 profile_list = db_cluster[profile_type]
rshri932105f2024-07-05 15:11:55 +0000827 if resource_status:
rshri932105f2024-07-05 15:11:55 +0000828 profile_list.append(profile_id)
rshri932105f2024-07-05 15:11:55 +0000829 db_cluster[profile_type] = profile_list
shahithya70a3fc92024-11-12 11:01:05 +0000830 db_cluster["current_operation"] = None
rshri932105f2024-07-05 15:11:55 +0000831 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
832
833 return
834
rshri948f7de2024-12-02 03:42:35 +0000835 async def detach_profile(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000836 self.logger.info("profile dettach Enter")
rshri948f7de2024-12-02 03:42:35 +0000837
garciadeblas995cbf32024-12-18 12:54:00 +0100838 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000839 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000840 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000841
842 # To initialize the operation states
843 self.initialize_operation(cluster_id, op_id)
844
garciadeblas995cbf32024-12-18 12:54:00 +0100845 # To get the cluster
846 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
847
848 # To get the operation params details
849 op_params = self.get_operation_params(db_cluster, op_id)
850
851 # To copy the cluster content and decrypting fields to use in workflows
852 workflow_content = {
853 "cluster": self.decrypted_copy(db_cluster),
854 }
rshri948f7de2024-12-02 03:42:35 +0000855
856 # To get the profile details
857 profile_id = params["profile_id"]
858 profile_type = params["profile_type"]
859 profile_collection = self.profile_collection_mapping[profile_type]
860 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
861 db_profile["profile_type"] = profile_type
862 # content["profile"] = db_profile
garciadeblas995cbf32024-12-18 12:54:00 +0100863 workflow_content["profile"] = db_profile
rshri932105f2024-07-05 15:11:55 +0000864
garciadeblas41859ce2025-02-04 16:08:51 +0100865 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100866 "detach_profile_from_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200867 )
garciadeblas41859ce2025-02-04 16:08:51 +0100868 if not workflow_res:
869 self.logger.error(f"Failed to launch workflow: {workflow_name}")
870 db_cluster["resourceState"] = "ERROR"
871 db_cluster = self.update_operation_history(
872 db_cluster, op_id, workflow_status=False, resource_status=None
873 )
874 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
875 return
rshri932105f2024-07-05 15:11:55 +0000876
garciadeblas26d733c2025-02-03 16:12:43 +0100877 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200878 workflow_status, workflow_msg = await self.odu.check_workflow_status(
879 workflow_name
880 )
rshri932105f2024-07-05 15:11:55 +0000881 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100882 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000883 workflow_status, workflow_msg
884 )
885 )
886 if workflow_status:
887 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
888 else:
889 db_cluster["resourceState"] = "ERROR"
890 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000891 db_cluster = self.update_operation_history(
892 db_cluster, op_id, workflow_status, None
893 )
rshri932105f2024-07-05 15:11:55 +0000894 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
895
896 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100897 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100898 "detach_profile_from_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000899 )
900 self.logger.info(
901 "resource_status is :{} and resource_msg is :{}".format(
902 resource_status, resource_msg
903 )
904 )
905 if resource_status:
906 db_cluster["resourceState"] = "READY"
907 else:
908 db_cluster["resourceState"] = "ERROR"
909
910 db_cluster["operatingState"] = "IDLE"
911 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000912 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000913 )
rshri932105f2024-07-05 15:11:55 +0000914 profile_list = db_cluster[profile_type]
915 self.logger.info("profile list is : {}".format(profile_list))
916 if resource_status:
rshri932105f2024-07-05 15:11:55 +0000917 profile_list.remove(profile_id)
rshri932105f2024-07-05 15:11:55 +0000918 db_cluster[profile_type] = profile_list
shahithya70a3fc92024-11-12 11:01:05 +0000919 db_cluster["current_operation"] = None
rshri932105f2024-07-05 15:11:55 +0000920 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
921
922 return
923
rshri948f7de2024-12-02 03:42:35 +0000924 async def register(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000925 self.logger.info("cluster register enter")
926
garciadeblas995cbf32024-12-18 12:54:00 +0100927 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000928 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000929 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000930
931 # To initialize the operation states
932 self.initialize_operation(cluster_id, op_id)
933
garciadeblas995cbf32024-12-18 12:54:00 +0100934 # To get the cluster
935 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
936
937 # To get the operation params details
938 op_params = self.get_operation_params(db_cluster, op_id)
939
940 # To copy the cluster content and decrypting fields to use in workflows
941 workflow_content = {
942 "cluster": self.decrypted_copy(db_cluster),
943 }
rshric3564942024-11-12 18:12:38 +0000944
garciadeblas41859ce2025-02-04 16:08:51 +0100945 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100946 "register_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200947 )
garciadeblas41859ce2025-02-04 16:08:51 +0100948 if not workflow_res:
949 self.logger.error(f"Failed to launch workflow: {workflow_name}")
950 db_cluster["state"] = "FAILED_CREATION"
951 db_cluster["resourceState"] = "ERROR"
952 db_cluster = self.update_operation_history(
953 db_cluster, op_id, workflow_status=False, resource_status=None
954 )
955 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
956 # Clean items used in the workflow, no matter if the workflow succeeded
957 clean_status, clean_msg = await self.odu.clean_items_workflow(
958 "register_cluster", op_id, op_params, workflow_content
959 )
960 self.logger.info(
961 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
962 )
963 return
rshri932105f2024-07-05 15:11:55 +0000964
garciadeblas26d733c2025-02-03 16:12:43 +0100965 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200966 workflow_status, workflow_msg = await self.odu.check_workflow_status(
967 workflow_name
968 )
rshri932105f2024-07-05 15:11:55 +0000969 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100970 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000971 workflow_status, workflow_msg
972 )
973 )
974 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200975 db_cluster["state"] = "CREATED"
976 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +0000977 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200978 db_cluster["state"] = "FAILED_CREATION"
979 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000980 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000981 db_cluster = self.update_operation_history(
982 db_cluster, op_id, workflow_status, None
983 )
garciadeblas96b94f52024-07-08 16:18:21 +0200984 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000985
garciadeblasdde3a312024-09-17 13:25:06 +0200986 # Clean items used in the workflow, no matter if the workflow succeeded
987 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100988 "register_cluster", op_id, op_params, workflow_content
garciadeblasdde3a312024-09-17 13:25:06 +0200989 )
990 self.logger.info(
991 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
992 )
993
rshri932105f2024-07-05 15:11:55 +0000994 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100995 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100996 "register_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000997 )
998 self.logger.info(
999 "resource_status is :{} and resource_msg is :{}".format(
1000 resource_status, resource_msg
1001 )
1002 )
1003 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001004 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +00001005 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001006 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001007
garciadeblas96b94f52024-07-08 16:18:21 +02001008 db_cluster["operatingState"] = "IDLE"
1009 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001010 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +00001011 )
shahithya70a3fc92024-11-12 11:01:05 +00001012 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +02001013 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri948f7de2024-12-02 03:42:35 +00001014
1015 db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
1016 db_register["credentials"] = db_cluster["credentials"]
1017 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
1018
1019 if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
1020 # To call the lcm.py for registering the cluster in k8scluster lcm.
1021 register = await self.regist.create(db_register, order_id)
1022 self.logger.debug(f"Register is : {register}")
1023 else:
1024 db_register["_admin"]["operationalState"] = "ERROR"
1025 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
1026
rshri932105f2024-07-05 15:11:55 +00001027 return
1028
garciadeblasad6d1ba2025-01-22 16:02:18 +01001029 async def check_register_cluster(self, op_id, op_params, content):
1030 self.logger.info(
1031 f"check_register_cluster Operation {op_id}. Params: {op_params}."
1032 )
1033 # self.logger.debug(f"Content: {content}")
1034 db_cluster = content["cluster"]
1035 cluster_name = db_cluster["git_name"].lower()
1036 cluster_kustomization_name = cluster_name
1037 bootstrap = op_params.get("bootstrap", True)
1038 checkings_list = [
1039 {
1040 "item": "kustomization",
1041 "name": f"{cluster_kustomization_name}-bstrp-fluxctrl",
1042 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +01001043 "condition": {
1044 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
1045 "value": "True",
1046 },
garciadeblasad6d1ba2025-01-22 16:02:18 +01001047 "timeout": self._checkloop_kustomization_timeout,
1048 "enable": bootstrap,
1049 "resourceState": "IN_PROGRESS.BOOTSTRAP_OK",
1050 },
1051 ]
1052 return await self.common_check_list(
1053 op_id, checkings_list, "clusters", db_cluster
1054 )
1055
rshri948f7de2024-12-02 03:42:35 +00001056 async def deregister(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001057 self.logger.info("cluster deregister enter")
1058
garciadeblas995cbf32024-12-18 12:54:00 +01001059 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +00001060 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +00001061 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +00001062
1063 # To initialize the operation states
1064 self.initialize_operation(cluster_id, op_id)
1065
garciadeblas995cbf32024-12-18 12:54:00 +01001066 # To get the cluster
1067 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1068
1069 # To get the operation params details
1070 op_params = self.get_operation_params(db_cluster, op_id)
1071
1072 # To copy the cluster content and decrypting fields to use in workflows
1073 workflow_content = {
1074 "cluster": self.decrypted_copy(db_cluster),
1075 }
rshri932105f2024-07-05 15:11:55 +00001076
garciadeblas41859ce2025-02-04 16:08:51 +01001077 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001078 "deregister_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001079 )
garciadeblas41859ce2025-02-04 16:08:51 +01001080 if not workflow_res:
1081 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1082 db_cluster["state"] = "FAILED_DELETION"
1083 db_cluster["resourceState"] = "ERROR"
1084 db_cluster = self.update_operation_history(
1085 db_cluster, op_id, workflow_status=False, resource_status=None
1086 )
1087 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1088 # Clean items used in the workflow, no matter if the workflow succeeded
1089 clean_status, clean_msg = await self.odu.clean_items_workflow(
1090 "deregister_cluster", op_id, op_params, workflow_content
1091 )
1092 self.logger.info(
1093 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1094 )
1095 return
rshri932105f2024-07-05 15:11:55 +00001096
garciadeblas26d733c2025-02-03 16:12:43 +01001097 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001098 workflow_status, workflow_msg = await self.odu.check_workflow_status(
1099 workflow_name
1100 )
rshri932105f2024-07-05 15:11:55 +00001101 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001102 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001103 workflow_status, workflow_msg
1104 )
1105 )
1106 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001107 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +00001108 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001109 db_cluster["state"] = "FAILED_DELETION"
1110 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001111 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001112 db_cluster = self.update_operation_history(
1113 db_cluster, op_id, workflow_status, None
1114 )
garciadeblas96b94f52024-07-08 16:18:21 +02001115 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001116
garciadeblas91bb2c42024-11-12 11:17:12 +01001117 # Clean items used in the workflow or in the cluster, no matter if the workflow succeeded
1118 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001119 "deregister_cluster", op_id, op_params, workflow_content
garciadeblas91bb2c42024-11-12 11:17:12 +01001120 )
1121 self.logger.info(
1122 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1123 )
1124
rshri932105f2024-07-05 15:11:55 +00001125 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001126 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001127 "deregister_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001128 )
1129 self.logger.info(
1130 "resource_status is :{} and resource_msg is :{}".format(
1131 resource_status, resource_msg
1132 )
1133 )
1134 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001135 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +00001136 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001137 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001138
garciadeblas96b94f52024-07-08 16:18:21 +02001139 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001140 db_cluster, op_id, workflow_status, resource_status
garciadeblas96b94f52024-07-08 16:18:21 +02001141 )
garciadeblas6b2112c2024-12-20 10:35:13 +01001142 # TODO: workaround until NBI rejects cluster deletion requests for registered clusters
1143 # Setting created flag to true avoids infinite loops when deregistering a cluster
1144 db_cluster["created"] = "true"
garciadeblas96b94f52024-07-08 16:18:21 +02001145 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001146
garciadeblas98f9a3d2024-12-10 13:42:47 +01001147 return await self.delete(params, order_id)
rshri932105f2024-07-05 15:11:55 +00001148
rshri948f7de2024-12-02 03:42:35 +00001149 async def get_creds(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001150 self.logger.info("Cluster get creds Enter")
rshri948f7de2024-12-02 03:42:35 +00001151 cluster_id = params["cluster_id"]
1152 op_id = params["operation_id"]
1153 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
garciadeblas96b94f52024-07-08 16:18:21 +02001154 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
1155 if result:
1156 db_cluster["credentials"] = cluster_creds
yshahd940c652024-10-17 06:11:12 +00001157 op_len = 0
1158 for operations in db_cluster["operationHistory"]:
1159 if operations["op_id"] == op_id:
1160 db_cluster["operationHistory"][op_len]["result"] = result
1161 db_cluster["operationHistory"][op_len]["endDate"] = time()
1162 op_len += 1
shahithya70a3fc92024-11-12 11:01:05 +00001163 db_cluster["current_operation"] = None
yshahd940c652024-10-17 06:11:12 +00001164 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri948f7de2024-12-02 03:42:35 +00001165 self.logger.info("Cluster Get Creds Exit")
yshah771dea82024-07-05 15:11:49 +00001166 return
1167
rshri948f7de2024-12-02 03:42:35 +00001168 async def update(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001169 self.logger.info("Cluster update Enter")
rshri948f7de2024-12-02 03:42:35 +00001170 # To get the cluster details
1171 cluster_id = params["cluster_id"]
1172 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1173
1174 # To get the operation params details
1175 op_id = params["operation_id"]
1176 op_params = self.get_operation_params(db_cluster, op_id)
yshah771dea82024-07-05 15:11:49 +00001177
garciadeblas995cbf32024-12-18 12:54:00 +01001178 # To copy the cluster content and decrypting fields to use in workflows
1179 workflow_content = {
1180 "cluster": self.decrypted_copy(db_cluster),
1181 }
rshric3564942024-11-12 18:12:38 +00001182
1183 # vim account details
1184 db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
garciadeblas995cbf32024-12-18 12:54:00 +01001185 workflow_content["vim_account"] = db_vim
rshric3564942024-11-12 18:12:38 +00001186
garciadeblas41859ce2025-02-04 16:08:51 +01001187 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001188 "update_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001189 )
garciadeblas41859ce2025-02-04 16:08:51 +01001190 if not workflow_res:
1191 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1192 db_cluster["resourceState"] = "ERROR"
1193 db_cluster = self.update_operation_history(
1194 db_cluster, op_id, workflow_status=False, resource_status=None
1195 )
1196 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1197 # Clean items used in the workflow, no matter if the workflow succeeded
1198 clean_status, clean_msg = await self.odu.clean_items_workflow(
1199 "update_cluster", op_id, op_params, workflow_content
1200 )
1201 self.logger.info(
1202 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1203 )
1204 return
garciadeblas26d733c2025-02-03 16:12:43 +01001205 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001206 workflow_status, workflow_msg = await self.odu.check_workflow_status(
1207 workflow_name
1208 )
1209 self.logger.info(
1210 "Workflow Status: {} Workflow Message: {}".format(
1211 workflow_status, workflow_msg
yshah771dea82024-07-05 15:11:49 +00001212 )
garciadeblas96b94f52024-07-08 16:18:21 +02001213 )
1214
1215 if workflow_status:
1216 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
1217 else:
1218 db_cluster["resourceState"] = "ERROR"
1219
yshahcb9075f2024-11-22 12:08:57 +00001220 db_cluster = self.update_operation_history(
1221 db_cluster, op_id, workflow_status, None
1222 )
garciadeblas96b94f52024-07-08 16:18:21 +02001223 # self.logger.info("Db content: {}".format(db_content))
1224 # self.db.set_one(self.db_collection, {"_id": _id}, db_cluster)
1225 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1226
garciadeblas28bff0f2024-09-16 12:53:07 +02001227 # Clean items used in the workflow, no matter if the workflow succeeded
1228 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001229 "update_cluster", op_id, op_params, workflow_content
garciadeblas28bff0f2024-09-16 12:53:07 +02001230 )
1231 self.logger.info(
1232 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1233 )
garciadeblas96b94f52024-07-08 16:18:21 +02001234 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001235 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001236 "update_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001237 )
1238 self.logger.info(
1239 "Resource Status: {} Resource Message: {}".format(
1240 resource_status, resource_msg
1241 )
1242 )
yshah771dea82024-07-05 15:11:49 +00001243
1244 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001245 db_cluster["resourceState"] = "READY"
yshah771dea82024-07-05 15:11:49 +00001246 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001247 db_cluster["resourceState"] = "ERROR"
yshah771dea82024-07-05 15:11:49 +00001248
yshah0defcd52024-11-18 07:41:35 +00001249 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001250 db_cluster, op_id, workflow_status, resource_status
yshah0defcd52024-11-18 07:41:35 +00001251 )
1252
garciadeblas96b94f52024-07-08 16:18:21 +02001253 db_cluster["operatingState"] = "IDLE"
garciadeblas96b94f52024-07-08 16:18:21 +02001254 # self.logger.info("db_cluster: {}".format(db_cluster))
garciadeblas7cf480d2025-01-27 16:53:45 +01001255 # TODO: verify condition
garciadeblas96b94f52024-07-08 16:18:21 +02001256 # For the moment, if the workflow completed successfully, then we update the db accordingly.
1257 if workflow_status:
1258 if "k8s_version" in op_params:
1259 db_cluster["k8s_version"] = op_params["k8s_version"]
yshah0defcd52024-11-18 07:41:35 +00001260 if "node_count" in op_params:
garciadeblas96b94f52024-07-08 16:18:21 +02001261 db_cluster["node_count"] = op_params["node_count"]
yshah0defcd52024-11-18 07:41:35 +00001262 if "node_size" in op_params:
1263 db_cluster["node_count"] = op_params["node_size"]
garciadeblas96b94f52024-07-08 16:18:21 +02001264 # self.db.set_one(self.db_collection, {"_id": _id}, db_content)
shahithya70a3fc92024-11-12 11:01:05 +00001265 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +02001266 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
yshah771dea82024-07-05 15:11:49 +00001267 return
1268
garciadeblasad6d1ba2025-01-22 16:02:18 +01001269 async def check_update_cluster(self, op_id, op_params, content):
1270 self.logger.info(
1271 f"check_update_cluster Operation {op_id}. Params: {op_params}."
1272 )
1273 self.logger.debug(f"Content: {content}")
garciadeblasd7d8bde2025-01-27 18:31:06 +01001274 # return await self.check_dummy_operation(op_id, op_params, content)
1275 db_cluster = content["cluster"]
1276 cluster_name = db_cluster["git_name"].lower()
1277 cluster_kustomization_name = cluster_name
1278 db_vim_account = content["vim_account"]
1279 cloud_type = db_vim_account["vim_type"]
1280 if cloud_type == "aws":
1281 cluster_name = f"{cluster_name}-cluster"
1282 if cloud_type in ("azure", "gcp", "aws"):
1283 checkings_list = [
1284 {
1285 "item": "kustomization",
1286 "name": cluster_kustomization_name,
1287 "namespace": "managed-resources",
1288 "condition": {
1289 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
1290 "value": "True",
1291 },
1292 "timeout": self._checkloop_kustomization_timeout,
1293 "enable": True,
1294 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
1295 },
1296 ]
1297 else:
1298 return False, "Not suitable VIM account to check cluster status"
1299 # Scale operation
1300 if "node_count" in op_params:
1301 checkings_list.append(
1302 {
1303 "item": f"cluster_{cloud_type}",
1304 "name": cluster_name,
1305 "namespace": "",
1306 "condition": {
1307 "jsonpath_filter": "status.atProvider.defaultNodePool[0].nodeCount",
1308 "value": f"{op_params['node_count']}",
1309 },
1310 "timeout": self._checkloop_resource_timeout * 2,
1311 "enable": True,
1312 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODE_COUNT.CLUSTER",
1313 }
1314 )
1315 # Upgrade operation
1316 if "k8s_version" in op_params:
1317 checkings_list.append(
1318 {
1319 "item": f"cluster_{cloud_type}",
1320 "name": cluster_name,
1321 "namespace": "",
1322 "condition": {
1323 "jsonpath_filter": "status.atProvider.defaultNodePool[0].orchestratorVersion",
1324 "value": op_params["k8s_version"],
1325 },
1326 "timeout": self._checkloop_resource_timeout * 2,
1327 "enable": True,
1328 "resourceState": "IN_PROGRESS.RESOURCE_READY.K8S_VERSION.CLUSTER",
1329 }
1330 )
1331 return await self.common_check_list(
1332 op_id, checkings_list, "clusters", db_cluster
1333 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001334
yshah771dea82024-07-05 15:11:49 +00001335
class CloudCredentialsLcm(GitOpsLcm):
    """LCM handler for cloud-credential (VIM account) operations.

    Each public method launches the matching ODU workflow, waits for its
    completion and, on success, checks the created/updated resources.
    """

    db_collection = "vim_accounts"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    def _get_decrypted_content(self, vim_id):
        """Fetch the VIM account and decrypt its credentials in memory.

        The decryption is done on the in-memory copy only, so the workflow can
        use the plain credentials; the decrypted document must never be
        written back to the database or dumped to the logs.
        """
        db_content = self.db.get_one(self.db_collection, {"_id": vim_id})
        vim_config = db_content.get("config", {})
        self.db.encrypt_decrypt_fields(
            vim_config.get("credentials"),
            "decrypt",
            ["password", "secret"],
            schema_version=db_content["schema_version"],
            salt=vim_id,
        )
        return db_content

    async def add(self, params, order_id):
        """Create cloud credentials through the "create_cloud_credentials" workflow.

        :param params: operation parameters; must contain the VIM account "_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("Cloud Credentials create")
        vim_id = params["_id"]
        op_id = vim_id
        op_params = params
        db_content = self._get_decrypted_content(vim_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_cloud_credentials", op_id, op_params, db_content
        )

        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "create_cloud_credentials", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, resource_msg = await self.check_resource_status(
                "create_cloud_credentials", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )

            db_content["_admin"]["operationalState"] = "ENABLED"
            for operation in db_content["_admin"]["operations"]:
                if operation["lcmOperationType"] == "create":
                    operation["operationState"] = "ENABLED"
            # Log only the id: db_content holds decrypted credentials at this
            # point and must not be dumped to the logs.
            self.logger.info("Content : {}".format(db_content["_id"]))
            # Persist only the _admin subtree: writing the whole document back
            # would store the credentials in plain text (they were decrypted
            # in memory by _get_decrypted_content).
            self.db.set_one(
                self.db_collection,
                {"_id": db_content["_id"]},
                {"_admin": db_content["_admin"]},
            )
        return

    async def edit(self, params, order_id):
        """Update cloud credentials through the "update_cloud_credentials" workflow.

        :param params: operation parameters; must contain the VIM account "_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("Cloud Credentials Update")
        vim_id = params["_id"]
        op_id = vim_id
        op_params = params
        db_content = self._get_decrypted_content(vim_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "update_cloud_credentials", op_id, op_params, db_content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "update_cloud_credentials", op_id, op_params, db_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, resource_msg = await self.check_resource_status(
                "update_cloud_credentials", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )
        return

    async def remove(self, params, order_id):
        """Delete cloud credentials through the "delete_cloud_credentials" workflow.

        The database record is removed even if the workflow fails, so that no
        dangling credential entries are left behind (best-effort cleanup).

        :param params: operation parameters; must contain the VIM account "_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("Cloud Credentials remove")
        vim_id = params["_id"]
        op_id = vim_id
        op_params = params
        db_content = self.db.get_one(self.db_collection, {"_id": vim_id})

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_cloud_credentials", op_id, op_params, db_content
        )
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            workflow_name
        )
        self.logger.info(
            "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
        )

        if workflow_status:
            resource_status, resource_msg = await self.check_resource_status(
                "delete_cloud_credentials", op_id, op_params, db_content
            )
            self.logger.info(
                "Resource Status: {} Resource Message: {}".format(
                    resource_status, resource_msg
                )
            )
        self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
        return
1472
rshri932105f2024-07-05 15:11:55 +00001473
class K8sAppLcm(GitOpsLcm):
    """LCM handler for application profiles (collection "k8sapp")."""

    db_collection = "k8sapp"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an app profile through the "create_profile" ODU workflow.

        :param params: must contain "operation_id" and "profile_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("App Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one(self.db_collection, {"_id": profile_id})
        content["profile_type"] = "applications"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False so the exit log cannot hit an unbound local when
        # the workflow itself failed.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(f"App Create Exit with resource status: {resource_status}")
        return

    async def delete(self, params, order_id):
        """Delete an app profile through the "delete_profile" ODU workflow.

        The DB record is removed only when both the workflow and the resource
        check succeed.

        :param params: must contain "operation_id" and "profile_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("App delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one(self.db_collection, {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False: without it, a failed workflow would leave
        # resource_status unbound for the check and exit log below.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        if resource_status:
            content["state"] = "DELETED"
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(f"App Delete Exit with resource status: {resource_status}")
        return
1548
1549
class K8sResourceLcm(GitOpsLcm):
    """LCM handler for managed-resource profiles (collection "k8sresource")."""

    db_collection = "k8sresource"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create a resource profile through the "create_profile" ODU workflow.

        :param params: must contain "operation_id" and "profile_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("Resource Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one(self.db_collection, {"_id": profile_id})
        content["profile_type"] = "managed-resources"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False so the exit log cannot hit an unbound local when
        # the workflow itself failed.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(
            f"Resource Create Exit with resource status: {resource_status}"
        )
        return

    async def delete(self, params, order_id):
        """Delete a resource profile through the "delete_profile" ODU workflow.

        The DB record is removed only when both the workflow and the resource
        check succeed.

        :param params: must contain "operation_id" and "profile_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("Resource delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one(self.db_collection, {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False: without it, a failed workflow would leave
        # resource_status unbound for the check and exit log below.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        if resource_status:
            content["state"] = "DELETED"
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(
            f"Resource Delete Exit with resource status: {resource_status}"
        )
        return
1628
1629
class K8sInfraControllerLcm(GitOpsLcm):
    """LCM handler for infra-controller profiles (collection "k8sinfra_controller")."""

    db_collection = "k8sinfra_controller"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an infra-controller profile through the "create_profile" workflow.

        :param params: must contain "operation_id" and "profile_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("Infra controller Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one(self.db_collection, {"_id": profile_id})
        content["profile_type"] = "infra-controllers"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False so the exit log cannot hit an unbound local when
        # the workflow itself failed.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(
            f"Infra Controller Create Exit with resource status: {resource_status}"
        )
        return

    async def delete(self, params, order_id):
        """Delete an infra-controller profile through the "delete_profile" workflow.

        The DB record is removed only when both the workflow and the resource
        check succeed.

        :param params: must contain "operation_id" and "profile_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("Infra controller delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one(self.db_collection, {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False: without it, a failed workflow would leave
        # resource_status unbound for the check and exit log below.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        if resource_status:
            content["state"] = "DELETED"
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(
            f"Infra Controller Delete Exit with resource status: {resource_status}"
        )
        return
1708
1709
class K8sInfraConfigLcm(GitOpsLcm):
    """LCM handler for infra-config profiles (collection "k8sinfra_config")."""

    db_collection = "k8sinfra_config"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an infra-config profile through the "create_profile" workflow.

        :param params: must contain "operation_id" and "profile_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("Infra config Create Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one(self.db_collection, {"_id": profile_id})
        content["profile_type"] = "infra-configs"
        op_params = self.get_operation_params(content, op_id)
        self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False so the exit log cannot hit an unbound local when
        # the workflow itself failed.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "create_profile", op_id, op_params, content
            )
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
        self.logger.info(
            f"Infra Config Create Exit with resource status: {resource_status}"
        )
        return

    async def delete(self, params, order_id):
        """Delete an infra-config profile through the "delete_profile" workflow.

        The DB record is removed only when both the workflow and the resource
        check succeed.

        :param params: must contain "operation_id" and "profile_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("Infra config delete Enter")

        op_id = params["operation_id"]
        profile_id = params["profile_id"]

        # To initialize the operation states
        self.initialize_operation(profile_id, op_id)

        content = self.db.get_one(self.db_collection, {"_id": profile_id})
        op_params = self.get_operation_params(content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_profile", op_id, op_params, content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, content
        )

        # Default to False: without it, a failed workflow would leave
        # resource_status unbound for the check and exit log below.
        resource_status = False
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_profile", op_id, op_params, content
            )

        if resource_status:
            content["state"] = "DELETED"
            self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
            self.db.del_one(self.db_collection, {"_id": content["_id"]})
        self.logger.info(
            f"Infra Config Delete Exit with resource status: {resource_status}"
        )

        return
yshah771dea82024-07-05 15:11:49 +00001789
1790
class OkaLcm(GitOpsLcm):
    """LCM handler for OSM Kubernetes Applications (collection "okas")."""

    db_collection = "okas"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)

    async def create(self, params, order_id):
        """Create an OKA through the "create_oka" ODU workflow.

        :param params: must contain "operation_id" and "oka_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("OKA Create Enter")
        op_id = params["operation_id"]
        oka_id = params["oka_id"]
        self.initialize_operation(oka_id, op_id)
        db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
        op_params = self.get_operation_params(db_content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "create_oka", op_id, op_params, db_content
        )

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_content
        )

        # Default to False so the exit log cannot hit an unbound local when
        # the workflow itself failed.
        resource_status = False
        if workflow_status:
            resource_status, db_content = await self.check_resource_and_update_db(
                "create_oka", op_id, op_params, db_content
            )
            self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        self.logger.info(f"OKA Create Exit with resource status: {resource_status}")
        return

    async def edit(self, params, order_id):
        """Update an OKA through the "update_oka" ODU workflow.

        :param params: must contain "operation_id" and "oka_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("OKA Edit Enter")
        op_id = params["operation_id"]
        oka_id = params["oka_id"]
        self.initialize_operation(oka_id, op_id)
        db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
        op_params = self.get_operation_params(db_content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "update_oka", op_id, op_params, db_content
        )
        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_content
        )

        resource_status = False
        if workflow_status:
            resource_status, db_content = await self.check_resource_and_update_db(
                "update_oka", op_id, op_params, db_content
            )
            self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
        self.logger.info(f"OKA Update Exit with resource status: {resource_status}")
        return

    async def delete(self, params, order_id):
        """Delete an OKA through the "delete_oka" ODU workflow.

        The DB record is removed only when both the workflow and the resource
        check succeed.

        :param params: must contain "operation_id" and "oka_id"
        :param order_id: kafka order identifier (kept for interface parity)
        """
        self.logger.info("OKA delete Enter")
        op_id = params["operation_id"]
        oka_id = params["oka_id"]
        self.initialize_operation(oka_id, op_id)
        db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
        op_params = self.get_operation_params(db_content, op_id)

        workflow_res, workflow_name = await self.odu.launch_workflow(
            "delete_oka", op_id, op_params, db_content
        )
        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_content
        )

        # Default to False: without it, a failed workflow would leave
        # resource_status unbound for the check and exit log below.
        resource_status = False
        if workflow_status:
            resource_status, db_content = await self.check_resource_and_update_db(
                "delete_oka", op_id, op_params, db_content
            )

        if resource_status:
            # The original used "==" here, which was a no-op comparison; the
            # state must actually be assigned before persisting the record.
            db_content["state"] = "DELETED"
            self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
            self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
        self.logger.info(f"OKA Delete Exit with resource status: {resource_status}")
        return
1875
1876
garciadeblas72412282024-11-07 12:41:54 +01001877class KsuLcm(GitOpsLcm):
yshah771dea82024-07-05 15:11:49 +00001878 db_collection = "ksus"
1879
    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)
        # Map each KSU workflow name to the coroutine that verifies the
        # corresponding resources once the workflow has finished.
        # check_create_ksus / check_delete_ksus are sibling methods of this
        # class defined outside this view — TODO confirm their signatures
        # match what the workflow dispatcher expects.
        self._workflows = {
            "create_ksus": {
                "check_resource_function": self.check_create_ksus,
            },
            "delete_ksus": {
                "check_resource_function": self.check_delete_ksus,
            },
        }
1895
1896 def get_dbclusters_from_profile(self, profile_id, profile_type):
1897 cluster_list = []
1898 db_clusters = self.db.get_list("clusters")
1899 self.logger.info(f"Getting list of clusters for {profile_type} {profile_id}")
1900 for db_cluster in db_clusters:
1901 if profile_id in db_cluster.get(profile_type, []):
1902 self.logger.info(
1903 f"Profile {profile_id} found in cluster {db_cluster['name']}"
1904 )
1905 cluster_list.append(db_cluster)
1906 return cluster_list
yshah771dea82024-07-05 15:11:49 +00001907
yshah564ec9c2024-11-29 07:33:32 +00001908 async def create(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001909 self.logger.info("ksu Create Enter")
yshah564ec9c2024-11-29 07:33:32 +00001910 db_content = []
1911 op_params = []
1912 op_id = params["operation_id"]
1913 for ksu_id in params["ksus_list"]:
1914 self.logger.info("Ksu ID: {}".format(ksu_id))
1915 self.initialize_operation(ksu_id, op_id)
1916 db_ksu = self.db.get_one(self.db_collection, {"_id": ksu_id})
1917 self.logger.info("Db KSU: {}".format(db_ksu))
1918 db_content.append(db_ksu)
1919 ksu_params = {}
1920 ksu_params = self.get_operation_params(db_ksu, op_id)
1921 self.logger.info("Operation Params: {}".format(ksu_params))
1922 # Update ksu_params["profile"] with profile name and age-pubkey
1923 profile_type = ksu_params["profile"]["profile_type"]
1924 profile_id = ksu_params["profile"]["_id"]
1925 profile_collection = self.profile_collection_mapping[profile_type]
1926 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
1927 ksu_params["profile"]["name"] = db_profile["name"]
1928 ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
1929 # Update ksu_params["oka"] with sw_catalog_path (when missing)
garciadeblas9f7c9c52025-01-17 01:06:05 +01001930 # TODO: remove this in favor of doing it in ODU workflow
yshah564ec9c2024-11-29 07:33:32 +00001931 for oka in ksu_params["oka"]:
1932 if "sw_catalog_path" not in oka:
1933 oka_id = oka["_id"]
1934 db_oka = self.db.get_one("okas", {"_id": oka_id})
yshah2f39b8a2024-12-19 11:06:24 +00001935 oka_type = MAP_PROFILE[
1936 db_oka.get("profile_type", "infra_controller_profiles")
1937 ]
garciadeblas9f7c9c52025-01-17 01:06:05 +01001938 oka[
1939 "sw_catalog_path"
garciadeblas29f8bcf2025-01-24 14:24:41 +01001940 ] = f"{oka_type}/{db_oka['git_name'].lower()}/templates"
yshah564ec9c2024-11-29 07:33:32 +00001941 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00001942
garciadeblasad6d1ba2025-01-22 16:02:18 +01001943 # A single workflow is launched for all KSUs
garciadeblas41859ce2025-02-04 16:08:51 +01001944 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001945 "create_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02001946 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001947 # Update workflow status in all KSUs
1948 wf_status_list = []
yshah564ec9c2024-11-29 07:33:32 +00001949 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01001950 workflow_status = await self.check_workflow_and_update_db(
1951 op_id, workflow_name, db_ksu
1952 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001953 wf_status_list.append(workflow_status)
1954 # Update resource status in all KSUs
1955 # TODO: Is an operation correct if n KSUs are right and 1 is not OK?
1956 res_status_list = []
1957 for db_ksu, ksu_params, wf_status in zip(db_content, op_params, wf_status_list):
1958 if wf_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001959 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00001960 "create_ksus", op_id, ksu_params, db_ksu
1961 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001962 else:
1963 resource_status = False
1964 res_status_list.append(resource_status)
garciadeblas96b94f52024-07-08 16:18:21 +02001965 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
1966
garciadeblasd8429852024-10-17 15:30:30 +02001967 # Clean items used in the workflow, no matter if the workflow succeeded
1968 clean_status, clean_msg = await self.odu.clean_items_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001969 "create_ksus", op_id, op_params, db_content
garciadeblasd8429852024-10-17 15:30:30 +02001970 )
1971 self.logger.info(
1972 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1973 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001974 self.logger.info(f"KSU Create EXIT with Resource Status {res_status_list}")
yshah771dea82024-07-05 15:11:49 +00001975 return
1976
yshah564ec9c2024-11-29 07:33:32 +00001977 async def edit(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001978 self.logger.info("ksu edit Enter")
yshah564ec9c2024-11-29 07:33:32 +00001979 db_content = []
1980 op_params = []
1981 op_id = params["operation_id"]
1982 for ksu_id in params["ksus_list"]:
1983 self.initialize_operation(ksu_id, op_id)
1984 db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
1985 db_content.append(db_ksu)
1986 ksu_params = {}
1987 ksu_params = self.get_operation_params(db_ksu, op_id)
1988 # Update ksu_params["profile"] with profile name and age-pubkey
1989 profile_type = ksu_params["profile"]["profile_type"]
1990 profile_id = ksu_params["profile"]["_id"]
1991 profile_collection = self.profile_collection_mapping[profile_type]
1992 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
1993 ksu_params["profile"]["name"] = db_profile["name"]
1994 ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
1995 # Update ksu_params["oka"] with sw_catalog_path (when missing)
garciadeblas9f7c9c52025-01-17 01:06:05 +01001996 # TODO: remove this in favor of doing it in ODU workflow
yshah564ec9c2024-11-29 07:33:32 +00001997 for oka in ksu_params["oka"]:
1998 if "sw_catalog_path" not in oka:
1999 oka_id = oka["_id"]
2000 db_oka = self.db.get_one("okas", {"_id": oka_id})
yshah2f39b8a2024-12-19 11:06:24 +00002001 oka_type = MAP_PROFILE[
2002 db_oka.get("profile_type", "infra_controller_profiles")
2003 ]
garciadeblas9f7c9c52025-01-17 01:06:05 +01002004 oka[
2005 "sw_catalog_path"
2006 ] = f"{oka_type}/{db_oka['git_name']}/templates"
yshah564ec9c2024-11-29 07:33:32 +00002007 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00002008
garciadeblas41859ce2025-02-04 16:08:51 +01002009 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002010 "update_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002011 )
yshah771dea82024-07-05 15:11:49 +00002012
yshah564ec9c2024-11-29 07:33:32 +00002013 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01002014 workflow_status = await self.check_workflow_and_update_db(
2015 op_id, workflow_name, db_ksu
2016 )
yshah564ec9c2024-11-29 07:33:32 +00002017
garciadeblas96b94f52024-07-08 16:18:21 +02002018 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002019 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00002020 "update_ksus", op_id, ksu_params, db_ksu
garciadeblas96b94f52024-07-08 16:18:21 +02002021 )
garciadeblas96b94f52024-07-08 16:18:21 +02002022 db_ksu["name"] = ksu_params["name"]
2023 db_ksu["description"] = ksu_params["description"]
2024 db_ksu["profile"]["profile_type"] = ksu_params["profile"][
2025 "profile_type"
2026 ]
2027 db_ksu["profile"]["_id"] = ksu_params["profile"]["_id"]
2028 db_ksu["oka"] = ksu_params["oka"]
2029 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
2030
yshah564ec9c2024-11-29 07:33:32 +00002031 # Clean items used in the workflow, no matter if the workflow succeeded
2032 clean_status, clean_msg = await self.odu.clean_items_workflow(
2033 "create_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002034 )
2035 self.logger.info(
yshah564ec9c2024-11-29 07:33:32 +00002036 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
garciadeblas96b94f52024-07-08 16:18:21 +02002037 )
yshah564ec9c2024-11-29 07:33:32 +00002038 self.logger.info(f"KSU Update EXIT with Resource Status {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002039 return
2040
yshah564ec9c2024-11-29 07:33:32 +00002041 async def delete(self, params, order_id):
2042 self.logger.info("ksu delete Enter")
2043 db_content = []
2044 op_params = []
2045 op_id = params["operation_id"]
2046 for ksu_id in params["ksus_list"]:
2047 self.initialize_operation(ksu_id, op_id)
2048 db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
2049 db_content.append(db_ksu)
2050 ksu_params = {}
2051 ksu_params["profile"] = {}
2052 ksu_params["profile"]["profile_type"] = db_ksu["profile"]["profile_type"]
2053 ksu_params["profile"]["_id"] = db_ksu["profile"]["_id"]
2054 # Update ksu_params["profile"] with profile name and age-pubkey
2055 profile_type = ksu_params["profile"]["profile_type"]
2056 profile_id = ksu_params["profile"]["_id"]
2057 profile_collection = self.profile_collection_mapping[profile_type]
2058 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
2059 ksu_params["profile"]["name"] = db_profile["name"]
2060 ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
2061 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00002062
garciadeblas41859ce2025-02-04 16:08:51 +01002063 workflow_res, workflow_name = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002064 "delete_ksus", op_id, op_params, db_content
2065 )
2066
2067 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01002068 workflow_status = await self.check_workflow_and_update_db(
2069 op_id, workflow_name, db_ksu
2070 )
yshah564ec9c2024-11-29 07:33:32 +00002071
2072 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002073 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00002074 "delete_ksus", op_id, ksu_params, db_ksu
2075 )
2076
2077 if resource_status:
2078 db_ksu["state"] == "DELETED"
2079 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
2080 self.db.del_one(self.db_collection, {"_id": db_ksu["_id"]})
2081
2082 self.logger.info(f"KSU Delete Exit with resource status: {resource_status}")
2083 return
2084
2085 async def clone(self, params, order_id):
2086 self.logger.info("ksu clone Enter")
2087 op_id = params["operation_id"]
2088 ksus_id = params["ksus_list"][0]
2089 self.initialize_operation(ksus_id, op_id)
2090 db_content = self.db.get_one(self.db_collection, {"_id": ksus_id})
2091 op_params = self.get_operation_params(db_content, op_id)
garciadeblas41859ce2025-02-04 16:08:51 +01002092 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002093 "clone_ksus", op_id, op_params, db_content
2094 )
yshah564ec9c2024-11-29 07:33:32 +00002095
garciadeblas33b36e72025-01-17 12:49:19 +01002096 workflow_status = await self.check_workflow_and_update_db(
2097 op_id, workflow_name, db_content
2098 )
yshah771dea82024-07-05 15:11:49 +00002099
2100 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002101 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002102 "clone_ksus", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002103 )
garciadeblas96b94f52024-07-08 16:18:21 +02002104 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
yshah564ec9c2024-11-29 07:33:32 +00002105
2106 self.logger.info(f"KSU Clone Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002107 return
2108
yshah564ec9c2024-11-29 07:33:32 +00002109 async def move(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02002110 self.logger.info("ksu move Enter")
yshah564ec9c2024-11-29 07:33:32 +00002111 op_id = params["operation_id"]
2112 ksus_id = params["ksus_list"][0]
2113 self.initialize_operation(ksus_id, op_id)
2114 db_content = self.db.get_one(self.db_collection, {"_id": ksus_id})
2115 op_params = self.get_operation_params(db_content, op_id)
garciadeblas41859ce2025-02-04 16:08:51 +01002116 workflow_res, workflow_name = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002117 "move_ksus", op_id, op_params, db_content
2118 )
yshah564ec9c2024-11-29 07:33:32 +00002119
garciadeblas33b36e72025-01-17 12:49:19 +01002120 workflow_status = await self.check_workflow_and_update_db(
2121 op_id, workflow_name, db_content
2122 )
yshah771dea82024-07-05 15:11:49 +00002123
2124 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002125 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002126 "move_ksus", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002127 )
garciadeblas96b94f52024-07-08 16:18:21 +02002128 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
yshah564ec9c2024-11-29 07:33:32 +00002129
2130 self.logger.info(f"KSU Move Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002131 return
garciadeblasad6d1ba2025-01-22 16:02:18 +01002132
2133 async def check_create_ksus(self, op_id, op_params, content):
2134 self.logger.info(f"check_create_ksus Operation {op_id}. Params: {op_params}.")
2135 self.logger.debug(f"Content: {content}")
2136 db_ksu = content
2137 kustomization_name = db_ksu["git_name"].lower()
2138 oka_list = op_params["oka"]
2139 oka_item = oka_list[0]
2140 oka_params = oka_item.get("transformation", {})
2141 target_ns = oka_params.get("namespace", "default")
2142 profile_id = op_params.get("profile", {}).get("_id")
2143 profile_type = op_params.get("profile", {}).get("profile_type")
2144 self.logger.info(
2145 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2146 )
2147 dbcluster_list = self.get_dbclusters_from_profile(profile_id, profile_type)
2148 if not dbcluster_list:
2149 self.logger.info(f"No clusters found for profile {profile_id}.")
2150 for db_cluster in dbcluster_list:
2151 try:
2152 self.logger.info(
garciadeblasae238482025-02-03 08:44:19 +01002153 f"Checking status of KSU {db_ksu['name']} in cluster {db_cluster['name']}."
garciadeblasad6d1ba2025-01-22 16:02:18 +01002154 )
2155 cluster_kubectl = self.cluster_kubectl(db_cluster)
2156 checkings_list = [
2157 {
2158 "item": "kustomization",
2159 "name": kustomization_name,
2160 "namespace": target_ns,
garciadeblas7cf480d2025-01-27 16:53:45 +01002161 "condition": {
2162 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
2163 "value": "True",
2164 },
garciadeblasad6d1ba2025-01-22 16:02:18 +01002165 "timeout": self._checkloop_kustomization_timeout,
2166 "enable": True,
2167 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
2168 },
2169 ]
2170 self.logger.info(
2171 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2172 )
2173 result, message = await self.common_check_list(
2174 op_id, checkings_list, "ksus", db_ksu, kubectl=cluster_kubectl
2175 )
2176 if not result:
2177 return False, message
2178 except Exception as e:
2179 self.logger.error(
2180 f"Error checking KSU in cluster {db_cluster['name']}."
2181 )
2182 self.logger.error(e)
2183 return False, f"Error checking KSU in cluster {db_cluster['name']}."
2184 return True, "OK"
2185
2186 async def check_delete_ksus(self, op_id, op_params, content):
2187 self.logger.info(f"check_delete_ksus Operation {op_id}. Params: {op_params}.")
2188 self.logger.debug(f"Content: {content}")
2189 db_ksu = content
2190 kustomization_name = db_ksu["git_name"].lower()
2191 oka_list = db_ksu["oka"]
2192 oka_item = oka_list[0]
2193 oka_params = oka_item.get("transformation", {})
2194 target_ns = oka_params.get("namespace", "default")
2195 profile_id = op_params.get("profile", {}).get("_id")
2196 profile_type = op_params.get("profile", {}).get("profile_type")
2197 self.logger.info(
2198 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2199 )
2200 dbcluster_list = self.get_dbclusters_from_profile(profile_id, profile_type)
2201 if not dbcluster_list:
2202 self.logger.info(f"No clusters found for profile {profile_id}.")
2203 for db_cluster in dbcluster_list:
2204 try:
2205 self.logger.info(
2206 f"Checking status of KSU in cluster {db_cluster['name']}."
2207 )
2208 cluster_kubectl = self.cluster_kubectl(db_cluster)
2209 checkings_list = [
2210 {
2211 "item": "kustomization",
2212 "name": kustomization_name,
2213 "namespace": target_ns,
2214 "deleted": True,
2215 "timeout": self._checkloop_kustomization_timeout,
2216 "enable": True,
2217 "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
2218 },
2219 ]
2220 self.logger.info(
2221 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2222 )
2223 result, message = await self.common_check_list(
2224 op_id, checkings_list, "ksus", db_ksu, kubectl=cluster_kubectl
2225 )
2226 if not result:
2227 return False, message
2228 except Exception as e:
2229 self.logger.error(
2230 f"Error checking KSU in cluster {db_cluster['name']}."
2231 )
2232 self.logger.error(e)
2233 return False, f"Error checking KSU in cluster {db_cluster['name']}."
2234 return True, "OK"