blob: 6b02ec5fbf68fa754506d5b14f94ff9f36da0b91 [file] [log] [blame]
rshri932105f2024-07-05 15:11:55 +00001# -*- coding: utf-8 -*-
2
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12# implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15
16__author__ = (
17 "Shrinithi R <shrinithi.r@tataelxsi.co.in>",
18 "Shahithya Y <shahithya.y@tataelxsi.co.in>",
19)
20
garciadeblas61a4c692025-07-17 13:04:13 +020021import os
yshahd940c652024-10-17 06:11:12 +000022from time import time
garciadeblas72412282024-11-07 12:41:54 +010023import traceback
garciadeblasad6d1ba2025-01-22 16:02:18 +010024import yaml
garciadeblas61a4c692025-07-17 13:04:13 +020025from copy import deepcopy
26from osm_lcm import vim_sdn
27from osm_lcm.gitops import GitOpsLcm
28from osm_lcm.lcm_utils import LcmException
rshri932105f2024-07-05 15:11:55 +000029
# Maps each profile field name (as stored in the cluster DB document) to the
# folder name used for that profile type in the GitOps repository.
MAP_PROFILE = dict(
    infra_controller_profiles="infra-controllers",
    infra_config_profiles="infra-configs",
    resource_profiles="managed_resources",
    app_profiles="apps",
)
36
rshri932105f2024-07-05 15:11:55 +000037
class NodeGroupLcm(GitOpsLcm):
    """LCM handler for Kubernetes nodegroup operations (add, scale, delete).

    Each public coroutine launches an ODU (GitOps) workflow, waits for the
    workflow and for the underlying cloud resources, and keeps the
    "nodegroups" DB collection in sync with the outcome.
    """

    db_collection = "nodegroups"

    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)
        # Maps each workflow name to the coroutine that validates the
        # resources the workflow is expected to create/modify/remove.
        self._workflows = {
            "add_nodegroup": {
                "check_resource_function": self.check_add_nodegroup,
            },
            "scale_nodegroup": {
                "check_resource_function": self.check_scale_nodegroup,
            },
            "delete_nodegroup": {
                "check_resource_function": self.check_delete_nodegroup,
            },
        }

    async def create(self, params, order_id):
        """Launch the 'add_nodegroup' workflow and track it to completion.

        :param params: dict with "nodegroup_id" and "operation_id"
        :param order_id: kafka order id (not used directly here)
        :return: None
        """
        self.logger.info("Add NodeGroup Enter")
        # Fix: initialize up-front so the exit log below cannot raise a
        # NameError when the workflow fails (same pattern as ClusterLcm.create).
        resource_status = None

        # To get the nodegroup and op ids
        nodegroup_id = params["nodegroup_id"]
        op_id = params["operation_id"]

        # To initialize the operation states
        self.initialize_operation(nodegroup_id, op_id)

        # To get the nodegroup details and control plane from DB
        db_nodegroup = self.db.get_one(self.db_collection, {"_id": nodegroup_id})
        db_cluster = self.db.get_one("clusters", {"_id": db_nodegroup["cluster_id"]})

        # To get the operation params details
        op_params = self.get_operation_params(db_nodegroup, op_id)
        self.logger.info(f"Operations Params: {op_params}")

        db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})

        # To copy the cluster content and decrypting fields to use in workflows
        workflow_content = {
            "nodegroup": db_nodegroup,
            "cluster": db_cluster,
            "vim_account": db_vim,
        }
        self.logger.info(f"Workflow content: {workflow_content}")

        workflow_res, workflow_name, _ = await self.odu.launch_workflow(
            "add_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_nodegroup
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "add_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )
        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "add_nodegroup", op_id, op_params, db_nodegroup
            )
        self.db.set_one(self.db_collection, {"_id": db_nodegroup["_id"]}, db_nodegroup)
        self.logger.info(f"Add NodeGroup Exit with resource status: {resource_status}")
        return

    async def check_add_nodegroup(self, op_id, op_params, content):
        """Check the resources created by the 'add_nodegroup' workflow.

        Waits for the nodegroup kustomization to become Ready and for the
        AWS nodegroup object to become Synced and then Ready.

        :param op_id: operation id
        :param op_params: operation params (logged only)
        :param content: the nodegroup DB document
        :return: (bool, str) result/message tuple
        """
        self.logger.info(f"check_add_nodegroup Operation {op_id}. Params: {op_params}.")
        self.logger.info(f"Content: {content}")
        db_nodegroup = content
        nodegroup_name = db_nodegroup["git_name"].lower()
        nodegroup_kustomization_name = nodegroup_name
        checkings_list = [
            {
                "item": "kustomization",
                "name": nodegroup_kustomization_name,
                "namespace": "managed-resources",
                "condition": {
                    "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
                    "value": "True",
                },
                "timeout": self._checkloop_kustomization_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
            },
            {
                # NOTE(review): item is hard-coded to AWS; other clouds are not
                # handled by this check yet.
                "item": "nodegroup_aws",
                "name": nodegroup_name,
                "namespace": "",
                "condition": {
                    "jsonpath_filter": "status.conditions[?(@.type=='Synced')].status",
                    "value": "True",
                },
                "timeout": self._checkloop_resource_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.NODEGROUP",
            },
            {
                "item": "nodegroup_aws",
                "name": nodegroup_name,
                "namespace": "",
                "condition": {
                    "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
                    "value": "True",
                },
                "timeout": self._checkloop_resource_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEGROUP",
            },
        ]
        self.logger.info(f"Checking list: {checkings_list}")
        result, message = await self.common_check_list(
            op_id, checkings_list, "nodegroups", db_nodegroup
        )
        if not result:
            return False, message
        return True, "OK"

    async def scale(self, params, order_id):
        """Launch the 'scale_nodegroup' workflow; mark the nodegroup READY on success.

        :param params: dict with "nodegroup_id" and "operation_id"
        :param order_id: kafka order id (not used directly here)
        :return: None
        """
        self.logger.info("Scale nodegroup Enter")
        # Fix: avoid NameError in the exit log when the workflow fails.
        resource_status = None

        op_id = params["operation_id"]
        nodegroup_id = params["nodegroup_id"]

        # To initialize the operation states
        self.initialize_operation(nodegroup_id, op_id)

        db_nodegroup = self.db.get_one(self.db_collection, {"_id": nodegroup_id})
        db_cluster = self.db.get_one("clusters", {"_id": db_nodegroup["cluster_id"]})
        op_params = self.get_operation_params(db_nodegroup, op_id)
        db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})

        workflow_content = {
            "nodegroup": db_nodegroup,
            "cluster": db_cluster,
            "vim_account": db_vim,
        }
        self.logger.info(f"Workflow content: {workflow_content}")

        workflow_res, workflow_name, _ = await self.odu.launch_workflow(
            "scale_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_nodegroup
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "scale_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "scale_nodegroup", op_id, op_params, db_nodegroup
            )

            if resource_status:
                db_nodegroup["state"] = "READY"
                self.db.set_one(
                    self.db_collection, {"_id": db_nodegroup["_id"]}, db_nodegroup
                )
        self.logger.info(
            f"Nodegroup Scale Exit with resource status: {resource_status}"
        )
        return

    async def check_scale_nodegroup(self, op_id, op_params, content):
        """Check the resources touched by the 'scale_nodegroup' workflow.

        Waits for the kustomization to be Ready and for the AWS nodegroup's
        reported desiredSize to match the requested node_count.

        :param op_id: operation id
        :param op_params: operation params; must contain "node_count"
        :param content: the nodegroup DB document
        :return: (bool, str) result/message tuple from common_check_list
        """
        self.logger.info(
            f"check_scale_nodegroup Operation {op_id}. Params: {op_params}."
        )
        self.logger.debug(f"Content: {content}")
        db_nodegroup = content
        nodegroup_name = db_nodegroup["git_name"].lower()
        nodegroup_kustomization_name = nodegroup_name
        checkings_list = [
            {
                "item": "kustomization",
                "name": nodegroup_kustomization_name,
                "namespace": "managed-resources",
                "condition": {
                    "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
                    "value": "True",
                },
                "timeout": self._checkloop_kustomization_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
            },
            {
                "item": "nodegroup_aws",
                "name": nodegroup_name,
                "namespace": "",
                "condition": {
                    "jsonpath_filter": "status.atProvider.scalingConfig[0].desiredSize",
                    "value": f"{op_params['node_count']}",
                },
                "timeout": self._checkloop_resource_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.NODEGROUP",
            },
        ]
        self.logger.info(f"Checking list: {checkings_list}")
        return await self.common_check_list(
            op_id, checkings_list, "nodegroups", db_nodegroup
        )

    async def delete(self, params, order_id):
        """Launch the 'delete_nodegroup' workflow; on success decrement the
        cluster node count and remove the nodegroup document.

        :param params: dict with "nodegroup_id" and "operation_id"
        :param order_id: kafka order id (not used directly here)
        :return: None
        """
        self.logger.info("Delete nodegroup Enter")
        # Fix: avoid NameError in the exit log when the workflow fails.
        resource_status = None

        op_id = params["operation_id"]
        nodegroup_id = params["nodegroup_id"]

        # To initialize the operation states
        self.initialize_operation(nodegroup_id, op_id)

        db_nodegroup = self.db.get_one(self.db_collection, {"_id": nodegroup_id})
        db_cluster = self.db.get_one("clusters", {"_id": db_nodegroup["cluster_id"]})
        op_params = self.get_operation_params(db_nodegroup, op_id)

        workflow_content = {"nodegroup": db_nodegroup, "cluster": db_cluster}

        workflow_res, workflow_name, _ = await self.odu.launch_workflow(
            "delete_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info("workflow_name is: {}".format(workflow_name))

        workflow_status = await self.check_workflow_and_update_db(
            op_id, workflow_name, db_nodegroup
        )

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "delete_nodegroup", op_id, op_params, workflow_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            resource_status, content = await self.check_resource_and_update_db(
                "delete_nodegroup", op_id, op_params, db_nodegroup
            )

            if resource_status:
                # NOTE(review): assumes the cluster document always carries a
                # numeric "node_count"; a missing key would make .get() return
                # None and break the subtraction — TODO confirm with writers.
                node_count = db_cluster.get("node_count")
                new_node_count = node_count - 1
                self.logger.info(f"New Node count: {new_node_count}")
                db_cluster["node_count"] = new_node_count
                self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
                # The state update is persisted before the document is removed.
                db_nodegroup["state"] = "DELETED"
                self.db.set_one(
                    self.db_collection, {"_id": db_nodegroup["_id"]}, db_nodegroup
                )
                self.db.del_one(self.db_collection, {"_id": db_nodegroup["_id"]})
        self.logger.info(
            f"Nodegroup Delete Exit with resource status: {resource_status}"
        )
        return

    async def check_delete_nodegroup(self, op_id, op_params, content):
        """Check that the resources of a deleted nodegroup are actually gone.

        :param op_id: operation id
        :param op_params: operation params (logged only)
        :param content: the nodegroup DB document
        :return: (bool, str) result/message tuple from common_check_list
        """
        self.logger.info(
            f"check_delete_nodegroup Operation {op_id}. Params: {op_params}."
        )
        db_nodegroup = content
        nodegroup_name = db_nodegroup["git_name"].lower()
        nodegroup_kustomization_name = nodegroup_name
        checkings_list = [
            {
                "item": "kustomization",
                "name": nodegroup_kustomization_name,
                "namespace": "managed-resources",
                "deleted": True,
                "timeout": self._checkloop_kustomization_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
            },
            {
                "item": "nodegroup_aws",
                "name": nodegroup_name,
                "namespace": "",
                "deleted": True,
                "timeout": self._checkloop_resource_timeout,
                "enable": True,
                "resourceState": "IN_PROGRESS.RESOURCE_DELETED.NODEGROUP",
            },
        ]
        self.logger.info(f"Checking list: {checkings_list}")
        return await self.common_check_list(
            op_id, checkings_list, "nodegroups", db_nodegroup
        )
340
341
garciadeblas72412282024-11-07 12:41:54 +0100342class ClusterLcm(GitOpsLcm):
garciadeblas96b94f52024-07-08 16:18:21 +0200343 db_collection = "clusters"
rshri932105f2024-07-05 15:11:55 +0000344
345 def __init__(self, msg, lcm_tasks, config):
346 """
347 Init, Connect to database, filesystem storage, and messaging
348 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
349 :return: None
350 """
garciadeblas72412282024-11-07 12:41:54 +0100351 super().__init__(msg, lcm_tasks, config)
352 self._workflows = {
353 "create_cluster": {
354 "check_resource_function": self.check_create_cluster,
355 },
garciadeblasb0a42c22024-11-13 16:00:10 +0100356 "register_cluster": {
357 "check_resource_function": self.check_register_cluster,
358 },
359 "update_cluster": {
360 "check_resource_function": self.check_update_cluster,
361 },
garciadeblasad6d1ba2025-01-22 16:02:18 +0100362 "delete_cluster": {
363 "check_resource_function": self.check_delete_cluster,
364 },
garciadeblas72412282024-11-07 12:41:54 +0100365 }
rshri932105f2024-07-05 15:11:55 +0000366 self.regist = vim_sdn.K8sClusterLcm(msg, self.lcm_tasks, config)
367
    async def create(self, params, order_id):
        """Create a cluster via the 'create_cluster' ODU workflow.

        Launches the workflow, tracks workflow and resource status into the
        "clusters" collection, retrieves credentials/subnets on success, and
        registers the cluster in the "k8sclusters" collection.

        :param params: dict with "cluster_id" and "operation_id"
        :param order_id: kafka order id, forwarded to the k8scluster registration
        :return: None
        """
        self.logger.info("cluster Create Enter")
        # Initialized here so the final update_operation_history /
        # update_profile_state calls are safe on every early path.
        workflow_status = None
        resource_status = None

        # To get the cluster and op ids
        cluster_id = params["cluster_id"]
        op_id = params["operation_id"]

        # To initialize the operation states
        self.initialize_operation(cluster_id, op_id)

        # To get the cluster
        db_cluster = self.db.get_one("clusters", {"_id": cluster_id})

        # To get the operation params details
        op_params = self.get_operation_params(db_cluster, op_id)

        # To copy the cluster content and decrypting fields to use in workflows
        db_cluster_copy = self.decrypted_copy(db_cluster)
        workflow_content = {
            "cluster": db_cluster_copy,
        }

        # To get the vim account details
        db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
        workflow_content["vim_account"] = db_vim

        workflow_res, workflow_name, _ = await self.odu.launch_workflow(
            "create_cluster", op_id, op_params, workflow_content
        )
        if not workflow_res:
            # Workflow could not even be launched: record the failure and bail
            # out after cleaning up whatever the launch attempt created.
            self.logger.error(f"Failed to launch workflow: {workflow_name}")
            db_cluster["state"] = "FAILED_CREATION"
            db_cluster["resourceState"] = "ERROR"
            db_cluster = self.update_operation_history(
                db_cluster, op_id, workflow_status=False, resource_status=None
            )
            self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
            # Clean items used in the workflow, no matter if the workflow succeeded
            clean_status, clean_msg = await self.odu.clean_items_workflow(
                "create_cluster", op_id, op_params, workflow_content
            )
            self.logger.info(
                f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
            )
            return

        self.logger.info("workflow_name is: {}".format(workflow_name))
        workflow_status, workflow_msg = await self.odu.check_workflow_status(
            op_id, workflow_name
        )
        self.logger.info(
            "workflow_status is: {} and workflow_msg is: {}".format(
                workflow_status, workflow_msg
            )
        )
        if workflow_status:
            db_cluster["state"] = "CREATED"
            db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
        else:
            db_cluster["state"] = "FAILED_CREATION"
            db_cluster["resourceState"] = "ERROR"
        # has to call update_operation_history return content
        db_cluster = self.update_operation_history(
            db_cluster, op_id, workflow_status, None
        )
        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)

        # Clean items used in the workflow, no matter if the workflow succeeded
        clean_status, clean_msg = await self.odu.clean_items_workflow(
            "create_cluster", op_id, op_params, workflow_content
        )
        self.logger.info(
            f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
        )

        if workflow_status:
            # Only poll the actual cloud resources when the workflow succeeded.
            resource_status, resource_msg = await self.check_resource_status(
                "create_cluster", op_id, op_params, workflow_content
            )
            self.logger.info(
                "resource_status is :{} and resource_msg is :{}".format(
                    resource_status, resource_msg
                )
            )
            if resource_status:
                db_cluster["resourceState"] = "READY"
            else:
                db_cluster["resourceState"] = "ERROR"

        db_cluster["operatingState"] = "IDLE"
        db_cluster = self.update_operation_history(
            db_cluster, op_id, workflow_status, resource_status
        )
        db_cluster["current_operation"] = None

        # Retrieve credentials and subnets and register the cluster in k8sclusters collection
        cluster_creds = None
        db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
        if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
            # Retrieve credentials
            result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
            # TODO: manage the case where the credentials are not available
            if result:
                db_cluster["credentials"] = cluster_creds

            # Retrieve subnets: prefer the ones given in the operation params,
            # otherwise (AWS only) discover them from the Crossplane objects.
            if op_params.get("private_subnet") and op_params.get("public_subnet"):
                db_cluster["private_subnet"] = op_params["private_subnet"]
                db_cluster["public_subnet"] = op_params["public_subnet"]
            else:
                if db_vim["vim_type"] == "aws":
                    generic_object = await self.odu.list_object(
                        api_group="ec2.aws.upbound.io",
                        api_plural="subnets",
                        api_version="v1beta1",
                    )
                    private_subnet = []
                    public_subnet = []
                    for subnet in generic_object:
                        labels = subnet.get("metadata", {}).get("labels", {})
                        status = subnet.get("status", {}).get("atProvider", {})
                        # Extract relevant label values
                        cluster_label = labels.get("cluster")
                        access_label = labels.get("access")
                        subnet_id = status.get("id")
                        # Apply filtering: keep only subnets labelled with this
                        # cluster's name that already have a provider-side id.
                        if cluster_label == db_cluster["name"] and subnet_id:
                            if access_label == "private":
                                private_subnet.append(subnet_id)
                            elif access_label == "public":
                                public_subnet.append(subnet_id)
                    # Update db_cluster
                    db_cluster["private_subnet"] = private_subnet
                    db_cluster["public_subnet"] = public_subnet
            self.logger.info("DB cluster: {}".format(db_cluster))

            # Register the cluster in k8sclusters collection
            db_register["credentials"] = cluster_creds
            # To call the lcm.py for registering the cluster in k8scluster lcm.
            self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
            register = await self.regist.create(db_register, order_id)
            self.logger.debug(f"Register is : {register}")
        else:
            # Creation failed: mark the pre-created k8scluster entry as broken.
            db_register["_admin"]["operationalState"] = "ERROR"
            self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)

        # Update db_cluster
        self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
        self.update_default_profile_agekeys(db_cluster_copy)
        self.update_profile_state(db_cluster, workflow_status, resource_status)

        return
522
garciadeblas72412282024-11-07 12:41:54 +0100523 async def check_create_cluster(self, op_id, op_params, content):
garciadeblas7eae6f42024-11-08 10:41:38 +0100524 self.logger.info(
525 f"check_create_cluster Operation {op_id}. Params: {op_params}."
526 )
garciadeblas72412282024-11-07 12:41:54 +0100527 db_cluster = content["cluster"]
528 cluster_name = db_cluster["git_name"].lower()
529 cluster_kustomization_name = cluster_name
530 db_vim_account = content["vim_account"]
531 cloud_type = db_vim_account["vim_type"]
garciadeblas1ca09852025-05-30 11:19:06 +0200532 nodegroup_name = ""
garciadeblas72412282024-11-07 12:41:54 +0100533 if cloud_type == "aws":
garciadeblas1ca09852025-05-30 11:19:06 +0200534 nodegroup_name = f"{cluster_name}-nodegroup"
garciadeblas72412282024-11-07 12:41:54 +0100535 cluster_name = f"{cluster_name}-cluster"
536 elif cloud_type == "gcp":
garciadeblas1ca09852025-05-30 11:19:06 +0200537 nodegroup_name = f"nodepool-{cluster_name}"
garciadeblas72412282024-11-07 12:41:54 +0100538 bootstrap = op_params.get("bootstrap", True)
539 if cloud_type in ("azure", "gcp", "aws"):
540 checkings_list = [
541 {
542 "item": "kustomization",
543 "name": cluster_kustomization_name,
544 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +0100545 "condition": {
546 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
547 "value": "True",
548 },
yshahcb9075f2024-11-22 12:08:57 +0000549 "timeout": 1500,
garciadeblas72412282024-11-07 12:41:54 +0100550 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100551 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
garciadeblas72412282024-11-07 12:41:54 +0100552 },
553 {
554 "item": f"cluster_{cloud_type}",
555 "name": cluster_name,
556 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100557 "condition": {
558 "jsonpath_filter": "status.conditions[?(@.type=='Synced')].status",
559 "value": "True",
560 },
garciadeblas72412282024-11-07 12:41:54 +0100561 "timeout": self._checkloop_resource_timeout,
562 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100563 "resourceState": "IN_PROGRESS.RESOURCE_SYNCED.CLUSTER",
garciadeblas72412282024-11-07 12:41:54 +0100564 },
565 {
566 "item": f"cluster_{cloud_type}",
567 "name": cluster_name,
568 "namespace": "",
garciadeblas7cf480d2025-01-27 16:53:45 +0100569 "condition": {
570 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
571 "value": "True",
572 },
garciadeblas72412282024-11-07 12:41:54 +0100573 "timeout": self._checkloop_resource_timeout,
574 "enable": True,
garciadeblas7eae6f42024-11-08 10:41:38 +0100575 "resourceState": "IN_PROGRESS.RESOURCE_READY.CLUSTER",
garciadeblas72412282024-11-07 12:41:54 +0100576 },
577 {
578 "item": "kustomization",
579 "name": f"{cluster_kustomization_name}-bstrp-fluxctrl",
580 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +0100581 "condition": {
582 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
583 "value": "True",
584 },
yshahcb9075f2024-11-22 12:08:57 +0000585 "timeout": self._checkloop_resource_timeout,
garciadeblas72412282024-11-07 12:41:54 +0100586 "enable": bootstrap,
garciadeblas7eae6f42024-11-08 10:41:38 +0100587 "resourceState": "IN_PROGRESS.BOOTSTRAP_OK",
garciadeblas72412282024-11-07 12:41:54 +0100588 },
589 ]
590 else:
591 return False, "Not suitable VIM account to check cluster status"
rshrif8911b92025-06-11 18:19:07 +0000592 if cloud_type != "aws":
593 if nodegroup_name:
594 nodegroup_check = {
595 "item": f"nodegroup_{cloud_type}",
596 "name": nodegroup_name,
597 "namespace": "",
598 "condition": {
599 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
600 "value": "True",
601 },
602 "timeout": self._checkloop_resource_timeout,
603 "enable": True,
604 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODEGROUP",
605 }
606 checkings_list.insert(3, nodegroup_check)
yshahcb9075f2024-11-22 12:08:57 +0000607 return await self.common_check_list(
608 op_id, checkings_list, "clusters", db_cluster
609 )
garciadeblas72412282024-11-07 12:41:54 +0100610
garciadeblasd41e9292025-03-11 15:44:25 +0100611 def update_default_profile_agekeys(self, db_cluster):
612 profiles = [
613 "infra_controller_profiles",
614 "infra_config_profiles",
615 "app_profiles",
616 "resource_profiles",
617 ]
618 self.logger.debug("the db_cluster is :{}".format(db_cluster))
619 for profile_type in profiles:
620 profile_id = db_cluster[profile_type]
621 db_collection = self.profile_collection_mapping[profile_type]
622 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
623 db_profile["age_pubkey"] = db_cluster["age_pubkey"]
624 db_profile["age_privkey"] = db_cluster["age_privkey"]
625 self.encrypt_age_keys(db_profile)
626 self.db.set_one(db_collection, {"_id": db_profile["_id"]}, db_profile)
627
garciadeblas96b94f52024-07-08 16:18:21 +0200628 def update_profile_state(self, db_cluster, workflow_status, resource_status):
rshri932105f2024-07-05 15:11:55 +0000629 profiles = [
630 "infra_controller_profiles",
631 "infra_config_profiles",
632 "app_profiles",
633 "resource_profiles",
634 ]
garciadeblasd41e9292025-03-11 15:44:25 +0100635 self.logger.debug("the db_cluster is :{}".format(db_cluster))
rshri932105f2024-07-05 15:11:55 +0000636 for profile_type in profiles:
garciadeblas96b94f52024-07-08 16:18:21 +0200637 profile_id = db_cluster[profile_type]
rshri948f7de2024-12-02 03:42:35 +0000638 db_collection = self.profile_collection_mapping[profile_type]
rshri932105f2024-07-05 15:11:55 +0000639 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
yshahcb9075f2024-11-22 12:08:57 +0000640 op_id = db_profile["operationHistory"][-1].get("op_id")
garciadeblas96b94f52024-07-08 16:18:21 +0200641 db_profile["state"] = db_cluster["state"]
642 db_profile["resourceState"] = db_cluster["resourceState"]
643 db_profile["operatingState"] = db_cluster["operatingState"]
rshri932105f2024-07-05 15:11:55 +0000644 db_profile = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000645 db_profile, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000646 )
rshri932105f2024-07-05 15:11:55 +0000647 self.db.set_one(db_collection, {"_id": db_profile["_id"]}, db_profile)
648
rshri948f7de2024-12-02 03:42:35 +0000649 async def delete(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000650 self.logger.info("cluster delete Enter")
rshri948f7de2024-12-02 03:42:35 +0000651
garciadeblas926ffac2025-02-12 16:45:40 +0100652 try:
653 # To get the cluster and op ids
654 cluster_id = params["cluster_id"]
655 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000656
garciadeblas926ffac2025-02-12 16:45:40 +0100657 # To initialize the operation states
658 self.initialize_operation(cluster_id, op_id)
rshri948f7de2024-12-02 03:42:35 +0000659
garciadeblas926ffac2025-02-12 16:45:40 +0100660 # To get the cluster
661 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
garciadeblas995cbf32024-12-18 12:54:00 +0100662
garciadeblas926ffac2025-02-12 16:45:40 +0100663 # To get the operation params details
664 op_params = self.get_operation_params(db_cluster, op_id)
garciadeblas995cbf32024-12-18 12:54:00 +0100665
garciadeblas926ffac2025-02-12 16:45:40 +0100666 # To copy the cluster content and decrypting fields to use in workflows
667 workflow_content = {
668 "cluster": self.decrypted_copy(db_cluster),
669 }
rshri948f7de2024-12-02 03:42:35 +0000670
garciadeblas926ffac2025-02-12 16:45:40 +0100671 # To get the vim account details
672 db_vim = self.db.get_one(
673 "vim_accounts", {"name": db_cluster["vim_account"]}
674 )
675 workflow_content["vim_account"] = db_vim
676 except Exception as e:
677 self.logger.debug(traceback.format_exc())
678 self.logger.debug(f"Exception: {e}", exc_info=True)
679 raise e
garciadeblasad6d1ba2025-01-22 16:02:18 +0100680
garciadeblas61a4c692025-07-17 13:04:13 +0200681 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100682 "delete_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200683 )
garciadeblas41859ce2025-02-04 16:08:51 +0100684 if not workflow_res:
685 self.logger.error(f"Failed to launch workflow: {workflow_name}")
686 db_cluster["state"] = "FAILED_DELETION"
687 db_cluster["resourceState"] = "ERROR"
688 db_cluster = self.update_operation_history(
689 db_cluster, op_id, workflow_status=False, resource_status=None
690 )
691 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
692 # Clean items used in the workflow, no matter if the workflow succeeded
693 clean_status, clean_msg = await self.odu.clean_items_workflow(
694 "delete_cluster", op_id, op_params, workflow_content
695 )
696 self.logger.info(
697 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
698 )
699 return
rshri932105f2024-07-05 15:11:55 +0000700
garciadeblas26d733c2025-02-03 16:12:43 +0100701 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200702 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +0100703 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +0200704 )
rshri932105f2024-07-05 15:11:55 +0000705 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100706 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000707 workflow_status, workflow_msg
708 )
709 )
710 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200711 db_cluster["state"] = "DELETED"
712 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +0000713 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200714 db_cluster["state"] = "FAILED_DELETION"
715 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000716 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000717 db_cluster = self.update_operation_history(
718 db_cluster, op_id, workflow_status, None
719 )
garciadeblas96b94f52024-07-08 16:18:21 +0200720 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000721
garciadeblas98f9a3d2024-12-10 13:42:47 +0100722 # Clean items used in the workflow or in the cluster, no matter if the workflow succeeded
723 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100724 "delete_cluster", op_id, op_params, workflow_content
garciadeblas98f9a3d2024-12-10 13:42:47 +0100725 )
726 self.logger.info(
727 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
728 )
729
rshri932105f2024-07-05 15:11:55 +0000730 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100731 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100732 "delete_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000733 )
734 self.logger.info(
735 "resource_status is :{} and resource_msg is :{}".format(
736 resource_status, resource_msg
737 )
738 )
739 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +0200740 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +0000741 else:
garciadeblas96b94f52024-07-08 16:18:21 +0200742 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +0000743
garciadeblas96b94f52024-07-08 16:18:21 +0200744 db_cluster["operatingState"] = "IDLE"
745 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000746 db_cluster, op_id, workflow_status, resource_status
garciadeblas96b94f52024-07-08 16:18:21 +0200747 )
shahithya70a3fc92024-11-12 11:01:05 +0000748 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +0200749 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +0000750
yshahb36649f2025-02-28 09:01:51 +0000751 force = params.get("force", False)
752 if force:
753 force_delete_status = self.check_force_delete_and_delete_from_db(
754 cluster_id, workflow_status, resource_status, force
755 )
756 if force_delete_status:
757 return
758
garciadeblas96b94f52024-07-08 16:18:21 +0200759 # To delete it from DB
760 if db_cluster["state"] == "DELETED":
761 self.delete_cluster(db_cluster)
rshri948f7de2024-12-02 03:42:35 +0000762
763 # To delete it from k8scluster collection
764 self.db.del_one("k8sclusters", {"name": db_cluster["name"]})
765
rshri932105f2024-07-05 15:11:55 +0000766 return
767
garciadeblasad6d1ba2025-01-22 16:02:18 +0100768 async def check_delete_cluster(self, op_id, op_params, content):
769 self.logger.info(
770 f"check_delete_cluster Operation {op_id}. Params: {op_params}."
771 )
772 self.logger.debug(f"Content: {content}")
773 db_cluster = content["cluster"]
774 cluster_name = db_cluster["git_name"].lower()
775 cluster_kustomization_name = cluster_name
776 db_vim_account = content["vim_account"]
777 cloud_type = db_vim_account["vim_type"]
778 if cloud_type == "aws":
779 cluster_name = f"{cluster_name}-cluster"
780 if cloud_type in ("azure", "gcp", "aws"):
781 checkings_list = [
782 {
783 "item": "kustomization",
784 "name": cluster_kustomization_name,
785 "namespace": "managed-resources",
786 "deleted": True,
787 "timeout": self._checkloop_kustomization_timeout,
788 "enable": True,
789 "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
790 },
791 {
792 "item": f"cluster_{cloud_type}",
793 "name": cluster_name,
794 "namespace": "",
795 "deleted": True,
796 "timeout": self._checkloop_resource_timeout,
797 "enable": True,
798 "resourceState": "IN_PROGRESS.RESOURCE_DELETED.CLUSTER",
799 },
800 ]
801 else:
802 return False, "Not suitable VIM account to check cluster status"
803 return await self.common_check_list(
804 op_id, checkings_list, "clusters", db_cluster
805 )
806
garciadeblas96b94f52024-07-08 16:18:21 +0200807 def delete_cluster(self, db_cluster):
808 # Actually, item_content is equal to db_cluster
rshri932105f2024-07-05 15:11:55 +0000809 # detach profiles
810 update_dict = None
811 profiles_to_detach = [
812 "infra_controller_profiles",
813 "infra_config_profiles",
814 "app_profiles",
815 "resource_profiles",
816 ]
rshri948f7de2024-12-02 03:42:35 +0000817 """
rshri932105f2024-07-05 15:11:55 +0000818 profiles_collection = {
819 "infra_controller_profiles": "k8sinfra_controller",
820 "infra_config_profiles": "k8sinfra_config",
821 "app_profiles": "k8sapp",
822 "resource_profiles": "k8sresource",
823 }
rshri948f7de2024-12-02 03:42:35 +0000824 """
rshri932105f2024-07-05 15:11:55 +0000825 for profile_type in profiles_to_detach:
garciadeblas96b94f52024-07-08 16:18:21 +0200826 if db_cluster.get(profile_type):
garciadeblas96b94f52024-07-08 16:18:21 +0200827 profile_ids = db_cluster[profile_type]
rshri932105f2024-07-05 15:11:55 +0000828 profile_ids_copy = deepcopy(profile_ids)
rshri932105f2024-07-05 15:11:55 +0000829 for profile_id in profile_ids_copy:
rshri948f7de2024-12-02 03:42:35 +0000830 db_collection = self.profile_collection_mapping[profile_type]
rshri932105f2024-07-05 15:11:55 +0000831 db_profile = self.db.get_one(db_collection, {"_id": profile_id})
garciadeblasc2552852024-10-22 12:39:32 +0200832 self.logger.debug("the db_profile is :{}".format(db_profile))
833 self.logger.debug(
garciadeblas96b94f52024-07-08 16:18:21 +0200834 "the item_content name is :{}".format(db_cluster["name"])
rshri932105f2024-07-05 15:11:55 +0000835 )
garciadeblasc2552852024-10-22 12:39:32 +0200836 self.logger.debug(
rshri932105f2024-07-05 15:11:55 +0000837 "the db_profile name is :{}".format(db_profile["name"])
838 )
garciadeblas96b94f52024-07-08 16:18:21 +0200839 if db_cluster["name"] == db_profile["name"]:
yshah6bad8892025-02-11 12:37:04 +0000840 self.delete_profile_ksu(profile_id, profile_type)
rshri932105f2024-07-05 15:11:55 +0000841 self.db.del_one(db_collection, {"_id": profile_id})
842 else:
rshri932105f2024-07-05 15:11:55 +0000843 profile_ids.remove(profile_id)
844 update_dict = {profile_type: profile_ids}
rshri932105f2024-07-05 15:11:55 +0000845 self.db.set_one(
garciadeblas96b94f52024-07-08 16:18:21 +0200846 "clusters", {"_id": db_cluster["_id"]}, update_dict
rshri932105f2024-07-05 15:11:55 +0000847 )
garciadeblas96b94f52024-07-08 16:18:21 +0200848 self.db.del_one("clusters", {"_id": db_cluster["_id"]})
rshri932105f2024-07-05 15:11:55 +0000849
rshri948f7de2024-12-02 03:42:35 +0000850 async def attach_profile(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000851 self.logger.info("profile attach Enter")
rshri948f7de2024-12-02 03:42:35 +0000852
garciadeblas995cbf32024-12-18 12:54:00 +0100853 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000854 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000855 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000856
857 # To initialize the operation states
858 self.initialize_operation(cluster_id, op_id)
859
garciadeblas995cbf32024-12-18 12:54:00 +0100860 # To get the cluster
861 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
862
863 # To get the operation params details
864 op_params = self.get_operation_params(db_cluster, op_id)
865
866 # To copy the cluster content and decrypting fields to use in workflows
867 workflow_content = {
868 "cluster": self.decrypted_copy(db_cluster),
869 }
rshri948f7de2024-12-02 03:42:35 +0000870
871 # To get the profile details
872 profile_id = params["profile_id"]
873 profile_type = params["profile_type"]
874 profile_collection = self.profile_collection_mapping[profile_type]
875 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
876 db_profile["profile_type"] = profile_type
877 # content["profile"] = db_profile
garciadeblas995cbf32024-12-18 12:54:00 +0100878 workflow_content["profile"] = db_profile
rshri932105f2024-07-05 15:11:55 +0000879
garciadeblas61a4c692025-07-17 13:04:13 +0200880 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100881 "attach_profile_to_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200882 )
garciadeblas41859ce2025-02-04 16:08:51 +0100883 if not workflow_res:
884 self.logger.error(f"Failed to launch workflow: {workflow_name}")
885 db_cluster["resourceState"] = "ERROR"
886 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
887 db_cluster = self.update_operation_history(
888 db_cluster, op_id, workflow_status=False, resource_status=None
889 )
890 return
rshri932105f2024-07-05 15:11:55 +0000891
garciadeblas26d733c2025-02-03 16:12:43 +0100892 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200893 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +0100894 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +0200895 )
rshri932105f2024-07-05 15:11:55 +0000896 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100897 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000898 workflow_status, workflow_msg
899 )
900 )
901 if workflow_status:
902 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
903 else:
904 db_cluster["resourceState"] = "ERROR"
905 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000906 db_cluster = self.update_operation_history(
907 db_cluster, op_id, workflow_status, None
908 )
rshri932105f2024-07-05 15:11:55 +0000909 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
910
911 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100912 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +0100913 "attach_profile_to_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +0000914 )
915 self.logger.info(
916 "resource_status is :{} and resource_msg is :{}".format(
917 resource_status, resource_msg
918 )
919 )
920 if resource_status:
921 db_cluster["resourceState"] = "READY"
922 else:
923 db_cluster["resourceState"] = "ERROR"
924
925 db_cluster["operatingState"] = "IDLE"
926 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +0000927 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +0000928 )
rshri932105f2024-07-05 15:11:55 +0000929 profile_list = db_cluster[profile_type]
rshri932105f2024-07-05 15:11:55 +0000930 if resource_status:
rshri932105f2024-07-05 15:11:55 +0000931 profile_list.append(profile_id)
rshri932105f2024-07-05 15:11:55 +0000932 db_cluster[profile_type] = profile_list
shahithya70a3fc92024-11-12 11:01:05 +0000933 db_cluster["current_operation"] = None
rshri932105f2024-07-05 15:11:55 +0000934 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
935
936 return
937
rshri948f7de2024-12-02 03:42:35 +0000938 async def detach_profile(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +0000939 self.logger.info("profile dettach Enter")
rshri948f7de2024-12-02 03:42:35 +0000940
garciadeblas995cbf32024-12-18 12:54:00 +0100941 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +0000942 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +0000943 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +0000944
945 # To initialize the operation states
946 self.initialize_operation(cluster_id, op_id)
947
garciadeblas995cbf32024-12-18 12:54:00 +0100948 # To get the cluster
949 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
950
951 # To get the operation params details
952 op_params = self.get_operation_params(db_cluster, op_id)
953
954 # To copy the cluster content and decrypting fields to use in workflows
955 workflow_content = {
956 "cluster": self.decrypted_copy(db_cluster),
957 }
rshri948f7de2024-12-02 03:42:35 +0000958
959 # To get the profile details
960 profile_id = params["profile_id"]
961 profile_type = params["profile_type"]
962 profile_collection = self.profile_collection_mapping[profile_type]
963 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
964 db_profile["profile_type"] = profile_type
garciadeblas995cbf32024-12-18 12:54:00 +0100965 workflow_content["profile"] = db_profile
rshri932105f2024-07-05 15:11:55 +0000966
garciadeblas61a4c692025-07-17 13:04:13 +0200967 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +0100968 "detach_profile_from_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +0200969 )
garciadeblas41859ce2025-02-04 16:08:51 +0100970 if not workflow_res:
971 self.logger.error(f"Failed to launch workflow: {workflow_name}")
972 db_cluster["resourceState"] = "ERROR"
973 db_cluster = self.update_operation_history(
974 db_cluster, op_id, workflow_status=False, resource_status=None
975 )
976 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
977 return
rshri932105f2024-07-05 15:11:55 +0000978
garciadeblas26d733c2025-02-03 16:12:43 +0100979 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +0200980 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +0100981 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +0200982 )
rshri932105f2024-07-05 15:11:55 +0000983 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +0100984 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +0000985 workflow_status, workflow_msg
986 )
987 )
988 if workflow_status:
989 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
990 else:
991 db_cluster["resourceState"] = "ERROR"
992 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +0000993 db_cluster = self.update_operation_history(
994 db_cluster, op_id, workflow_status, None
995 )
rshri932105f2024-07-05 15:11:55 +0000996 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
997
998 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +0100999 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001000 "detach_profile_from_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001001 )
1002 self.logger.info(
1003 "resource_status is :{} and resource_msg is :{}".format(
1004 resource_status, resource_msg
1005 )
1006 )
1007 if resource_status:
1008 db_cluster["resourceState"] = "READY"
1009 else:
1010 db_cluster["resourceState"] = "ERROR"
1011
1012 db_cluster["operatingState"] = "IDLE"
1013 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001014 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +00001015 )
rshri932105f2024-07-05 15:11:55 +00001016 profile_list = db_cluster[profile_type]
1017 self.logger.info("profile list is : {}".format(profile_list))
1018 if resource_status:
rshri932105f2024-07-05 15:11:55 +00001019 profile_list.remove(profile_id)
rshri932105f2024-07-05 15:11:55 +00001020 db_cluster[profile_type] = profile_list
shahithya70a3fc92024-11-12 11:01:05 +00001021 db_cluster["current_operation"] = None
rshri932105f2024-07-05 15:11:55 +00001022 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1023
1024 return
1025
rshri948f7de2024-12-02 03:42:35 +00001026 async def register(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001027 self.logger.info("cluster register enter")
garciadeblas8bdb3d42025-04-04 00:19:13 +02001028 workflow_status = None
1029 resource_status = None
rshri932105f2024-07-05 15:11:55 +00001030
garciadeblas995cbf32024-12-18 12:54:00 +01001031 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +00001032 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +00001033 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +00001034
1035 # To initialize the operation states
1036 self.initialize_operation(cluster_id, op_id)
1037
garciadeblas995cbf32024-12-18 12:54:00 +01001038 # To get the cluster
1039 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1040
1041 # To get the operation params details
1042 op_params = self.get_operation_params(db_cluster, op_id)
1043
1044 # To copy the cluster content and decrypting fields to use in workflows
garciadeblas8bdb3d42025-04-04 00:19:13 +02001045 db_cluster_copy = self.decrypted_copy(db_cluster)
garciadeblas995cbf32024-12-18 12:54:00 +01001046 workflow_content = {
garciadeblas8bdb3d42025-04-04 00:19:13 +02001047 "cluster": db_cluster_copy,
garciadeblas995cbf32024-12-18 12:54:00 +01001048 }
rshric3564942024-11-12 18:12:38 +00001049
garciadeblas61a4c692025-07-17 13:04:13 +02001050 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001051 "register_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001052 )
garciadeblas41859ce2025-02-04 16:08:51 +01001053 if not workflow_res:
1054 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1055 db_cluster["state"] = "FAILED_CREATION"
1056 db_cluster["resourceState"] = "ERROR"
1057 db_cluster = self.update_operation_history(
1058 db_cluster, op_id, workflow_status=False, resource_status=None
1059 )
1060 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1061 # Clean items used in the workflow, no matter if the workflow succeeded
1062 clean_status, clean_msg = await self.odu.clean_items_workflow(
1063 "register_cluster", op_id, op_params, workflow_content
1064 )
1065 self.logger.info(
1066 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1067 )
1068 return
rshri932105f2024-07-05 15:11:55 +00001069
garciadeblas26d733c2025-02-03 16:12:43 +01001070 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001071 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001072 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001073 )
rshri932105f2024-07-05 15:11:55 +00001074 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001075 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001076 workflow_status, workflow_msg
1077 )
1078 )
1079 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001080 db_cluster["state"] = "CREATED"
1081 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +00001082 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001083 db_cluster["state"] = "FAILED_CREATION"
1084 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001085 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001086 db_cluster = self.update_operation_history(
1087 db_cluster, op_id, workflow_status, None
1088 )
garciadeblas96b94f52024-07-08 16:18:21 +02001089 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001090
garciadeblasdde3a312024-09-17 13:25:06 +02001091 # Clean items used in the workflow, no matter if the workflow succeeded
1092 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001093 "register_cluster", op_id, op_params, workflow_content
garciadeblasdde3a312024-09-17 13:25:06 +02001094 )
1095 self.logger.info(
1096 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1097 )
1098
rshri932105f2024-07-05 15:11:55 +00001099 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001100 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001101 "register_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001102 )
1103 self.logger.info(
1104 "resource_status is :{} and resource_msg is :{}".format(
1105 resource_status, resource_msg
1106 )
1107 )
1108 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001109 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +00001110 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001111 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001112
garciadeblas96b94f52024-07-08 16:18:21 +02001113 db_cluster["operatingState"] = "IDLE"
1114 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001115 db_cluster, op_id, workflow_status, resource_status
rshri932105f2024-07-05 15:11:55 +00001116 )
shahithya70a3fc92024-11-12 11:01:05 +00001117 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +02001118 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri948f7de2024-12-02 03:42:35 +00001119
garciadeblas8bdb3d42025-04-04 00:19:13 +02001120 # Update default profile agekeys and state
1121 self.update_default_profile_agekeys(db_cluster_copy)
1122 self.update_profile_state(db_cluster, workflow_status, resource_status)
1123
rshri948f7de2024-12-02 03:42:35 +00001124 db_register = self.db.get_one("k8sclusters", {"name": db_cluster["name"]})
1125 db_register["credentials"] = db_cluster["credentials"]
1126 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
1127
1128 if db_cluster["resourceState"] == "READY" and db_cluster["state"] == "CREATED":
1129 # To call the lcm.py for registering the cluster in k8scluster lcm.
1130 register = await self.regist.create(db_register, order_id)
1131 self.logger.debug(f"Register is : {register}")
1132 else:
1133 db_register["_admin"]["operationalState"] = "ERROR"
1134 self.db.set_one("k8sclusters", {"_id": db_register["_id"]}, db_register)
1135
rshri932105f2024-07-05 15:11:55 +00001136 return
1137
garciadeblasad6d1ba2025-01-22 16:02:18 +01001138 async def check_register_cluster(self, op_id, op_params, content):
1139 self.logger.info(
1140 f"check_register_cluster Operation {op_id}. Params: {op_params}."
1141 )
1142 # self.logger.debug(f"Content: {content}")
1143 db_cluster = content["cluster"]
1144 cluster_name = db_cluster["git_name"].lower()
1145 cluster_kustomization_name = cluster_name
1146 bootstrap = op_params.get("bootstrap", True)
1147 checkings_list = [
1148 {
1149 "item": "kustomization",
1150 "name": f"{cluster_kustomization_name}-bstrp-fluxctrl",
1151 "namespace": "managed-resources",
garciadeblas7cf480d2025-01-27 16:53:45 +01001152 "condition": {
1153 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
1154 "value": "True",
1155 },
garciadeblasad6d1ba2025-01-22 16:02:18 +01001156 "timeout": self._checkloop_kustomization_timeout,
1157 "enable": bootstrap,
1158 "resourceState": "IN_PROGRESS.BOOTSTRAP_OK",
1159 },
1160 ]
1161 return await self.common_check_list(
1162 op_id, checkings_list, "clusters", db_cluster
1163 )
1164
rshri948f7de2024-12-02 03:42:35 +00001165 async def deregister(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001166 self.logger.info("cluster deregister enter")
1167
garciadeblas995cbf32024-12-18 12:54:00 +01001168 # To get the cluster and op ids
rshri948f7de2024-12-02 03:42:35 +00001169 cluster_id = params["cluster_id"]
rshri948f7de2024-12-02 03:42:35 +00001170 op_id = params["operation_id"]
rshri948f7de2024-12-02 03:42:35 +00001171
1172 # To initialize the operation states
1173 self.initialize_operation(cluster_id, op_id)
1174
garciadeblas995cbf32024-12-18 12:54:00 +01001175 # To get the cluster
1176 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1177
1178 # To get the operation params details
1179 op_params = self.get_operation_params(db_cluster, op_id)
1180
1181 # To copy the cluster content and decrypting fields to use in workflows
1182 workflow_content = {
1183 "cluster": self.decrypted_copy(db_cluster),
1184 }
rshri932105f2024-07-05 15:11:55 +00001185
garciadeblas61a4c692025-07-17 13:04:13 +02001186 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001187 "deregister_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001188 )
garciadeblas41859ce2025-02-04 16:08:51 +01001189 if not workflow_res:
1190 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1191 db_cluster["state"] = "FAILED_DELETION"
1192 db_cluster["resourceState"] = "ERROR"
1193 db_cluster = self.update_operation_history(
1194 db_cluster, op_id, workflow_status=False, resource_status=None
1195 )
1196 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
garciadeblas41859ce2025-02-04 16:08:51 +01001197 return
rshri932105f2024-07-05 15:11:55 +00001198
garciadeblas26d733c2025-02-03 16:12:43 +01001199 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001200 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001201 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001202 )
rshri932105f2024-07-05 15:11:55 +00001203 self.logger.info(
garciadeblas26d733c2025-02-03 16:12:43 +01001204 "workflow_status is: {} and workflow_msg is: {}".format(
rshri932105f2024-07-05 15:11:55 +00001205 workflow_status, workflow_msg
1206 )
1207 )
1208 if workflow_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001209 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
rshri932105f2024-07-05 15:11:55 +00001210 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001211 db_cluster["state"] = "FAILED_DELETION"
1212 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001213 # has to call update_operation_history return content
yshahcb9075f2024-11-22 12:08:57 +00001214 db_cluster = self.update_operation_history(
1215 db_cluster, op_id, workflow_status, None
1216 )
garciadeblas96b94f52024-07-08 16:18:21 +02001217 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001218
1219 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001220 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001221 "deregister_cluster", op_id, op_params, workflow_content
rshri932105f2024-07-05 15:11:55 +00001222 )
1223 self.logger.info(
1224 "resource_status is :{} and resource_msg is :{}".format(
1225 resource_status, resource_msg
1226 )
1227 )
1228 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001229 db_cluster["resourceState"] = "READY"
rshri932105f2024-07-05 15:11:55 +00001230 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001231 db_cluster["resourceState"] = "ERROR"
rshri932105f2024-07-05 15:11:55 +00001232
garciadeblas96b94f52024-07-08 16:18:21 +02001233 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001234 db_cluster, op_id, workflow_status, resource_status
garciadeblas96b94f52024-07-08 16:18:21 +02001235 )
1236 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri932105f2024-07-05 15:11:55 +00001237
garciadeblas93380452025-02-05 09:32:52 +01001238 await self.delete(params, order_id)
1239 # Clean items used in the workflow or in the cluster, no matter if the workflow succeeded
1240 clean_status, clean_msg = await self.odu.clean_items_workflow(
1241 "deregister_cluster", op_id, op_params, workflow_content
1242 )
1243 self.logger.info(
1244 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1245 )
1246 return
rshri932105f2024-07-05 15:11:55 +00001247
rshri948f7de2024-12-02 03:42:35 +00001248 async def get_creds(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001249 self.logger.info("Cluster get creds Enter")
rshri948f7de2024-12-02 03:42:35 +00001250 cluster_id = params["cluster_id"]
1251 op_id = params["operation_id"]
1252 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
garciadeblas96b94f52024-07-08 16:18:21 +02001253 result, cluster_creds = await self.odu.get_cluster_credentials(db_cluster)
1254 if result:
1255 db_cluster["credentials"] = cluster_creds
yshahd940c652024-10-17 06:11:12 +00001256 op_len = 0
1257 for operations in db_cluster["operationHistory"]:
1258 if operations["op_id"] == op_id:
1259 db_cluster["operationHistory"][op_len]["result"] = result
1260 db_cluster["operationHistory"][op_len]["endDate"] = time()
1261 op_len += 1
shahithya70a3fc92024-11-12 11:01:05 +00001262 db_cluster["current_operation"] = None
yshahd940c652024-10-17 06:11:12 +00001263 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
rshri948f7de2024-12-02 03:42:35 +00001264 self.logger.info("Cluster Get Creds Exit")
yshah771dea82024-07-05 15:11:49 +00001265 return
1266
rshri948f7de2024-12-02 03:42:35 +00001267 async def update(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001268 self.logger.info("Cluster update Enter")
rshri948f7de2024-12-02 03:42:35 +00001269 # To get the cluster details
1270 cluster_id = params["cluster_id"]
1271 db_cluster = self.db.get_one("clusters", {"_id": cluster_id})
1272
1273 # To get the operation params details
1274 op_id = params["operation_id"]
1275 op_params = self.get_operation_params(db_cluster, op_id)
yshah771dea82024-07-05 15:11:49 +00001276
garciadeblas995cbf32024-12-18 12:54:00 +01001277 # To copy the cluster content and decrypting fields to use in workflows
1278 workflow_content = {
1279 "cluster": self.decrypted_copy(db_cluster),
1280 }
rshric3564942024-11-12 18:12:38 +00001281
1282 # vim account details
1283 db_vim = self.db.get_one("vim_accounts", {"name": db_cluster["vim_account"]})
garciadeblas995cbf32024-12-18 12:54:00 +01001284 workflow_content["vim_account"] = db_vim
rshric3564942024-11-12 18:12:38 +00001285
garciadeblas61a4c692025-07-17 13:04:13 +02001286 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001287 "update_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001288 )
garciadeblas41859ce2025-02-04 16:08:51 +01001289 if not workflow_res:
1290 self.logger.error(f"Failed to launch workflow: {workflow_name}")
1291 db_cluster["resourceState"] = "ERROR"
1292 db_cluster = self.update_operation_history(
1293 db_cluster, op_id, workflow_status=False, resource_status=None
1294 )
1295 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1296 # Clean items used in the workflow, no matter if the workflow succeeded
1297 clean_status, clean_msg = await self.odu.clean_items_workflow(
1298 "update_cluster", op_id, op_params, workflow_content
1299 )
1300 self.logger.info(
1301 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1302 )
1303 return
garciadeblas26d733c2025-02-03 16:12:43 +01001304 self.logger.info("workflow_name is: {}".format(workflow_name))
garciadeblas96b94f52024-07-08 16:18:21 +02001305 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001306 op_id, workflow_name
garciadeblas96b94f52024-07-08 16:18:21 +02001307 )
1308 self.logger.info(
1309 "Workflow Status: {} Workflow Message: {}".format(
1310 workflow_status, workflow_msg
yshah771dea82024-07-05 15:11:49 +00001311 )
garciadeblas96b94f52024-07-08 16:18:21 +02001312 )
1313
1314 if workflow_status:
1315 db_cluster["resourceState"] = "IN_PROGRESS.GIT_SYNCED"
1316 else:
1317 db_cluster["resourceState"] = "ERROR"
1318
yshahcb9075f2024-11-22 12:08:57 +00001319 db_cluster = self.update_operation_history(
1320 db_cluster, op_id, workflow_status, None
1321 )
garciadeblas96b94f52024-07-08 16:18:21 +02001322 # self.logger.info("Db content: {}".format(db_content))
1323 # self.db.set_one(self.db_collection, {"_id": _id}, db_cluster)
1324 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
1325
garciadeblas28bff0f2024-09-16 12:53:07 +02001326 # Clean items used in the workflow, no matter if the workflow succeeded
1327 clean_status, clean_msg = await self.odu.clean_items_workflow(
garciadeblas995cbf32024-12-18 12:54:00 +01001328 "update_cluster", op_id, op_params, workflow_content
garciadeblas28bff0f2024-09-16 12:53:07 +02001329 )
1330 self.logger.info(
1331 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1332 )
garciadeblas96b94f52024-07-08 16:18:21 +02001333 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001334 resource_status, resource_msg = await self.check_resource_status(
garciadeblas995cbf32024-12-18 12:54:00 +01001335 "update_cluster", op_id, op_params, workflow_content
garciadeblas96b94f52024-07-08 16:18:21 +02001336 )
1337 self.logger.info(
1338 "Resource Status: {} Resource Message: {}".format(
1339 resource_status, resource_msg
1340 )
1341 )
yshah771dea82024-07-05 15:11:49 +00001342
1343 if resource_status:
garciadeblas96b94f52024-07-08 16:18:21 +02001344 db_cluster["resourceState"] = "READY"
yshah771dea82024-07-05 15:11:49 +00001345 else:
garciadeblas96b94f52024-07-08 16:18:21 +02001346 db_cluster["resourceState"] = "ERROR"
yshah771dea82024-07-05 15:11:49 +00001347
yshah0defcd52024-11-18 07:41:35 +00001348 db_cluster = self.update_operation_history(
yshahcb9075f2024-11-22 12:08:57 +00001349 db_cluster, op_id, workflow_status, resource_status
yshah0defcd52024-11-18 07:41:35 +00001350 )
1351
garciadeblas96b94f52024-07-08 16:18:21 +02001352 db_cluster["operatingState"] = "IDLE"
garciadeblas96b94f52024-07-08 16:18:21 +02001353 # self.logger.info("db_cluster: {}".format(db_cluster))
garciadeblas7cf480d2025-01-27 16:53:45 +01001354 # TODO: verify condition
garciadeblas96b94f52024-07-08 16:18:21 +02001355 # For the moment, if the workflow completed successfully, then we update the db accordingly.
1356 if workflow_status:
1357 if "k8s_version" in op_params:
1358 db_cluster["k8s_version"] = op_params["k8s_version"]
yshah0defcd52024-11-18 07:41:35 +00001359 if "node_count" in op_params:
garciadeblas96b94f52024-07-08 16:18:21 +02001360 db_cluster["node_count"] = op_params["node_count"]
yshah0defcd52024-11-18 07:41:35 +00001361 if "node_size" in op_params:
1362 db_cluster["node_count"] = op_params["node_size"]
garciadeblas96b94f52024-07-08 16:18:21 +02001363 # self.db.set_one(self.db_collection, {"_id": _id}, db_content)
shahithya70a3fc92024-11-12 11:01:05 +00001364 db_cluster["current_operation"] = None
garciadeblas96b94f52024-07-08 16:18:21 +02001365 self.db.set_one("clusters", {"_id": db_cluster["_id"]}, db_cluster)
yshah771dea82024-07-05 15:11:49 +00001366 return
1367
garciadeblasad6d1ba2025-01-22 16:02:18 +01001368 async def check_update_cluster(self, op_id, op_params, content):
1369 self.logger.info(
1370 f"check_update_cluster Operation {op_id}. Params: {op_params}."
1371 )
1372 self.logger.debug(f"Content: {content}")
garciadeblasd7d8bde2025-01-27 18:31:06 +01001373 # return await self.check_dummy_operation(op_id, op_params, content)
1374 db_cluster = content["cluster"]
1375 cluster_name = db_cluster["git_name"].lower()
1376 cluster_kustomization_name = cluster_name
1377 db_vim_account = content["vim_account"]
1378 cloud_type = db_vim_account["vim_type"]
1379 if cloud_type == "aws":
1380 cluster_name = f"{cluster_name}-cluster"
1381 if cloud_type in ("azure", "gcp", "aws"):
1382 checkings_list = [
1383 {
1384 "item": "kustomization",
1385 "name": cluster_kustomization_name,
1386 "namespace": "managed-resources",
1387 "condition": {
1388 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
1389 "value": "True",
1390 },
1391 "timeout": self._checkloop_kustomization_timeout,
1392 "enable": True,
1393 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
1394 },
1395 ]
1396 else:
1397 return False, "Not suitable VIM account to check cluster status"
1398 # Scale operation
1399 if "node_count" in op_params:
garciadeblas1ca09852025-05-30 11:19:06 +02001400 if cloud_type in ("azure", "gcp"):
1401 checkings_list.append(
1402 {
1403 "item": f"cluster_{cloud_type}",
1404 "name": cluster_name,
1405 "namespace": "",
1406 "condition": {
1407 "jsonpath_filter": "status.atProvider.defaultNodePool[0].nodeCount",
1408 "value": f"{op_params['node_count']}",
1409 },
1410 "timeout": self._checkloop_resource_timeout * 3,
1411 "enable": True,
1412 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODE_COUNT.CLUSTER",
1413 }
1414 )
1415 elif cloud_type == "aws":
1416 checkings_list.append(
1417 {
1418 "item": f"nodegroup_{cloud_type}",
1419 "name": f"{cluster_name}-nodegroup",
1420 "namespace": "",
1421 "condition": {
1422 "jsonpath_filter": "status.atProvider.scalingConfig[0].desiredSize",
1423 "value": f"{op_params['node_count']}",
1424 },
1425 "timeout": self._checkloop_resource_timeout * 3,
1426 "enable": True,
1427 "resourceState": "IN_PROGRESS.RESOURCE_READY.NODE_COUNT.CLUSTER",
1428 }
1429 )
1430
garciadeblasd7d8bde2025-01-27 18:31:06 +01001431 # Upgrade operation
1432 if "k8s_version" in op_params:
1433 checkings_list.append(
1434 {
1435 "item": f"cluster_{cloud_type}",
1436 "name": cluster_name,
1437 "namespace": "",
1438 "condition": {
1439 "jsonpath_filter": "status.atProvider.defaultNodePool[0].orchestratorVersion",
1440 "value": op_params["k8s_version"],
1441 },
1442 "timeout": self._checkloop_resource_timeout * 2,
1443 "enable": True,
1444 "resourceState": "IN_PROGRESS.RESOURCE_READY.K8S_VERSION.CLUSTER",
1445 }
1446 )
1447 return await self.common_check_list(
1448 op_id, checkings_list, "clusters", db_cluster
1449 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01001450
yshah771dea82024-07-05 15:11:49 +00001451
garciadeblas72412282024-11-07 12:41:54 +01001452class CloudCredentialsLcm(GitOpsLcm):
yshah771dea82024-07-05 15:11:49 +00001453 db_collection = "vim_accounts"
1454
1455 def __init__(self, msg, lcm_tasks, config):
1456 """
1457 Init, Connect to database, filesystem storage, and messaging
1458 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
1459 :return: None
1460 """
garciadeblas72412282024-11-07 12:41:54 +01001461 super().__init__(msg, lcm_tasks, config)
yshah771dea82024-07-05 15:11:49 +00001462
yshah564ec9c2024-11-29 07:33:32 +00001463 async def add(self, params, order_id):
yshah771dea82024-07-05 15:11:49 +00001464 self.logger.info("Cloud Credentials create")
yshah564ec9c2024-11-29 07:33:32 +00001465 vim_id = params["_id"]
1466 op_id = vim_id
1467 op_params = params
1468 db_content = self.db.get_one(self.db_collection, {"_id": vim_id})
1469 vim_config = db_content.get("config", {})
1470 self.db.encrypt_decrypt_fields(
1471 vim_config.get("credentials"),
1472 "decrypt",
1473 ["password", "secret"],
1474 schema_version=db_content["schema_version"],
1475 salt=vim_id,
1476 )
1477
garciadeblas61a4c692025-07-17 13:04:13 +02001478 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001479 "create_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001480 )
1481
1482 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001483 op_id, workflow_name
yshah771dea82024-07-05 15:11:49 +00001484 )
1485
1486 self.logger.info(
1487 "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
1488 )
1489
garciadeblas28bff0f2024-09-16 12:53:07 +02001490 # Clean items used in the workflow, no matter if the workflow succeeded
1491 clean_status, clean_msg = await self.odu.clean_items_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001492 "create_cloud_credentials", op_id, op_params, db_content
garciadeblas28bff0f2024-09-16 12:53:07 +02001493 )
1494 self.logger.info(
1495 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1496 )
1497
yshah771dea82024-07-05 15:11:49 +00001498 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001499 resource_status, resource_msg = await self.check_resource_status(
yshah564ec9c2024-11-29 07:33:32 +00001500 "create_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001501 )
1502 self.logger.info(
1503 "Resource Status: {} Resource Message: {}".format(
1504 resource_status, resource_msg
1505 )
1506 )
garciadeblas15b8a302024-09-23 12:40:13 +02001507
yshah564ec9c2024-11-29 07:33:32 +00001508 db_content["_admin"]["operationalState"] = "ENABLED"
1509 for operation in db_content["_admin"]["operations"]:
garciadeblas15b8a302024-09-23 12:40:13 +02001510 if operation["lcmOperationType"] == "create":
1511 operation["operationState"] = "ENABLED"
yshah564ec9c2024-11-29 07:33:32 +00001512 self.logger.info("Content : {}".format(db_content))
1513 self.db.set_one("vim_accounts", {"_id": db_content["_id"]}, db_content)
yshah771dea82024-07-05 15:11:49 +00001514 return
1515
yshah564ec9c2024-11-29 07:33:32 +00001516 async def edit(self, params, order_id):
1517 self.logger.info("Cloud Credentials Update")
1518 vim_id = params["_id"]
1519 op_id = vim_id
1520 op_params = params
1521 db_content = self.db.get_one("vim_accounts", {"_id": vim_id})
1522 vim_config = db_content.get("config", {})
1523 self.db.encrypt_decrypt_fields(
1524 vim_config.get("credentials"),
1525 "decrypt",
1526 ["password", "secret"],
1527 schema_version=db_content["schema_version"],
1528 salt=vim_id,
1529 )
1530
garciadeblas61a4c692025-07-17 13:04:13 +02001531 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001532 "update_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001533 )
1534 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001535 op_id, workflow_name
yshah771dea82024-07-05 15:11:49 +00001536 )
1537 self.logger.info(
1538 "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
1539 )
1540
garciadeblas28bff0f2024-09-16 12:53:07 +02001541 # Clean items used in the workflow, no matter if the workflow succeeded
1542 clean_status, clean_msg = await self.odu.clean_items_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001543 "update_cloud_credentials", op_id, op_params, db_content
garciadeblas28bff0f2024-09-16 12:53:07 +02001544 )
1545 self.logger.info(
1546 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1547 )
1548
yshah771dea82024-07-05 15:11:49 +00001549 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001550 resource_status, resource_msg = await self.check_resource_status(
yshah564ec9c2024-11-29 07:33:32 +00001551 "update_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001552 )
1553 self.logger.info(
1554 "Resource Status: {} Resource Message: {}".format(
1555 resource_status, resource_msg
1556 )
1557 )
1558 return
1559
yshah564ec9c2024-11-29 07:33:32 +00001560 async def remove(self, params, order_id):
1561 self.logger.info("Cloud Credentials remove")
1562 vim_id = params["_id"]
1563 op_id = vim_id
1564 op_params = params
1565 db_content = self.db.get_one("vim_accounts", {"_id": vim_id})
1566
garciadeblas61a4c692025-07-17 13:04:13 +02001567 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00001568 "delete_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001569 )
1570 workflow_status, workflow_msg = await self.odu.check_workflow_status(
garciadeblasc89134b2025-02-05 16:36:17 +01001571 op_id, workflow_name
yshah771dea82024-07-05 15:11:49 +00001572 )
1573 self.logger.info(
1574 "Workflow Status: {} Workflow Msg: {}".format(workflow_status, workflow_msg)
1575 )
1576
1577 if workflow_status:
garciadeblas72412282024-11-07 12:41:54 +01001578 resource_status, resource_msg = await self.check_resource_status(
yshah564ec9c2024-11-29 07:33:32 +00001579 "delete_cloud_credentials", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001580 )
1581 self.logger.info(
1582 "Resource Status: {} Resource Message: {}".format(
1583 resource_status, resource_msg
1584 )
1585 )
yshah564ec9c2024-11-29 07:33:32 +00001586 self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
yshah771dea82024-07-05 15:11:49 +00001587 return
1588
rshri932105f2024-07-05 15:11:55 +00001589
garciadeblas72412282024-11-07 12:41:54 +01001590class K8sAppLcm(GitOpsLcm):
rshri948f7de2024-12-02 03:42:35 +00001591 db_collection = "k8sapp"
1592
rshri932105f2024-07-05 15:11:55 +00001593 def __init__(self, msg, lcm_tasks, config):
1594 """
1595 Init, Connect to database, filesystem storage, and messaging
1596 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
1597 :return: None
1598 """
garciadeblas72412282024-11-07 12:41:54 +01001599 super().__init__(msg, lcm_tasks, config)
rshri932105f2024-07-05 15:11:55 +00001600
rshri948f7de2024-12-02 03:42:35 +00001601 async def create(self, params, order_id):
garciadeblas61a4c692025-07-17 13:04:13 +02001602 self.logger.info("App Profile Create Enter")
rshri932105f2024-07-05 15:11:55 +00001603
rshri948f7de2024-12-02 03:42:35 +00001604 op_id = params["operation_id"]
1605 profile_id = params["profile_id"]
1606
1607 # To initialize the operation states
1608 self.initialize_operation(profile_id, op_id)
1609
1610 content = self.db.get_one("k8sapp", {"_id": profile_id})
1611 content["profile_type"] = "applications"
1612 op_params = self.get_operation_params(content, op_id)
1613 self.db.set_one("k8sapp", {"_id": content["_id"]}, content)
1614
garciadeblas61a4c692025-07-17 13:04:13 +02001615 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02001616 "create_profile", op_id, op_params, content
1617 )
garciadeblas26d733c2025-02-03 16:12:43 +01001618 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00001619
garciadeblas33b36e72025-01-17 12:49:19 +01001620 workflow_status = await self.check_workflow_and_update_db(
1621 op_id, workflow_name, content
1622 )
rshri932105f2024-07-05 15:11:55 +00001623
1624 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001625 resource_status, content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02001626 "create_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00001627 )
yshah564ec9c2024-11-29 07:33:32 +00001628 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
garciadeblas61a4c692025-07-17 13:04:13 +02001629 self.logger.info(
1630 f"App Profile Create Exit with resource status: {resource_status}"
1631 )
rshri932105f2024-07-05 15:11:55 +00001632 return
1633
rshri948f7de2024-12-02 03:42:35 +00001634 async def delete(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001635 self.logger.info("App delete Enter")
rshri932105f2024-07-05 15:11:55 +00001636
rshri948f7de2024-12-02 03:42:35 +00001637 op_id = params["operation_id"]
1638 profile_id = params["profile_id"]
1639
1640 # To initialize the operation states
1641 self.initialize_operation(profile_id, op_id)
1642
1643 content = self.db.get_one("k8sapp", {"_id": profile_id})
1644 op_params = self.get_operation_params(content, op_id)
1645
garciadeblas61a4c692025-07-17 13:04:13 +02001646 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02001647 "delete_profile", op_id, op_params, content
1648 )
garciadeblas26d733c2025-02-03 16:12:43 +01001649 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00001650
garciadeblas33b36e72025-01-17 12:49:19 +01001651 workflow_status = await self.check_workflow_and_update_db(
1652 op_id, workflow_name, content
1653 )
rshri932105f2024-07-05 15:11:55 +00001654
1655 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001656 resource_status, content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02001657 "delete_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00001658 )
rshri932105f2024-07-05 15:11:55 +00001659
yshahb36649f2025-02-28 09:01:51 +00001660 force = params.get("force", False)
1661 if force:
1662 force_delete_status = self.check_force_delete_and_delete_from_db(
1663 profile_id, workflow_status, resource_status, force
1664 )
1665 if force_delete_status:
1666 return
1667
1668 self.logger.info(f"Resource status: {resource_status}")
yshah564ec9c2024-11-29 07:33:32 +00001669 if resource_status:
1670 content["state"] = "DELETED"
yshah6bad8892025-02-11 12:37:04 +00001671 profile_type = self.profile_type_mapping[content["profile_type"]]
1672 self.delete_profile_ksu(profile_id, profile_type)
yshah564ec9c2024-11-29 07:33:32 +00001673 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
1674 self.db.del_one(self.db_collection, {"_id": content["_id"]})
garciadeblas61a4c692025-07-17 13:04:13 +02001675 self.logger.info(
1676 f"App Profile Delete Exit with resource status: {resource_status}"
1677 )
rshri932105f2024-07-05 15:11:55 +00001678 return
1679
1680
garciadeblas72412282024-11-07 12:41:54 +01001681class K8sResourceLcm(GitOpsLcm):
rshri948f7de2024-12-02 03:42:35 +00001682 db_collection = "k8sresource"
1683
rshri932105f2024-07-05 15:11:55 +00001684 def __init__(self, msg, lcm_tasks, config):
1685 """
1686 Init, Connect to database, filesystem storage, and messaging
1687 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
1688 :return: None
1689 """
garciadeblas72412282024-11-07 12:41:54 +01001690 super().__init__(msg, lcm_tasks, config)
rshri932105f2024-07-05 15:11:55 +00001691
rshri948f7de2024-12-02 03:42:35 +00001692 async def create(self, params, order_id):
garciadeblas61a4c692025-07-17 13:04:13 +02001693 self.logger.info("Resource Profile Create Enter")
rshri932105f2024-07-05 15:11:55 +00001694
rshri948f7de2024-12-02 03:42:35 +00001695 op_id = params["operation_id"]
1696 profile_id = params["profile_id"]
1697
1698 # To initialize the operation states
1699 self.initialize_operation(profile_id, op_id)
1700
1701 content = self.db.get_one("k8sresource", {"_id": profile_id})
1702 content["profile_type"] = "managed-resources"
1703 op_params = self.get_operation_params(content, op_id)
1704 self.db.set_one("k8sresource", {"_id": content["_id"]}, content)
1705
garciadeblas61a4c692025-07-17 13:04:13 +02001706 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02001707 "create_profile", op_id, op_params, content
1708 )
garciadeblas26d733c2025-02-03 16:12:43 +01001709 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00001710
garciadeblas33b36e72025-01-17 12:49:19 +01001711 workflow_status = await self.check_workflow_and_update_db(
1712 op_id, workflow_name, content
1713 )
rshri932105f2024-07-05 15:11:55 +00001714
1715 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001716 resource_status, content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02001717 "create_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00001718 )
yshah564ec9c2024-11-29 07:33:32 +00001719 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
1720 self.logger.info(
1721 f"Resource Create Exit with resource status: {resource_status}"
rshri932105f2024-07-05 15:11:55 +00001722 )
rshri932105f2024-07-05 15:11:55 +00001723 return
1724
rshri948f7de2024-12-02 03:42:35 +00001725 async def delete(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001726 self.logger.info("Resource delete Enter")
rshri948f7de2024-12-02 03:42:35 +00001727
1728 op_id = params["operation_id"]
1729 profile_id = params["profile_id"]
1730
1731 # To initialize the operation states
1732 self.initialize_operation(profile_id, op_id)
1733
1734 content = self.db.get_one("k8sresource", {"_id": profile_id})
1735 op_params = self.get_operation_params(content, op_id)
rshri932105f2024-07-05 15:11:55 +00001736
garciadeblas61a4c692025-07-17 13:04:13 +02001737 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02001738 "delete_profile", op_id, op_params, content
1739 )
garciadeblas26d733c2025-02-03 16:12:43 +01001740 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00001741
garciadeblas33b36e72025-01-17 12:49:19 +01001742 workflow_status = await self.check_workflow_and_update_db(
1743 op_id, workflow_name, content
1744 )
rshri932105f2024-07-05 15:11:55 +00001745
1746 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001747 resource_status, content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02001748 "delete_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00001749 )
rshri932105f2024-07-05 15:11:55 +00001750
yshahb36649f2025-02-28 09:01:51 +00001751 force = params.get("force", False)
1752 if force:
1753 force_delete_status = self.check_force_delete_and_delete_from_db(
1754 profile_id, workflow_status, resource_status, force
1755 )
1756 if force_delete_status:
1757 return
1758
yshah564ec9c2024-11-29 07:33:32 +00001759 if resource_status:
1760 content["state"] = "DELETED"
yshah6bad8892025-02-11 12:37:04 +00001761 profile_type = self.profile_type_mapping[content["profile_type"]]
1762 self.delete_profile_ksu(profile_id, profile_type)
yshah564ec9c2024-11-29 07:33:32 +00001763 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
1764 self.db.del_one(self.db_collection, {"_id": content["_id"]})
1765 self.logger.info(
1766 f"Resource Delete Exit with resource status: {resource_status}"
garciadeblas96b94f52024-07-08 16:18:21 +02001767 )
rshri932105f2024-07-05 15:11:55 +00001768 return
1769
1770
garciadeblas72412282024-11-07 12:41:54 +01001771class K8sInfraControllerLcm(GitOpsLcm):
rshri948f7de2024-12-02 03:42:35 +00001772 db_collection = "k8sinfra_controller"
1773
rshri932105f2024-07-05 15:11:55 +00001774 def __init__(self, msg, lcm_tasks, config):
1775 """
1776 Init, Connect to database, filesystem storage, and messaging
1777 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
1778 :return: None
1779 """
garciadeblas72412282024-11-07 12:41:54 +01001780 super().__init__(msg, lcm_tasks, config)
rshri932105f2024-07-05 15:11:55 +00001781
rshri948f7de2024-12-02 03:42:35 +00001782 async def create(self, params, order_id):
garciadeblas61a4c692025-07-17 13:04:13 +02001783 self.logger.info("Infra controller Profile Create Enter")
rshri932105f2024-07-05 15:11:55 +00001784
rshri948f7de2024-12-02 03:42:35 +00001785 op_id = params["operation_id"]
1786 profile_id = params["profile_id"]
1787
1788 # To initialize the operation states
1789 self.initialize_operation(profile_id, op_id)
1790
1791 content = self.db.get_one("k8sinfra_controller", {"_id": profile_id})
1792 content["profile_type"] = "infra-controllers"
1793 op_params = self.get_operation_params(content, op_id)
1794 self.db.set_one("k8sinfra_controller", {"_id": content["_id"]}, content)
1795
garciadeblas61a4c692025-07-17 13:04:13 +02001796 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02001797 "create_profile", op_id, op_params, content
1798 )
garciadeblas26d733c2025-02-03 16:12:43 +01001799 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00001800
garciadeblas33b36e72025-01-17 12:49:19 +01001801 workflow_status = await self.check_workflow_and_update_db(
1802 op_id, workflow_name, content
1803 )
rshri932105f2024-07-05 15:11:55 +00001804
1805 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001806 resource_status, content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02001807 "create_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00001808 )
yshah564ec9c2024-11-29 07:33:32 +00001809 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
1810 self.logger.info(
1811 f"Infra Controller Create Exit with resource status: {resource_status}"
rshri932105f2024-07-05 15:11:55 +00001812 )
rshri932105f2024-07-05 15:11:55 +00001813 return
1814
rshri948f7de2024-12-02 03:42:35 +00001815 async def delete(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001816 self.logger.info("Infra controller delete Enter")
rshri932105f2024-07-05 15:11:55 +00001817
rshri948f7de2024-12-02 03:42:35 +00001818 op_id = params["operation_id"]
1819 profile_id = params["profile_id"]
1820
1821 # To initialize the operation states
1822 self.initialize_operation(profile_id, op_id)
1823
1824 content = self.db.get_one("k8sinfra_controller", {"_id": profile_id})
1825 op_params = self.get_operation_params(content, op_id)
1826
garciadeblas61a4c692025-07-17 13:04:13 +02001827 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02001828 "delete_profile", op_id, op_params, content
1829 )
garciadeblas26d733c2025-02-03 16:12:43 +01001830 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00001831
garciadeblas33b36e72025-01-17 12:49:19 +01001832 workflow_status = await self.check_workflow_and_update_db(
1833 op_id, workflow_name, content
1834 )
rshri932105f2024-07-05 15:11:55 +00001835
1836 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001837 resource_status, content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02001838 "delete_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00001839 )
rshri932105f2024-07-05 15:11:55 +00001840
yshahb36649f2025-02-28 09:01:51 +00001841 force = params.get("force", False)
1842 if force:
1843 force_delete_status = self.check_force_delete_and_delete_from_db(
1844 profile_id, workflow_status, resource_status, force
1845 )
1846 if force_delete_status:
1847 return
1848
yshah564ec9c2024-11-29 07:33:32 +00001849 if resource_status:
1850 content["state"] = "DELETED"
yshah6bad8892025-02-11 12:37:04 +00001851 profile_type = self.profile_type_mapping[content["profile_type"]]
1852 self.delete_profile_ksu(profile_id, profile_type)
yshah564ec9c2024-11-29 07:33:32 +00001853 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
1854 self.db.del_one(self.db_collection, {"_id": content["_id"]})
1855 self.logger.info(
1856 f"Infra Controller Delete Exit with resource status: {resource_status}"
garciadeblas96b94f52024-07-08 16:18:21 +02001857 )
rshri932105f2024-07-05 15:11:55 +00001858 return
1859
1860
garciadeblas72412282024-11-07 12:41:54 +01001861class K8sInfraConfigLcm(GitOpsLcm):
rshri948f7de2024-12-02 03:42:35 +00001862 db_collection = "k8sinfra_config"
1863
rshri932105f2024-07-05 15:11:55 +00001864 def __init__(self, msg, lcm_tasks, config):
1865 """
1866 Init, Connect to database, filesystem storage, and messaging
1867 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
1868 :return: None
1869 """
garciadeblas72412282024-11-07 12:41:54 +01001870 super().__init__(msg, lcm_tasks, config)
rshri932105f2024-07-05 15:11:55 +00001871
rshri948f7de2024-12-02 03:42:35 +00001872 async def create(self, params, order_id):
garciadeblas61a4c692025-07-17 13:04:13 +02001873 self.logger.info("Infra config Profile Create Enter")
rshri932105f2024-07-05 15:11:55 +00001874
rshri948f7de2024-12-02 03:42:35 +00001875 op_id = params["operation_id"]
1876 profile_id = params["profile_id"]
1877
1878 # To initialize the operation states
1879 self.initialize_operation(profile_id, op_id)
1880
1881 content = self.db.get_one("k8sinfra_config", {"_id": profile_id})
1882 content["profile_type"] = "infra-configs"
1883 op_params = self.get_operation_params(content, op_id)
1884 self.db.set_one("k8sinfra_config", {"_id": content["_id"]}, content)
1885
garciadeblas61a4c692025-07-17 13:04:13 +02001886 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02001887 "create_profile", op_id, op_params, content
1888 )
garciadeblas26d733c2025-02-03 16:12:43 +01001889 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00001890
garciadeblas33b36e72025-01-17 12:49:19 +01001891 workflow_status = await self.check_workflow_and_update_db(
1892 op_id, workflow_name, content
1893 )
rshri932105f2024-07-05 15:11:55 +00001894
1895 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001896 resource_status, content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02001897 "create_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00001898 )
yshah564ec9c2024-11-29 07:33:32 +00001899 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
1900 self.logger.info(
1901 f"Infra Config Create Exit with resource status: {resource_status}"
rshri932105f2024-07-05 15:11:55 +00001902 )
rshri932105f2024-07-05 15:11:55 +00001903 return
1904
rshri948f7de2024-12-02 03:42:35 +00001905 async def delete(self, params, order_id):
rshri932105f2024-07-05 15:11:55 +00001906 self.logger.info("Infra config delete Enter")
1907
rshri948f7de2024-12-02 03:42:35 +00001908 op_id = params["operation_id"]
1909 profile_id = params["profile_id"]
1910
1911 # To initialize the operation states
1912 self.initialize_operation(profile_id, op_id)
1913
1914 content = self.db.get_one("k8sinfra_config", {"_id": profile_id})
1915 op_params = self.get_operation_params(content, op_id)
1916
garciadeblas61a4c692025-07-17 13:04:13 +02001917 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02001918 "delete_profile", op_id, op_params, content
1919 )
garciadeblas26d733c2025-02-03 16:12:43 +01001920 self.logger.info("workflow_name is: {}".format(workflow_name))
rshri932105f2024-07-05 15:11:55 +00001921
garciadeblas33b36e72025-01-17 12:49:19 +01001922 workflow_status = await self.check_workflow_and_update_db(
1923 op_id, workflow_name, content
1924 )
yshah564ec9c2024-11-29 07:33:32 +00001925
rshri932105f2024-07-05 15:11:55 +00001926 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001927 resource_status, content = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00001928 "delete_profile", op_id, op_params, content
rshri932105f2024-07-05 15:11:55 +00001929 )
yshah564ec9c2024-11-29 07:33:32 +00001930
yshahb36649f2025-02-28 09:01:51 +00001931 force = params.get("force", False)
1932 if force:
1933 force_delete_status = self.check_force_delete_and_delete_from_db(
1934 profile_id, workflow_status, resource_status, force
1935 )
1936 if force_delete_status:
1937 return
1938
rshri932105f2024-07-05 15:11:55 +00001939 if resource_status:
yshah564ec9c2024-11-29 07:33:32 +00001940 content["state"] = "DELETED"
yshah6bad8892025-02-11 12:37:04 +00001941 profile_type = self.profile_type_mapping[content["profile_type"]]
1942 self.delete_profile_ksu(profile_id, profile_type)
yshah564ec9c2024-11-29 07:33:32 +00001943 self.db.set_one(self.db_collection, {"_id": content["_id"]}, content)
1944 self.db.del_one(self.db_collection, {"_id": content["_id"]})
1945 self.logger.info(
1946 f"Infra Config Delete Exit with resource status: {resource_status}"
garciadeblas96b94f52024-07-08 16:18:21 +02001947 )
rshri932105f2024-07-05 15:11:55 +00001948
rshri932105f2024-07-05 15:11:55 +00001949 return
yshah771dea82024-07-05 15:11:49 +00001950
1951
garciadeblas72412282024-11-07 12:41:54 +01001952class OkaLcm(GitOpsLcm):
yshah771dea82024-07-05 15:11:49 +00001953 db_collection = "okas"
1954
    def __init__(self, msg, lcm_tasks, config):
        """
        Init, Connect to database, filesystem storage, and messaging
        (all performed by the GitOpsLcm base class).
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg, lcm_tasks, config)
yshah771dea82024-07-05 15:11:49 +00001962
yshah564ec9c2024-11-29 07:33:32 +00001963 async def create(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001964 self.logger.info("OKA Create Enter")
yshah564ec9c2024-11-29 07:33:32 +00001965 op_id = params["operation_id"]
1966 oka_id = params["oka_id"]
1967 self.initialize_operation(oka_id, op_id)
1968 db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
1969 op_params = self.get_operation_params(db_content, op_id)
yshah771dea82024-07-05 15:11:49 +00001970
garciadeblas61a4c692025-07-17 13:04:13 +02001971 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02001972 "create_oka", op_id, op_params, db_content
1973 )
yshah564ec9c2024-11-29 07:33:32 +00001974
garciadeblas33b36e72025-01-17 12:49:19 +01001975 workflow_status = await self.check_workflow_and_update_db(
1976 op_id, workflow_name, db_content
1977 )
yshah771dea82024-07-05 15:11:49 +00001978
1979 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01001980 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02001981 "create_oka", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00001982 )
garciadeblas96b94f52024-07-08 16:18:21 +02001983 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
garciadeblasb23d2dc2025-02-21 10:15:49 +01001984
1985 # Clean items used in the workflow, no matter if the workflow succeeded
1986 clean_status, clean_msg = await self.odu.clean_items_workflow(
1987 "create_oka", op_id, op_params, db_content
1988 )
1989 self.logger.info(
1990 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
1991 )
yshah564ec9c2024-11-29 07:33:32 +00001992 self.logger.info(f"OKA Create Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00001993 return
1994
yshah564ec9c2024-11-29 07:33:32 +00001995 async def edit(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02001996 self.logger.info("OKA Edit Enter")
yshah564ec9c2024-11-29 07:33:32 +00001997 op_id = params["operation_id"]
1998 oka_id = params["oka_id"]
1999 self.initialize_operation(oka_id, op_id)
2000 db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
2001 op_params = self.get_operation_params(db_content, op_id)
yshah771dea82024-07-05 15:11:49 +00002002
garciadeblas61a4c692025-07-17 13:04:13 +02002003 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002004 "update_oka", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002005 )
garciadeblas33b36e72025-01-17 12:49:19 +01002006 workflow_status = await self.check_workflow_and_update_db(
2007 op_id, workflow_name, db_content
2008 )
yshah771dea82024-07-05 15:11:49 +00002009
2010 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002011 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002012 "update_oka", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002013 )
garciadeblas96b94f52024-07-08 16:18:21 +02002014 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
garciadeblasb23d2dc2025-02-21 10:15:49 +01002015 # Clean items used in the workflow, no matter if the workflow succeeded
2016 clean_status, clean_msg = await self.odu.clean_items_workflow(
2017 "update_oka", op_id, op_params, db_content
2018 )
2019 self.logger.info(
2020 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
2021 )
yshah564ec9c2024-11-29 07:33:32 +00002022 self.logger.info(f"OKA Update Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002023 return
2024
yshah564ec9c2024-11-29 07:33:32 +00002025 async def delete(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02002026 self.logger.info("OKA delete Enter")
yshah564ec9c2024-11-29 07:33:32 +00002027 op_id = params["operation_id"]
2028 oka_id = params["oka_id"]
2029 self.initialize_operation(oka_id, op_id)
2030 db_content = self.db.get_one(self.db_collection, {"_id": oka_id})
2031 op_params = self.get_operation_params(db_content, op_id)
yshah771dea82024-07-05 15:11:49 +00002032
garciadeblas61a4c692025-07-17 13:04:13 +02002033 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002034 "delete_oka", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002035 )
garciadeblas33b36e72025-01-17 12:49:19 +01002036 workflow_status = await self.check_workflow_and_update_db(
2037 op_id, workflow_name, db_content
2038 )
yshah771dea82024-07-05 15:11:49 +00002039
2040 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002041 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002042 "delete_oka", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002043 )
yshah771dea82024-07-05 15:11:49 +00002044
yshahb36649f2025-02-28 09:01:51 +00002045 force = params.get("force", False)
2046 if force:
2047 force_delete_status = self.check_force_delete_and_delete_from_db(
2048 oka_id, workflow_status, resource_status, force
2049 )
2050 if force_delete_status:
2051 return
2052
yshah564ec9c2024-11-29 07:33:32 +00002053 if resource_status:
2054 db_content["state"] == "DELETED"
2055 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
garciadeblas96b94f52024-07-08 16:18:21 +02002056 self.db.del_one(self.db_collection, {"_id": db_content["_id"]})
garciadeblasb23d2dc2025-02-21 10:15:49 +01002057 # Clean items used in the workflow, no matter if the workflow succeeded
2058 clean_status, clean_msg = await self.odu.clean_items_workflow(
2059 "delete_oka", op_id, op_params, db_content
2060 )
2061 self.logger.info(
2062 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
2063 )
yshah564ec9c2024-11-29 07:33:32 +00002064 self.logger.info(f"OKA Delete Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002065 return
2066
2067
garciadeblas72412282024-11-07 12:41:54 +01002068class KsuLcm(GitOpsLcm):
yshah771dea82024-07-05 15:11:49 +00002069 db_collection = "ksus"
2070
2071 def __init__(self, msg, lcm_tasks, config):
2072 """
2073 Init, Connect to database, filesystem storage, and messaging
2074 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
2075 :return: None
2076 """
garciadeblas72412282024-11-07 12:41:54 +01002077 super().__init__(msg, lcm_tasks, config)
garciadeblasad6d1ba2025-01-22 16:02:18 +01002078 self._workflows = {
2079 "create_ksus": {
2080 "check_resource_function": self.check_create_ksus,
2081 },
2082 "delete_ksus": {
2083 "check_resource_function": self.check_delete_ksus,
2084 },
2085 }
2086
2087 def get_dbclusters_from_profile(self, profile_id, profile_type):
2088 cluster_list = []
2089 db_clusters = self.db.get_list("clusters")
2090 self.logger.info(f"Getting list of clusters for {profile_type} {profile_id}")
2091 for db_cluster in db_clusters:
2092 if profile_id in db_cluster.get(profile_type, []):
2093 self.logger.info(
2094 f"Profile {profile_id} found in cluster {db_cluster['name']}"
2095 )
2096 cluster_list.append(db_cluster)
2097 return cluster_list
yshah771dea82024-07-05 15:11:49 +00002098
yshah564ec9c2024-11-29 07:33:32 +00002099 async def create(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02002100 self.logger.info("ksu Create Enter")
yshah564ec9c2024-11-29 07:33:32 +00002101 db_content = []
2102 op_params = []
2103 op_id = params["operation_id"]
2104 for ksu_id in params["ksus_list"]:
2105 self.logger.info("Ksu ID: {}".format(ksu_id))
2106 self.initialize_operation(ksu_id, op_id)
2107 db_ksu = self.db.get_one(self.db_collection, {"_id": ksu_id})
2108 self.logger.info("Db KSU: {}".format(db_ksu))
2109 db_content.append(db_ksu)
2110 ksu_params = {}
2111 ksu_params = self.get_operation_params(db_ksu, op_id)
2112 self.logger.info("Operation Params: {}".format(ksu_params))
2113 # Update ksu_params["profile"] with profile name and age-pubkey
2114 profile_type = ksu_params["profile"]["profile_type"]
2115 profile_id = ksu_params["profile"]["_id"]
2116 profile_collection = self.profile_collection_mapping[profile_type]
2117 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
garciadeblasd41e9292025-03-11 15:44:25 +01002118 # db_profile is decrypted inline
2119 # No need to use decrypted_copy because db_profile won't be updated.
2120 self.decrypt_age_keys(db_profile)
yshah564ec9c2024-11-29 07:33:32 +00002121 ksu_params["profile"]["name"] = db_profile["name"]
2122 ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
2123 # Update ksu_params["oka"] with sw_catalog_path (when missing)
garciadeblas9f7c9c52025-01-17 01:06:05 +01002124 # TODO: remove this in favor of doing it in ODU workflow
yshah564ec9c2024-11-29 07:33:32 +00002125 for oka in ksu_params["oka"]:
2126 if "sw_catalog_path" not in oka:
2127 oka_id = oka["_id"]
2128 db_oka = self.db.get_one("okas", {"_id": oka_id})
yshah2f39b8a2024-12-19 11:06:24 +00002129 oka_type = MAP_PROFILE[
2130 db_oka.get("profile_type", "infra_controller_profiles")
2131 ]
garciadeblas9f7c9c52025-01-17 01:06:05 +01002132 oka[
2133 "sw_catalog_path"
garciadeblas29f8bcf2025-01-24 14:24:41 +01002134 ] = f"{oka_type}/{db_oka['git_name'].lower()}/templates"
yshah564ec9c2024-11-29 07:33:32 +00002135 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00002136
garciadeblasad6d1ba2025-01-22 16:02:18 +01002137 # A single workflow is launched for all KSUs
garciadeblas61a4c692025-07-17 13:04:13 +02002138 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002139 "create_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002140 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01002141 # Update workflow status in all KSUs
2142 wf_status_list = []
yshah564ec9c2024-11-29 07:33:32 +00002143 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01002144 workflow_status = await self.check_workflow_and_update_db(
2145 op_id, workflow_name, db_ksu
2146 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01002147 wf_status_list.append(workflow_status)
2148 # Update resource status in all KSUs
2149 # TODO: Is an operation correct if n KSUs are right and 1 is not OK?
2150 res_status_list = []
2151 for db_ksu, ksu_params, wf_status in zip(db_content, op_params, wf_status_list):
2152 if wf_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002153 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00002154 "create_ksus", op_id, ksu_params, db_ksu
2155 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01002156 else:
2157 resource_status = False
2158 res_status_list.append(resource_status)
garciadeblas96b94f52024-07-08 16:18:21 +02002159 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
2160
garciadeblasd8429852024-10-17 15:30:30 +02002161 # Clean items used in the workflow, no matter if the workflow succeeded
2162 clean_status, clean_msg = await self.odu.clean_items_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002163 "create_ksus", op_id, op_params, db_content
garciadeblasd8429852024-10-17 15:30:30 +02002164 )
2165 self.logger.info(
2166 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
2167 )
garciadeblasad6d1ba2025-01-22 16:02:18 +01002168 self.logger.info(f"KSU Create EXIT with Resource Status {res_status_list}")
yshah771dea82024-07-05 15:11:49 +00002169 return
2170
yshah564ec9c2024-11-29 07:33:32 +00002171 async def edit(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02002172 self.logger.info("ksu edit Enter")
yshah564ec9c2024-11-29 07:33:32 +00002173 db_content = []
2174 op_params = []
2175 op_id = params["operation_id"]
2176 for ksu_id in params["ksus_list"]:
2177 self.initialize_operation(ksu_id, op_id)
2178 db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
2179 db_content.append(db_ksu)
2180 ksu_params = {}
2181 ksu_params = self.get_operation_params(db_ksu, op_id)
2182 # Update ksu_params["profile"] with profile name and age-pubkey
2183 profile_type = ksu_params["profile"]["profile_type"]
2184 profile_id = ksu_params["profile"]["_id"]
2185 profile_collection = self.profile_collection_mapping[profile_type]
2186 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
garciadeblasd41e9292025-03-11 15:44:25 +01002187 # db_profile is decrypted inline
2188 # No need to use decrypted_copy because db_profile won't be updated.
2189 self.decrypt_age_keys(db_profile)
yshah564ec9c2024-11-29 07:33:32 +00002190 ksu_params["profile"]["name"] = db_profile["name"]
2191 ksu_params["profile"]["age_pubkey"] = db_profile.get("age_pubkey", "")
2192 # Update ksu_params["oka"] with sw_catalog_path (when missing)
garciadeblas9f7c9c52025-01-17 01:06:05 +01002193 # TODO: remove this in favor of doing it in ODU workflow
yshah564ec9c2024-11-29 07:33:32 +00002194 for oka in ksu_params["oka"]:
2195 if "sw_catalog_path" not in oka:
2196 oka_id = oka["_id"]
2197 db_oka = self.db.get_one("okas", {"_id": oka_id})
yshah2f39b8a2024-12-19 11:06:24 +00002198 oka_type = MAP_PROFILE[
2199 db_oka.get("profile_type", "infra_controller_profiles")
2200 ]
garciadeblas9f7c9c52025-01-17 01:06:05 +01002201 oka[
2202 "sw_catalog_path"
2203 ] = f"{oka_type}/{db_oka['git_name']}/templates"
yshah564ec9c2024-11-29 07:33:32 +00002204 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00002205
garciadeblas61a4c692025-07-17 13:04:13 +02002206 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002207 "update_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002208 )
yshah771dea82024-07-05 15:11:49 +00002209
yshah564ec9c2024-11-29 07:33:32 +00002210 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01002211 workflow_status = await self.check_workflow_and_update_db(
2212 op_id, workflow_name, db_ksu
2213 )
yshah564ec9c2024-11-29 07:33:32 +00002214
garciadeblas96b94f52024-07-08 16:18:21 +02002215 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002216 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00002217 "update_ksus", op_id, ksu_params, db_ksu
garciadeblas96b94f52024-07-08 16:18:21 +02002218 )
garciadeblas96b94f52024-07-08 16:18:21 +02002219 db_ksu["name"] = ksu_params["name"]
2220 db_ksu["description"] = ksu_params["description"]
2221 db_ksu["profile"]["profile_type"] = ksu_params["profile"][
2222 "profile_type"
2223 ]
2224 db_ksu["profile"]["_id"] = ksu_params["profile"]["_id"]
2225 db_ksu["oka"] = ksu_params["oka"]
2226 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
2227
yshah564ec9c2024-11-29 07:33:32 +00002228 # Clean items used in the workflow, no matter if the workflow succeeded
2229 clean_status, clean_msg = await self.odu.clean_items_workflow(
2230 "create_ksus", op_id, op_params, db_content
garciadeblas96b94f52024-07-08 16:18:21 +02002231 )
2232 self.logger.info(
yshah564ec9c2024-11-29 07:33:32 +00002233 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
garciadeblas96b94f52024-07-08 16:18:21 +02002234 )
yshah564ec9c2024-11-29 07:33:32 +00002235 self.logger.info(f"KSU Update EXIT with Resource Status {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002236 return
2237
yshah564ec9c2024-11-29 07:33:32 +00002238 async def delete(self, params, order_id):
2239 self.logger.info("ksu delete Enter")
2240 db_content = []
2241 op_params = []
2242 op_id = params["operation_id"]
2243 for ksu_id in params["ksus_list"]:
2244 self.initialize_operation(ksu_id, op_id)
2245 db_ksu = self.db.get_one("ksus", {"_id": ksu_id})
2246 db_content.append(db_ksu)
2247 ksu_params = {}
2248 ksu_params["profile"] = {}
2249 ksu_params["profile"]["profile_type"] = db_ksu["profile"]["profile_type"]
2250 ksu_params["profile"]["_id"] = db_ksu["profile"]["_id"]
garciadeblasd41e9292025-03-11 15:44:25 +01002251 # Update ksu_params["profile"] with profile name
yshah564ec9c2024-11-29 07:33:32 +00002252 profile_type = ksu_params["profile"]["profile_type"]
2253 profile_id = ksu_params["profile"]["_id"]
2254 profile_collection = self.profile_collection_mapping[profile_type]
2255 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
2256 ksu_params["profile"]["name"] = db_profile["name"]
yshah564ec9c2024-11-29 07:33:32 +00002257 op_params.append(ksu_params)
yshah771dea82024-07-05 15:11:49 +00002258
garciadeblas61a4c692025-07-17 13:04:13 +02002259 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
yshah564ec9c2024-11-29 07:33:32 +00002260 "delete_ksus", op_id, op_params, db_content
2261 )
2262
2263 for db_ksu, ksu_params in zip(db_content, op_params):
garciadeblas33b36e72025-01-17 12:49:19 +01002264 workflow_status = await self.check_workflow_and_update_db(
2265 op_id, workflow_name, db_ksu
2266 )
yshah564ec9c2024-11-29 07:33:32 +00002267
2268 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002269 resource_status, db_ksu = await self.check_resource_and_update_db(
yshah564ec9c2024-11-29 07:33:32 +00002270 "delete_ksus", op_id, ksu_params, db_ksu
2271 )
2272
yshahb36649f2025-02-28 09:01:51 +00002273 force = params.get("force", False)
2274 if force:
2275 force_delete_status = self.check_force_delete_and_delete_from_db(
2276 db_ksu["_id"], workflow_status, resource_status, force
2277 )
2278 if force_delete_status:
2279 return
2280
yshah564ec9c2024-11-29 07:33:32 +00002281 if resource_status:
2282 db_ksu["state"] == "DELETED"
yshah5e109152025-05-19 12:29:01 +00002283 self.delete_ksu_dependency(db_ksu["_id"], db_ksu)
yshah564ec9c2024-11-29 07:33:32 +00002284 self.db.set_one(self.db_collection, {"_id": db_ksu["_id"]}, db_ksu)
2285 self.db.del_one(self.db_collection, {"_id": db_ksu["_id"]})
2286
2287 self.logger.info(f"KSU Delete Exit with resource status: {resource_status}")
2288 return
2289
2290 async def clone(self, params, order_id):
2291 self.logger.info("ksu clone Enter")
2292 op_id = params["operation_id"]
2293 ksus_id = params["ksus_list"][0]
2294 self.initialize_operation(ksus_id, op_id)
2295 db_content = self.db.get_one(self.db_collection, {"_id": ksus_id})
2296 op_params = self.get_operation_params(db_content, op_id)
garciadeblas61a4c692025-07-17 13:04:13 +02002297 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002298 "clone_ksus", op_id, op_params, db_content
2299 )
yshah564ec9c2024-11-29 07:33:32 +00002300
garciadeblas33b36e72025-01-17 12:49:19 +01002301 workflow_status = await self.check_workflow_and_update_db(
2302 op_id, workflow_name, db_content
2303 )
yshah771dea82024-07-05 15:11:49 +00002304
2305 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002306 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002307 "clone_ksus", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002308 )
garciadeblas96b94f52024-07-08 16:18:21 +02002309 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
yshah564ec9c2024-11-29 07:33:32 +00002310
2311 self.logger.info(f"KSU Clone Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002312 return
2313
yshah564ec9c2024-11-29 07:33:32 +00002314 async def move(self, params, order_id):
garciadeblas96b94f52024-07-08 16:18:21 +02002315 self.logger.info("ksu move Enter")
yshah564ec9c2024-11-29 07:33:32 +00002316 op_id = params["operation_id"]
2317 ksus_id = params["ksus_list"][0]
2318 self.initialize_operation(ksus_id, op_id)
2319 db_content = self.db.get_one(self.db_collection, {"_id": ksus_id})
2320 op_params = self.get_operation_params(db_content, op_id)
garciadeblas61a4c692025-07-17 13:04:13 +02002321 workflow_res, workflow_name, _ = await self.odu.launch_workflow(
garciadeblas96b94f52024-07-08 16:18:21 +02002322 "move_ksus", op_id, op_params, db_content
2323 )
yshah564ec9c2024-11-29 07:33:32 +00002324
garciadeblas33b36e72025-01-17 12:49:19 +01002325 workflow_status = await self.check_workflow_and_update_db(
2326 op_id, workflow_name, db_content
2327 )
yshah771dea82024-07-05 15:11:49 +00002328
2329 if workflow_status:
garciadeblas33b36e72025-01-17 12:49:19 +01002330 resource_status, db_content = await self.check_resource_and_update_db(
garciadeblas96b94f52024-07-08 16:18:21 +02002331 "move_ksus", op_id, op_params, db_content
yshah771dea82024-07-05 15:11:49 +00002332 )
garciadeblas96b94f52024-07-08 16:18:21 +02002333 self.db.set_one(self.db_collection, {"_id": db_content["_id"]}, db_content)
yshah564ec9c2024-11-29 07:33:32 +00002334
2335 self.logger.info(f"KSU Move Exit with resource status: {resource_status}")
yshah771dea82024-07-05 15:11:49 +00002336 return
garciadeblasad6d1ba2025-01-22 16:02:18 +01002337
2338 async def check_create_ksus(self, op_id, op_params, content):
2339 self.logger.info(f"check_create_ksus Operation {op_id}. Params: {op_params}.")
2340 self.logger.debug(f"Content: {content}")
2341 db_ksu = content
2342 kustomization_name = db_ksu["git_name"].lower()
2343 oka_list = op_params["oka"]
2344 oka_item = oka_list[0]
2345 oka_params = oka_item.get("transformation", {})
garciadeblas167dde32025-02-14 00:44:58 +01002346 kustomization_ns = oka_params.get("kustomization_namespace", "flux-system")
garciadeblasad6d1ba2025-01-22 16:02:18 +01002347 profile_id = op_params.get("profile", {}).get("_id")
2348 profile_type = op_params.get("profile", {}).get("profile_type")
2349 self.logger.info(
2350 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2351 )
2352 dbcluster_list = self.get_dbclusters_from_profile(profile_id, profile_type)
2353 if not dbcluster_list:
2354 self.logger.info(f"No clusters found for profile {profile_id}.")
2355 for db_cluster in dbcluster_list:
2356 try:
2357 self.logger.info(
garciadeblasae238482025-02-03 08:44:19 +01002358 f"Checking status of KSU {db_ksu['name']} in cluster {db_cluster['name']}."
garciadeblasad6d1ba2025-01-22 16:02:18 +01002359 )
2360 cluster_kubectl = self.cluster_kubectl(db_cluster)
2361 checkings_list = [
2362 {
2363 "item": "kustomization",
2364 "name": kustomization_name,
garciadeblas167dde32025-02-14 00:44:58 +01002365 "namespace": kustomization_ns,
garciadeblas7cf480d2025-01-27 16:53:45 +01002366 "condition": {
2367 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
2368 "value": "True",
2369 },
garciadeblasad6d1ba2025-01-22 16:02:18 +01002370 "timeout": self._checkloop_kustomization_timeout,
2371 "enable": True,
2372 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
2373 },
2374 ]
2375 self.logger.info(
2376 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2377 )
2378 result, message = await self.common_check_list(
garciadeblas6d8acf32025-02-06 13:34:37 +01002379 op_id, checkings_list, "ksus", db_ksu, kubectl_obj=cluster_kubectl
garciadeblasad6d1ba2025-01-22 16:02:18 +01002380 )
2381 if not result:
2382 return False, message
2383 except Exception as e:
2384 self.logger.error(
2385 f"Error checking KSU in cluster {db_cluster['name']}."
2386 )
2387 self.logger.error(e)
2388 return False, f"Error checking KSU in cluster {db_cluster['name']}."
2389 return True, "OK"
2390
2391 async def check_delete_ksus(self, op_id, op_params, content):
2392 self.logger.info(f"check_delete_ksus Operation {op_id}. Params: {op_params}.")
2393 self.logger.debug(f"Content: {content}")
2394 db_ksu = content
2395 kustomization_name = db_ksu["git_name"].lower()
2396 oka_list = db_ksu["oka"]
2397 oka_item = oka_list[0]
2398 oka_params = oka_item.get("transformation", {})
garciadeblas167dde32025-02-14 00:44:58 +01002399 kustomization_ns = oka_params.get("kustomization_namespace", "flux-system")
garciadeblasad6d1ba2025-01-22 16:02:18 +01002400 profile_id = op_params.get("profile", {}).get("_id")
2401 profile_type = op_params.get("profile", {}).get("profile_type")
2402 self.logger.info(
2403 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2404 )
2405 dbcluster_list = self.get_dbclusters_from_profile(profile_id, profile_type)
2406 if not dbcluster_list:
2407 self.logger.info(f"No clusters found for profile {profile_id}.")
2408 for db_cluster in dbcluster_list:
2409 try:
2410 self.logger.info(
2411 f"Checking status of KSU in cluster {db_cluster['name']}."
2412 )
2413 cluster_kubectl = self.cluster_kubectl(db_cluster)
2414 checkings_list = [
2415 {
2416 "item": "kustomization",
2417 "name": kustomization_name,
garciadeblas167dde32025-02-14 00:44:58 +01002418 "namespace": kustomization_ns,
garciadeblasad6d1ba2025-01-22 16:02:18 +01002419 "deleted": True,
2420 "timeout": self._checkloop_kustomization_timeout,
2421 "enable": True,
2422 "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
2423 },
2424 ]
2425 self.logger.info(
2426 f"Checking status of KSU {db_ksu['name']} for profile {profile_id}."
2427 )
2428 result, message = await self.common_check_list(
garciadeblas6d8acf32025-02-06 13:34:37 +01002429 op_id, checkings_list, "ksus", db_ksu, kubectl_obj=cluster_kubectl
garciadeblasad6d1ba2025-01-22 16:02:18 +01002430 )
2431 if not result:
2432 return False, message
2433 except Exception as e:
2434 self.logger.error(
2435 f"Error checking KSU in cluster {db_cluster['name']}."
2436 )
2437 self.logger.error(e)
2438 return False, f"Error checking KSU in cluster {db_cluster['name']}."
2439 return True, "OK"
garciadeblas61a4c692025-07-17 13:04:13 +02002440
2441
2442class AppInstanceLcm(GitOpsLcm):
2443 db_collection = "appinstances"
2444
2445 def __init__(self, msg, lcm_tasks, config):
2446 """
2447 Init, Connect to database, filesystem storage, and messaging
2448 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
2449 :return: None
2450 """
2451 super().__init__(msg, lcm_tasks, config)
2452 self._workflows = {
2453 "create_app": {
2454 "check_resource_function": self.check_create_app,
2455 },
2456 "update_app": {
2457 "check_resource_function": self.check_update_app,
2458 },
2459 "delete_app": {
2460 "check_resource_function": self.check_delete_app,
2461 },
2462 }
2463
2464 def get_dbclusters_from_profile(self, profile_id, profile_type):
2465 cluster_list = []
2466 db_clusters = self.db.get_list("clusters")
2467 self.logger.info(f"Getting list of clusters for {profile_type} {profile_id}")
2468 for db_cluster in db_clusters:
2469 if profile_id in db_cluster.get(profile_type, []):
2470 self.logger.info(
2471 f"Profile {profile_id} found in cluster {db_cluster['name']}"
2472 )
2473 cluster_list.append(db_cluster)
2474 return cluster_list
2475
2476 def update_app_dependency(self, app_id, db_app):
2477 self.logger.info(f"Updating AppInstance dependencies for AppInstance {app_id}")
2478 oka_id = db_app.get("oka")
2479 if not oka_id:
2480 self.logger.info(f"No OKA associated with AppInstance {app_id}")
2481 return
2482
2483 used_oka = []
2484 all_apps = self.db.get_list(self.db_collection, {})
2485 for app in all_apps:
2486 if app["_id"] != app_id:
2487 app_oka_id = app["oka"]
2488 if app_oka_id not in used_oka:
2489 used_oka.append(app_oka_id)
2490 self.logger.info(f"Used OKA: {used_oka}")
2491
2492 if oka_id not in used_oka:
2493 self.db.set_one(
2494 "okas", {"_id": oka_id}, {"_admin.usageState": "NOT_IN_USE"}
2495 )
2496 return
2497
2498 async def generic_operation(self, params, order_id, operation_name):
2499 self.logger.info(f"Generic operation. Operation name: {operation_name}")
2500 # self.logger.debug(f"Params: {params}")
2501 try:
2502 op_id = params["operation_id"]
2503 app_id = params["appinstance"]
2504 self.initialize_operation(app_id, op_id)
2505 db_app = self.db.get_one(self.db_collection, {"_id": app_id})
2506 # self.logger.debug("Db App: {}".format(db_app))
2507
2508 # Initialize workflow_content with a copy of the db_app, decrypting fields to use in workflows
2509 db_app_copy = self.decrypted_copy(db_app)
2510 workflow_content = {
2511 "app": db_app_copy,
2512 }
2513
2514 # Update workflow_content with profile info
2515 profile_type = db_app["profile_type"]
2516 profile_id = db_app["profile"]
2517 profile_collection = self.profile_collection_mapping[profile_type]
2518 db_profile = self.db.get_one(profile_collection, {"_id": profile_id})
2519 # db_profile is decrypted inline
2520 # No need to use decrypted_copy because db_profile won't be updated.
2521 self.decrypt_age_keys(db_profile)
2522 workflow_content["profile"] = db_profile
2523
2524 op_params = self.get_operation_params(db_app, op_id)
2525 if not op_params:
2526 op_params = {}
2527 self.logger.debug("Operation Params: {}".format(op_params))
2528
2529 # Get SW catalog path from op_params or from DB
2530 aux_dict = {}
2531 if operation_name == "create_app":
2532 aux_dict = op_params
2533 else:
2534 aux_dict = db_app
2535 sw_catalog_path = ""
2536 if "sw_catalog_path" in aux_dict:
2537 sw_catalog_path = aux_dict.get("sw_catalog_path", "")
2538 elif "oka" in aux_dict:
2539 oka_id = aux_dict["oka"]
2540 db_oka = self.db.get_one("okas", {"_id": oka_id})
2541 oka_type = MAP_PROFILE[
2542 db_oka.get("profile_type", "infra_controller_profiles")
2543 ]
2544 sw_catalog_path = f"{oka_type}/{db_oka['git_name'].lower()}"
2545 else:
2546 self.logger.error("SW Catalog path could not be determined.")
2547 raise LcmException("SW Catalog path could not be determined.")
2548 self.logger.debug(f"SW Catalog path: {sw_catalog_path}")
2549
2550 # Get model from Git repo
2551 # Clone the SW catalog repo
2552 repodir = self.cloneGitRepo(
2553 repo_url=self._full_repo_sw_catalogs_url, branch="main"
2554 )
2555 model_file_path = os.path.join(repodir, sw_catalog_path, "model.yaml")
2556 if not os.path.exists(model_file_path):
2557 self.logger.error(f"Model file not found at path: {model_file_path}")
2558 raise LcmException(f"Model file not found at path: {model_file_path}")
2559 # Store the model content in workflow_content
2560 with open(model_file_path) as model_file:
2561 workflow_content["model"] = yaml.safe_load(model_file.read())
2562
2563 # A single workflow is launched for the App operation
2564 self.logger.debug("Launching workflow {}".format(operation_name))
2565 (
2566 workflow_res,
2567 workflow_name,
2568 workflow_resources,
2569 ) = await self.odu.launch_workflow(
2570 operation_name, op_id, op_params, workflow_content
2571 )
2572
2573 if not workflow_res:
2574 self.logger.error(f"Failed to launch workflow: {workflow_name}")
2575 if operation_name == "create_app":
2576 db_app["state"] = "FAILED_CREATION"
2577 elif operation_name == "delete_app":
2578 db_app["state"] = "FAILED_DELETION"
2579 db_app["resourceState"] = "ERROR"
2580 db_app = self.update_operation_history(
2581 db_app, op_id, workflow_status=False, resource_status=None
2582 )
2583 self.db.set_one(self.db_collection, {"_id": db_app["_id"]}, db_app)
2584 # Clean items used in the workflow, no matter if the workflow succeeded
2585 clean_status, clean_msg = await self.odu.clean_items_workflow(
2586 operation_name, op_id, op_params, workflow_content
2587 )
2588 self.logger.info(
2589 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
2590 )
2591 return
2592
2593 # Update resources created in workflow
2594 db_app["app_model"] = workflow_resources.get("app_model", {})
2595
2596 # Update workflow status in App
2597 workflow_status = await self.check_workflow_and_update_db(
2598 op_id, workflow_name, db_app
2599 )
2600 # Update resource status in DB
2601 if workflow_status:
2602 resource_status, db_app = await self.check_resource_and_update_db(
2603 operation_name, op_id, op_params, db_app
2604 )
2605 else:
2606 resource_status = False
2607 self.db.set_one(self.db_collection, {"_id": db_app["_id"]}, db_app)
2608
2609 # Clean items used in the workflow, no matter if the workflow succeeded
2610 clean_status, clean_msg = await self.odu.clean_items_workflow(
2611 operation_name, op_id, op_params, workflow_content
2612 )
2613 self.logger.info(
2614 f"clean_status is :{clean_status} and clean_msg is :{clean_msg}"
2615 )
2616
2617 if operation_name == "delete_app":
2618 force = params.get("force", False)
2619 if force:
2620 force_delete_status = self.check_force_delete_and_delete_from_db(
2621 db_app["_id"], workflow_status, resource_status, force
2622 )
2623 if force_delete_status:
2624 return
2625 if resource_status:
2626 db_app["state"] == "DELETED"
2627 self.update_app_dependency(db_app["_id"], db_app)
2628 self.db.del_one(self.db_collection, {"_id": db_app["_id"]})
2629
2630 self.logger.info(
2631 f"Generic app operation Exit {operation_name} with resource Status {resource_status}"
2632 )
2633 return
2634 except Exception as e:
2635 self.logger.debug(traceback.format_exc())
2636 self.logger.debug(f"Exception: {e}", exc_info=True)
2637 return
2638
2639 async def create(self, params, order_id):
2640 self.logger.info("App Create Enter")
2641 return await self.generic_operation(params, order_id, "create_app")
2642
2643 async def update(self, params, order_id):
2644 self.logger.info("App Edit Enter")
2645 return await self.generic_operation(params, order_id, "update_app")
2646
2647 async def delete(self, params, order_id):
2648 self.logger.info("App Delete Enter")
2649 return await self.generic_operation(params, order_id, "delete_app")
2650
2651 async def check_appinstance(self, op_id, op_params, content, deleted=False):
2652 self.logger.info(
2653 f"check_app_instance Operation {op_id}. Params: {op_params}. Deleted: {deleted}"
2654 )
2655 self.logger.debug(f"Content: {content}")
2656 db_app = content
2657 profile_id = db_app["profile"]
2658 profile_type = db_app["profile_type"]
2659 app_name = db_app["name"]
2660 self.logger.info(
2661 f"Checking status of AppInstance {app_name} for profile {profile_id}."
2662 )
2663
2664 # TODO: read app_model and get kustomization name and namespace
2665 # app_model = db_app.get("app_model", {})
2666 kustomization_list = [
2667 {
2668 "name": f"jenkins-{app_name}",
2669 "namespace": "flux-system",
2670 }
2671 ]
2672 checkings_list = []
2673 if deleted:
2674 for kustomization in kustomization_list:
2675 checkings_list.append(
2676 {
2677 "item": "kustomization",
2678 "name": kustomization["name"].lower(),
2679 "namespace": kustomization["namespace"],
2680 "deleted": True,
2681 "timeout": self._checkloop_kustomization_timeout,
2682 "enable": True,
2683 "resourceState": "IN_PROGRESS.KUSTOMIZATION_DELETED",
2684 }
2685 )
2686 else:
2687 for kustomization in kustomization_list:
2688 checkings_list.append(
2689 {
2690 "item": "kustomization",
2691 "name": kustomization["name"].lower(),
2692 "namespace": kustomization["namespace"],
2693 "condition": {
2694 "jsonpath_filter": "status.conditions[?(@.type=='Ready')].status",
2695 "value": "True",
2696 },
2697 "timeout": self._checkloop_kustomization_timeout,
2698 "enable": True,
2699 "resourceState": "IN_PROGRESS.KUSTOMIZATION_READY",
2700 }
2701 )
2702
2703 dbcluster_list = self.get_dbclusters_from_profile(profile_id, profile_type)
2704 if not dbcluster_list:
2705 self.logger.info(f"No clusters found for profile {profile_id}.")
2706 for db_cluster in dbcluster_list:
2707 try:
2708 self.logger.info(
2709 f"Checking status of AppInstance {app_name} in cluster {db_cluster['name']}."
2710 )
2711 cluster_kubectl = self.cluster_kubectl(db_cluster)
2712 result, message = await self.common_check_list(
2713 op_id,
2714 checkings_list,
2715 self.db_collection,
2716 db_app,
2717 kubectl_obj=cluster_kubectl,
2718 )
2719 if not result:
2720 return False, message
2721 except Exception as e:
2722 self.logger.error(
2723 f"Error checking AppInstance in cluster {db_cluster['name']}."
2724 )
2725 self.logger.error(e)
2726 return (
2727 False,
2728 f"Error checking AppInstance in cluster {db_cluster['name']}.",
2729 )
2730 return True, "OK"
2731
2732 async def check_create_app(self, op_id, op_params, content):
2733 self.logger.info(f"check_update_app Operation {op_id}. Params: {op_params}.")
2734 # self.logger.debug(f"Content: {content}")
2735 return await self.check_appinstance(op_id, op_params, content)
2736
2737 async def check_update_app(self, op_id, op_params, content):
2738 self.logger.info(f"check_update_app Operation {op_id}. Params: {op_params}.")
2739 # self.logger.debug(f"Content: {content}")
2740 return await self.check_appinstance(op_id, op_params, content)
2741
2742 async def check_delete_app(self, op_id, op_params, content):
2743 self.logger.info(f"check_delete_app Operation {op_id}. Params: {op_params}.")
2744 # self.logger.debug(f"Content: {content}")
2745 return await self.check_appinstance(op_id, op_params, content, deleted=True)