Bug 2160 fix: verify that a VDUR exists within the method update_ns_vld_target
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import yaml
21 import logging
22 import logging.handlers
23 import traceback
24 import json
25 from jinja2 import (
26 Environment,
27 TemplateError,
28 TemplateNotFound,
29 StrictUndefined,
30 UndefinedError,
31 )
32
33 from osm_lcm import ROclient
34 from osm_lcm.data_utils.nsr import get_deployed_kdu
35 from osm_lcm.ng_ro import NgRoClient, NgRoException
36 from osm_lcm.lcm_utils import (
37 LcmException,
38 LcmExceptionNoMgmtIP,
39 LcmBase,
40 deep_get,
41 get_iterable,
42 populate_dict,
43 )
44 from osm_lcm.data_utils.nsd import get_vnf_profiles
45 from osm_lcm.data_utils.vnfd import (
46 get_vdu_list,
47 get_vdu_profile,
48 get_ee_sorted_initial_config_primitive_list,
49 get_ee_sorted_terminate_config_primitive_list,
50 get_kdu_list,
51 get_virtual_link_profiles,
52 get_vdu,
53 get_configuration,
54 get_vdu_index,
55 get_scaling_aspect,
56 get_number_of_instances,
57 get_juju_ee_ref,
58 get_kdu_profile,
59 )
60 from osm_lcm.data_utils.list_utils import find_in_list
61 from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
62 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
63 from osm_lcm.data_utils.database.vim_account import VimAccountDB
64 from n2vc.k8s_helm_conn import K8sHelmConnector
65 from n2vc.k8s_helm3_conn import K8sHelm3Connector
66 from n2vc.k8s_juju_conn import K8sJujuConnector
67
68 from osm_common.dbbase import DbException
69 from osm_common.fsbase import FsException
70
71 from osm_lcm.data_utils.database.database import Database
72 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
73
74 from n2vc.n2vc_juju_conn import N2VCJujuConnector
75 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
76
77 from osm_lcm.lcm_helm_conn import LCMHelmConn
78
79 from copy import copy, deepcopy
80 from time import time
81 from uuid import uuid4
82
83 from random import randint
84
85 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
86
87
class NsLcm(LcmBase):
    # Time (seconds) a charm may remain in blocked/error status, counted from
    # the first time it was seen in that state, before it is marked as failed.
    timeout_vca_on_error = 5 * 60
    # Default global timeout (seconds) for deploying an NS.
    timeout_ns_deploy = 2 * 3600
    # Default global timeout (seconds) for un-deploying (terminating) an NS.
    timeout_ns_terminate = 1800
    # Timeout (seconds) for deleting a charm.
    timeout_charm_delete = 10 * 60
    # Timeout (seconds) for a primitive execution.
    timeout_primitive = 30 * 60
    # Timeout (seconds) waiting for some progress in a primitive execution.
    timeout_progress_primitive = 10 * 60

    # Sentinel values returned by sub-operation lookups.
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Human-readable task name used when registering the VCA deployment task.
    task_name_deploy_vca = "Deploying VCA"
104
def __init__(self, msg, lcm_tasks, config, loop, prometheus=None):
    """
    Init, Connect to database, filesystem storage, and messaging
    :param msg: messaging (kafka) client
    :param lcm_tasks: task registry shared with the LCM main module
    :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
    :param loop: asyncio event loop
    :param prometheus: optional prometheus client for metrics
    :return: None
    """
    super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

    self.db = Database().instance.db
    self.fs = Filesystem().instance.fs
    self.loop = loop
    self.lcm_tasks = lcm_tasks
    self.timeout = config["timeout"]
    self.ro_config = config["ro_config"]
    self.ng_ro = config["ro_config"].get("ng")
    # copy so that later modifications of the VCA section do not leak into
    # the shared config dictionary
    self.vca_config = config["VCA"].copy()

    # create N2VC connector
    self.n2vc = N2VCJujuConnector(
        log=self.logger,
        loop=self.loop,
        on_update_db=self._on_update_n2vc_db,
        fs=self.fs,
        db=self.db,
    )

    # connector for helm-based execution environments
    self.conn_helm_ee = LCMHelmConn(
        log=self.logger,
        loop=self.loop,
        vca_config=self.vca_config,
        on_update_db=self._on_update_n2vc_db,
    )

    # k8s connector for helm v2 charts
    self.k8sclusterhelm2 = K8sHelmConnector(
        kubectl_command=self.vca_config.get("kubectlpath"),
        helm_command=self.vca_config.get("helmpath"),
        log=self.logger,
        on_update_db=None,
        fs=self.fs,
        db=self.db,
    )

    # k8s connector for helm v3 charts
    self.k8sclusterhelm3 = K8sHelm3Connector(
        kubectl_command=self.vca_config.get("kubectlpath"),
        helm_command=self.vca_config.get("helm3path"),
        fs=self.fs,
        log=self.logger,
        db=self.db,
        on_update_db=None,
    )

    # k8s connector for juju bundles
    self.k8sclusterjuju = K8sJujuConnector(
        kubectl_command=self.vca_config.get("kubectlpath"),
        juju_command=self.vca_config.get("jujupath"),
        log=self.logger,
        loop=self.loop,
        on_update_db=self._on_update_k8s_db,
        fs=self.fs,
        db=self.db,
    )

    # maps a KDU deployment type to the k8s connector that handles it.
    # NOTE(review): "chart" resolves to the helm3 connector, not helm2 —
    # confirm this is intended and not a typo for self.k8sclusterhelm2.
    self.k8scluster_map = {
        "helm-chart": self.k8sclusterhelm2,
        "helm-chart-v3": self.k8sclusterhelm3,
        "chart": self.k8sclusterhelm3,
        "juju-bundle": self.k8sclusterjuju,
        "juju": self.k8sclusterjuju,
    }

    # maps a VCA (charm) type to the connector that executes it
    self.vca_map = {
        "lxc_proxy_charm": self.n2vc,
        "native_charm": self.n2vc,
        "k8s_proxy_charm": self.n2vc,
        "helm": self.conn_helm_ee,
        "helm-v3": self.conn_helm_ee,
    }

    self.prometheus = prometheus

    # create RO client
    self.RO = NgRoClient(self.loop, **self.ro_config)
186
187 @staticmethod
188 def increment_ip_mac(ip_mac, vm_index=1):
189 if not isinstance(ip_mac, str):
190 return ip_mac
191 try:
192 # try with ipv4 look for last dot
193 i = ip_mac.rfind(".")
194 if i > 0:
195 i += 1
196 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
197 # try with ipv6 or mac look for last colon. Operate in hex
198 i = ip_mac.rfind(":")
199 if i > 0:
200 i += 1
201 # format in hex, len can be 2 for mac or 4 for ipv6
202 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
203 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
204 )
205 except Exception:
206 pass
207 return None
208
def _on_update_ro_db(self, nsrs_id, ro_descriptor):
    """Persist the RO descriptor into the nsrs record's deploymentStatus.

    Best-effort: any failure is logged as a warning, never raised.
    :param nsrs_id: _id of the nsrs record to update
    :param ro_descriptor: descriptor received from RO
    """
    try:
        # TODO filter RO descriptor fields...

        # write to database
        self.update_db_2("nsrs", nsrs_id, {"deploymentStatus": ro_descriptor})
    except Exception as e:
        self.logger.warn(
            "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
        )
226
async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
    """Callback invoked by N2VC when charm data changes; refreshes the nsrs record.

    Reads the nsrs record, fetches the current VCA status from juju, updates
    vcaStatus / configurationStatus, and recomputes nsState between READY and
    DEGRADED from the juju machine/application statuses. Best-effort: errors
    other than cancellation/timeout are logged and swallowed.

    :param table: source table of the change (unused here; nsrs is read directly)
    :param filter: db filter, must contain "_id" of the nsrs record
    :param path: dotted path of the changed data; its last component is the VCA index
    :param updated_data: changed data (unused here)
    :param vca_id: optional VCA identifier forwarded to n2vc
    """
    # remove last dot from path (if exists)
    if path.endswith("."):
        path = path[:-1]

    try:
        nsr_id = filter.get("_id")

        # read ns record from database
        nsr = self.db.get_one(table="nsrs", q_filter=filter)
        current_ns_status = nsr.get("nsState")

        # get vca status for NS
        status_dict = await self.n2vc.get_status(
            namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
        )

        # vcaStatus
        db_dict = dict()
        db_dict["vcaStatus"] = status_dict
        await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)

        # update configurationStatus for this VCA
        try:
            # the VCA index is the last dotted component of the path
            vca_index = int(path[path.rfind(".") + 1 :])

            vca_list = deep_get(
                target_dict=nsr, key_list=("_admin", "deployed", "VCA")
            )
            vca_status = vca_list[vca_index].get("status")

            configuration_status_list = nsr.get("configurationStatus")
            config_status = configuration_status_list[vca_index].get("status")

            # NOTE(review): db_dict has no "configurationStatus" key at this
            # point, so either assignment below raises KeyError, which is
            # silently swallowed by the except clause — confirm whether the
            # configurationStatus update ever takes effect.
            if config_status == "BROKEN" and vca_status != "failed":
                db_dict["configurationStatus"][vca_index] = "READY"
            elif config_status != "BROKEN" and vca_status == "failed":
                db_dict["configurationStatus"][vca_index] = "BROKEN"
        except Exception as e:
            # not update configurationStatus
            self.logger.debug("Error updating vca_index (ignore): {}".format(e))

        # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
        # if nsState = 'DEGRADED' check if all is OK
        is_degraded = False
        if current_ns_status in ("READY", "DEGRADED"):
            error_description = ""
            # check machines
            if status_dict.get("machines"):
                for machine_id in status_dict.get("machines"):
                    machine = status_dict.get("machines").get(machine_id)
                    # check machine agent-status
                    if machine.get("agent-status"):
                        s = machine.get("agent-status").get("status")
                        if s != "started":
                            is_degraded = True
                            error_description += (
                                "machine {} agent-status={} ; ".format(
                                    machine_id, s
                                )
                            )
                    # check machine instance status
                    if machine.get("instance-status"):
                        s = machine.get("instance-status").get("status")
                        if s != "running":
                            is_degraded = True
                            error_description += (
                                "machine {} instance-status={} ; ".format(
                                    machine_id, s
                                )
                            )
            # check applications
            if status_dict.get("applications"):
                for app_id in status_dict.get("applications"):
                    app = status_dict.get("applications").get(app_id)
                    # check application status
                    if app.get("status"):
                        s = app.get("status").get("status")
                        if s != "active":
                            is_degraded = True
                            error_description += (
                                "application {} status={} ; ".format(app_id, s)
                            )

            if error_description:
                db_dict["errorDescription"] = error_description
            if current_ns_status == "READY" and is_degraded:
                db_dict["nsState"] = "DEGRADED"
            if current_ns_status == "DEGRADED" and not is_degraded:
                db_dict["nsState"] = "READY"

        # write to database
        self.update_db_2("nsrs", nsr_id, db_dict)

    except (asyncio.CancelledError, asyncio.TimeoutError):
        raise
    except Exception as e:
        self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
329
async def _on_update_k8s_db(
    self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
):
    """
    Updating vca status in NSR record
    :param cluster_uuid: UUID of a k8s cluster
    :param kdu_instance: The unique name of the KDU instance
    :param filter: To get nsr_id
    :param vca_id: optional VCA identifier forwarded to the k8s connector
    :cluster_type: The cluster type (juju, k8s)
    :return: none
    """

    # NOTE(review): filter defaults to None but is dereferenced here before
    # the try block — calling without filter raises AttributeError; confirm
    # callers always pass it.
    nsr_id = filter.get("_id")
    try:
        # query the connector matching this cluster type for the KDU status
        vca_status = await self.k8scluster_map[cluster_type].status_kdu(
            cluster_uuid=cluster_uuid,
            kdu_instance=kdu_instance,
            yaml_format=False,
            complete_status=True,
            vca_id=vca_id,
        )

        # vcaStatus, keyed by the ns record id
        db_dict = dict()
        db_dict["vcaStatus"] = {nsr_id: vca_status}

        if cluster_type in ("juju-bundle", "juju"):
            # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
            # status in a similar way between Juju Bundles and Helm Charts on this side
            await self.k8sclusterjuju.update_vca_status(
                db_dict["vcaStatus"],
                kdu_instance,
                vca_id=vca_id,
            )

        self.logger.debug(
            f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
        )

        # write to database
        self.update_db_2("nsrs", nsr_id, db_dict)
    except (asyncio.CancelledError, asyncio.TimeoutError):
        raise
    except Exception as e:
        self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
378
@staticmethod
def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
    """Render cloud-init text as a Jinja2 template with the given params.

    Uses StrictUndefined so that any variable missing from additional_params
    raises, which is converted into an LcmException pointing the user at the
    'additionalParamsForVnf/Vdu' instantiation block.
    :raises LcmException: on undefined variables or Jinja2 template errors
    """
    try:
        environment = Environment(undefined=StrictUndefined)
        return environment.from_string(cloud_init_text).render(
            additional_params or {}
        )
    except UndefinedError as e:
        # a template variable was not provided at instantiation time
        raise LcmException(
            "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
            "file, must be provided in the instantiation parameters inside the "
            "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
        )
    except (TemplateError, TemplateNotFound) as e:
        raise LcmException(
            "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
                vnfd_id, vdu_id, e
            )
        )
397
def _get_vdu_cloud_init_content(self, vdu, vnfd):
    """Return the cloud-init content for a VDU, or None if it has none.

    Reads the file referenced by "cloud-init-file" from the package storage,
    or falls back to the inline "cloud-init" text.
    :raises LcmException: when the cloud-init file cannot be read
    """
    cloud_init_file = None
    try:
        if vdu.get("cloud-init-file"):
            storage = vnfd["_admin"]["storage"]
            cloud_init_file = "{}/{}/cloud_init/{}".format(
                storage["folder"],
                storage["pkg-dir"],
                vdu["cloud-init-file"],
            )
            with self.fs.file_open(cloud_init_file, "r") as ci_file:
                return ci_file.read()
        if vdu.get("cloud-init"):
            return vdu["cloud-init"]
        return None
    except FsException as e:
        raise LcmException(
            "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
                vnfd["id"], vdu["id"], cloud_init_file, e
            )
        )
420
def _get_vdu_additional_params(self, db_vnfr, vdu_id):
    """Return the parsed additionalParams of the VDUR matching vdu_id.

    :param db_vnfr: vnfr database record (may lack a "vdur" list)
    :param vdu_id: value matched against each vdur's "vdu-id-ref"
    :return: the result of parse_yaml_strings over the vdur's
        additionalParams (None/missing handled by that helper)
    """
    # guard with `or ()`: db_vnfr.get("vdur") returns None when the key is
    # absent, and iterating None raises TypeError
    vdur = next(
        (
            vdur
            for vdur in db_vnfr.get("vdur") or ()
            if vdu_id == vdur["vdu-id-ref"]
        ),
        {},
    )
    additional_params = vdur.get("additionalParams")
    return parse_yaml_strings(additional_params)
428
def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
    """
    Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
    :param vnfd: input vnfd
    :param new_id: overrides vnf id if provided
    :param additionalParams: Instantiation params for VNFs provided
    :param nsrId: Id of the NSR
    :return: copy of vnfd
    """
    vnfd_RO = deepcopy(vnfd)
    # remove unused by RO configuration, monitoring, scaling and internal keys
    for ro_unused_key in (
        "_id",
        "_admin",
        "monitoring-param",
        "scaling-group-descriptor",
        "kdu",
        "k8s-cluster",
    ):
        vnfd_RO.pop(ro_unused_key, None)
    if new_id:
        vnfd_RO["id"] = new_id

    # strip cloud-init data: it is parsed elsewhere with the provided
    # variables using Jinja2 and not sent as part of the descriptor
    for vdu in get_iterable(vnfd_RO, "vdu"):
        vdu.pop("cloud-init-file", None)
        vdu.pop("cloud-init", None)
    return vnfd_RO
454
455 @staticmethod
456 def ip_profile_2_RO(ip_profile):
457 RO_ip_profile = deepcopy(ip_profile)
458 if "dns-server" in RO_ip_profile:
459 if isinstance(RO_ip_profile["dns-server"], list):
460 RO_ip_profile["dns-address"] = []
461 for ds in RO_ip_profile.pop("dns-server"):
462 RO_ip_profile["dns-address"].append(ds["address"])
463 else:
464 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
465 if RO_ip_profile.get("ip-version") == "ipv4":
466 RO_ip_profile["ip-version"] = "IPv4"
467 if RO_ip_profile.get("ip-version") == "ipv6":
468 RO_ip_profile["ip-version"] = "IPv6"
469 if "dhcp-params" in RO_ip_profile:
470 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
471 return RO_ip_profile
472
473 def _get_ro_vim_id_for_vim_account(self, vim_account):
474 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
475 if db_vim["_admin"]["operationalState"] != "ENABLED":
476 raise LcmException(
477 "VIM={} is not available. operationalState={}".format(
478 vim_account, db_vim["_admin"]["operationalState"]
479 )
480 )
481 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
482 return RO_vim_id
483
484 def get_ro_wim_id_for_wim_account(self, wim_account):
485 if isinstance(wim_account, str):
486 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
487 if db_wim["_admin"]["operationalState"] != "ENABLED":
488 raise LcmException(
489 "WIM={} is not available. operationalState={}".format(
490 wim_account, db_wim["_admin"]["operationalState"]
491 )
492 )
493 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
494 return RO_wim_id
495 else:
496 return wim_account
497
def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
    """Scale a vnfr in the database, adding and/or removing vdur entries.

    :param db_vnfr: vnfr record; its "vdur" list is refreshed from the db on return
    :param vdu_create: dict vdu-id-ref -> number of instances to add
    :param vdu_delete: dict vdu-id-ref -> number of instances to remove
    :param mark_delete: if True, only mark vdurs as DELETING instead of pulling them
    :raises LcmException: scaling out when neither a vdur nor a vdur-template exists
    """

    db_vdu_push_list = []
    template_vdur = []
    db_update = {"_admin.modified": time()}
    if vdu_create:
        for vdu_id, vdu_count in vdu_create.items():
            # use the last existing vdur of this vdu as the base to clone
            vdur = next(
                (
                    vdur
                    for vdur in reversed(db_vnfr["vdur"])
                    if vdur["vdu-id-ref"] == vdu_id
                ),
                None,
            )
            if not vdur:
                # Read the template saved in the db:
                self.logger.debug(f"No vdur in the database. Using the vdur-template to scale")
                vdur_template = db_vnfr.get("vdur-template")
                if not vdur_template:
                    raise LcmException(
                        "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                            vdu_id
                        )
                    )
                vdur = vdur_template[0]
                # Delete the template from the database after using it
                self.db.set_one(
                    "vnfrs",
                    {"_id": db_vnfr["_id"]},
                    None,
                    pull={"vdur-template": {"_id": vdur['_id']}},
                )
            for count in range(vdu_count):
                vdur_copy = deepcopy(vdur)
                # new instances start in BUILD with no address assigned yet
                vdur_copy["status"] = "BUILD"
                vdur_copy["status-detailed"] = None
                vdur_copy["ip-address"] = None
                vdur_copy["_id"] = str(uuid4())
                vdur_copy["count-index"] += count + 1
                vdur_copy["id"] = "{}-{}".format(
                    vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                )
                vdur_copy.pop("vim_info", None)
                for iface in vdur_copy["interfaces"]:
                    # fixed-ip/fixed-mac interfaces get the base address
                    # advanced per new instance; otherwise the address is
                    # dropped so the VIM assigns a fresh one
                    if iface.get("fixed-ip"):
                        iface["ip-address"] = self.increment_ip_mac(
                            iface["ip-address"], count + 1
                        )
                    else:
                        iface.pop("ip-address", None)
                    if iface.get("fixed-mac"):
                        iface["mac-address"] = self.increment_ip_mac(
                            iface["mac-address"], count + 1
                        )
                    else:
                        iface.pop("mac-address", None)
                    if db_vnfr["vdur"]:
                        iface.pop(
                            "mgmt_vnf", None
                        )  # only first vdu can be managment of vnf
                db_vdu_push_list.append(vdur_copy)
                # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
    if vdu_delete:
        if len(db_vnfr["vdur"]) == 1:
            # The scale will move to 0 instances
            self.logger.debug(f"Scaling to 0 !, creating the template with the last vdur")
            template_vdur = [db_vnfr["vdur"][0]]
        for vdu_id, vdu_count in vdu_delete.items():
            if mark_delete:
                # mark the last vdu_count matching vdurs as DELETING
                indexes_to_delete = [
                    iv[0]
                    for iv in enumerate(db_vnfr["vdur"])
                    if iv[1]["vdu-id-ref"] == vdu_id
                ]
                db_update.update(
                    {
                        "vdur.{}.status".format(i): "DELETING"
                        for i in indexes_to_delete[-vdu_count:]
                    }
                )
            else:
                # it must be deleted one by one because common.db does not allow otherwise
                vdus_to_delete = [
                    v
                    for v in reversed(db_vnfr["vdur"])
                    if v["vdu-id-ref"] == vdu_id
                ]
                for vdu in vdus_to_delete[:vdu_count]:
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur": {"_id": vdu["_id"]}},
                    )
    db_push = {}
    if db_vdu_push_list:
        db_push["vdur"] = db_vdu_push_list
    if template_vdur:
        db_push["vdur-template"] = template_vdur
    if not db_push:
        db_push = None
    db_vnfr["vdur-template"] = template_vdur
    self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
    # modify passed dictionary db_vnfr
    db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
    db_vnfr["vdur"] = db_vnfr_["vdur"]
604
def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
    """
    Updates database nsr with the RO info for the created vld
    :param ns_update_nsr: dictionary to be filled with the updated info
    :param db_nsr: content of db_nsr. This is also modified
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """

    for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
        # locate the RO net that corresponds to this vld
        matching_net = next(
            (
                net_RO
                for net_RO in get_iterable(nsr_desc_RO, "nets")
                if vld["id"] == net_RO.get("ns_net_osm_id")
            ),
            None,
        )
        if matching_net is None:
            raise LcmException(
                "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
            )
        vld["vim-id"] = matching_net.get("vim_net_id")
        vld["name"] = matching_net.get("vim_name")
        vld["status"] = matching_net.get("status")
        vld["status-detailed"] = matching_net.get("error_msg")
        ns_update_nsr["vld.{}".format(vld_index)] = vld
628
def set_vnfr_at_error(self, db_vnfrs, error_text):
    """Mark every vnfr (and its vdurs without a status) as ERROR in the db.

    Only vdurs that have no "status" yet are touched. DbException is logged
    and swallowed so this can be safely called from error paths.
    :param db_vnfrs: dict member-vnf-index -> vnfr record (records are mutated)
    :param error_text: detail text stored into each touched vdur
    """
    try:
        for db_vnfr in db_vnfrs.values():
            vnfr_update = {"status": "ERROR"}
            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                if "status" in vdur:
                    continue
                vdur["status"] = "ERROR"
                vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
                if error_text:
                    vdur["status-detailed"] = str(error_text)
                    # NOTE(review): the db update stores the literal "ERROR"
                    # while the in-memory vdur gets error_text — confirm intended
                    vnfr_update[
                        "vdur.{}.status-detailed".format(vdu_index)
                    ] = "ERROR"
            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
    except DbException as e:
        self.logger.error("Cannot update vnf. {}".format(e))
645
def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
    """
    Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
    :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """
    for vnf_index, db_vnfr in db_vnfrs.items():
        # find the RO vnf entry matching this member index; the for/else
        # raises if none matches
        for vnf_RO in nsr_desc_RO["vnfs"]:
            if vnf_RO["member_vnf_index"] != vnf_index:
                continue
            vnfr_update = {}
            if vnf_RO.get("ip_address"):
                # RO may report several addresses separated by ';'; keep the first
                db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                    "ip_address"
                ].split(";")[0]
            elif not db_vnfr.get("ip-address"):
                if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                    raise LcmExceptionNoMgmtIP(
                        "ns member_vnf_index '{}' has no IP address".format(
                            vnf_index
                        )
                    )

            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                # match vdurs to RO vms by vdu id and replica count-index;
                # PDUs are not instantiated by RO and are skipped
                vdur_RO_count_index = 0
                if vdur.get("pdu-type"):
                    continue
                for vdur_RO in get_iterable(vnf_RO, "vms"):
                    if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                        continue
                    if vdur["count-index"] != vdur_RO_count_index:
                        vdur_RO_count_index += 1
                        continue
                    vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                    if vdur_RO.get("ip_address"):
                        vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                    else:
                        vdur["ip-address"] = None
                    vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                    vdur["name"] = vdur_RO.get("vim_name")
                    vdur["status"] = vdur_RO.get("status")
                    vdur["status-detailed"] = vdur_RO.get("error_msg")
                    # copy interface addresses from the matching RO interface
                    for ifacer in get_iterable(vdur, "interfaces"):
                        for interface_RO in get_iterable(vdur_RO, "interfaces"):
                            if ifacer["name"] == interface_RO.get("internal_name"):
                                ifacer["ip-address"] = interface_RO.get(
                                    "ip_address"
                                )
                                ifacer["mac-address"] = interface_RO.get(
                                    "mac_address"
                                )
                                break
                        else:
                            raise LcmException(
                                "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                "from VIM info".format(
                                    vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                )
                            )
                    vnfr_update["vdur.{}".format(vdu_index)] = vdur
                    break
                else:
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                        "VIM info".format(
                            vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                        )
                    )

            for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                # copy internal vld info from the matching RO net
                for net_RO in get_iterable(nsr_desc_RO, "nets"):
                    if vld["id"] != net_RO.get("vnf_net_osm_id"):
                        continue
                    vld["vim-id"] = net_RO.get("vim_net_id")
                    vld["name"] = net_RO.get("vim_name")
                    vld["status"] = net_RO.get("status")
                    vld["status-detailed"] = net_RO.get("error_msg")
                    vnfr_update["vld.{}".format(vld_index)] = vld
                    break
                else:
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]
                        )
                    )

            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
            break

        else:
            raise LcmException(
                "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                    vnf_index
                )
            )
742
743 def _get_ns_config_info(self, nsr_id):
744 """
745 Generates a mapping between vnf,vdu elements and the N2VC id
746 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
747 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
748 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
749 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
750 """
751 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
752 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
753 mapping = {}
754 ns_config_info = {"osm-config-mapping": mapping}
755 for vca in vca_deployed_list:
756 if not vca["member-vnf-index"]:
757 continue
758 if not vca["vdu_id"]:
759 mapping[vca["member-vnf-index"]] = vca["application"]
760 else:
761 mapping[
762 "{}.{}.{}".format(
763 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
764 )
765 ] = vca["application"]
766 return ns_config_info
767
768 async def _instantiate_ng_ro(
769 self,
770 logging_text,
771 nsr_id,
772 nsd,
773 db_nsr,
774 db_nslcmop,
775 db_vnfrs,
776 db_vnfds,
777 n2vc_key_list,
778 stage,
779 start_deploy,
780 timeout_ns_deploy,
781 ):
782
783 db_vims = {}
784
785 def get_vim_account(vim_account_id):
786 nonlocal db_vims
787 if vim_account_id in db_vims:
788 return db_vims[vim_account_id]
789 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
790 db_vims[vim_account_id] = db_vim
791 return db_vim
792
793 # modify target_vld info with instantiation parameters
794 def parse_vld_instantiation_params(
795 target_vim, target_vld, vld_params, target_sdn
796 ):
797 if vld_params.get("ip-profile"):
798 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
799 "ip-profile"
800 ]
801 if vld_params.get("provider-network"):
802 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
803 "provider-network"
804 ]
805 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
806 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
807 "provider-network"
808 ]["sdn-ports"]
809 if vld_params.get("wimAccountId"):
810 target_wim = "wim:{}".format(vld_params["wimAccountId"])
811 target_vld["vim_info"][target_wim] = {}
812 for param in ("vim-network-name", "vim-network-id"):
813 if vld_params.get(param):
814 if isinstance(vld_params[param], dict):
815 for vim, vim_net in vld_params[param].items():
816 other_target_vim = "vim:" + vim
817 populate_dict(
818 target_vld["vim_info"],
819 (other_target_vim, param.replace("-", "_")),
820 vim_net,
821 )
822 else: # isinstance str
823 target_vld["vim_info"][target_vim][
824 param.replace("-", "_")
825 ] = vld_params[param]
826 if vld_params.get("common_id"):
827 target_vld["common_id"] = vld_params.get("common_id")
828
829 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
830 def update_ns_vld_target(target, ns_params):
831 for vnf_params in ns_params.get("vnf", ()):
832 if vnf_params.get("vimAccountId"):
833 target_vnf = next(
834 (
835 vnfr
836 for vnfr in db_vnfrs.values()
837 if vnf_params["member-vnf-index"]
838 == vnfr["member-vnf-index-ref"]
839 ),
840 None,
841 )
842 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
843 if not vdur:
844 return
845 for a_index, a_vld in enumerate(target["ns"]["vld"]):
846 target_vld = find_in_list(
847 get_iterable(vdur, "interfaces"),
848 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
849 )
850 if target_vld:
851 if vnf_params.get("vimAccountId") not in a_vld.get(
852 "vim_info", {}
853 ):
854 target["ns"]["vld"][a_index].get("vim_info").update(
855 {
856 "vim:{}".format(vnf_params["vimAccountId"]): {
857 "vim_network_name": ""
858 }
859 }
860 )
861
862 nslcmop_id = db_nslcmop["_id"]
863 target = {
864 "name": db_nsr["name"],
865 "ns": {"vld": []},
866 "vnf": [],
867 "image": deepcopy(db_nsr["image"]),
868 "flavor": deepcopy(db_nsr["flavor"]),
869 "action_id": nslcmop_id,
870 "cloud_init_content": {},
871 }
872 for image in target["image"]:
873 image["vim_info"] = {}
874 for flavor in target["flavor"]:
875 flavor["vim_info"] = {}
876 if db_nsr.get("affinity-or-anti-affinity-group"):
877 target["affinity-or-anti-affinity-group"] = deepcopy(db_nsr["affinity-or-anti-affinity-group"])
878 for affinity_or_anti_affinity_group in target["affinity-or-anti-affinity-group"]:
879 affinity_or_anti_affinity_group["vim_info"] = {}
880
881 if db_nslcmop.get("lcmOperationType") != "instantiate":
882 # get parameters of instantiation:
883 db_nslcmop_instantiate = self.db.get_list(
884 "nslcmops",
885 {
886 "nsInstanceId": db_nslcmop["nsInstanceId"],
887 "lcmOperationType": "instantiate",
888 },
889 )[-1]
890 ns_params = db_nslcmop_instantiate.get("operationParams")
891 else:
892 ns_params = db_nslcmop.get("operationParams")
893 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
894 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
895
896 cp2target = {}
897 for vld_index, vld in enumerate(db_nsr.get("vld")):
898 target_vim = "vim:{}".format(ns_params["vimAccountId"])
899 target_vld = {
900 "id": vld["id"],
901 "name": vld["name"],
902 "mgmt-network": vld.get("mgmt-network", False),
903 "type": vld.get("type"),
904 "vim_info": {
905 target_vim: {
906 "vim_network_name": vld.get("vim-network-name"),
907 "vim_account_id": ns_params["vimAccountId"],
908 }
909 },
910 }
911 # check if this network needs SDN assist
912 if vld.get("pci-interfaces"):
913 db_vim = get_vim_account(ns_params["vimAccountId"])
914 if vim_config := db_vim.get("config"):
915 if sdnc_id := vim_config.get("sdn-controller"):
916 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
917 target_sdn = "sdn:{}".format(sdnc_id)
918 target_vld["vim_info"][target_sdn] = {
919 "sdn": True,
920 "target_vim": target_vim,
921 "vlds": [sdn_vld],
922 "type": vld.get("type"),
923 }
924
925 nsd_vnf_profiles = get_vnf_profiles(nsd)
926 for nsd_vnf_profile in nsd_vnf_profiles:
927 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
928 if cp["virtual-link-profile-id"] == vld["id"]:
929 cp2target[
930 "member_vnf:{}.{}".format(
931 cp["constituent-cpd-id"][0][
932 "constituent-base-element-id"
933 ],
934 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
935 )
936 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
937
938 # check at nsd descriptor, if there is an ip-profile
939 vld_params = {}
940 nsd_vlp = find_in_list(
941 get_virtual_link_profiles(nsd),
942 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
943 == vld["id"],
944 )
945 if (
946 nsd_vlp
947 and nsd_vlp.get("virtual-link-protocol-data")
948 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
949 ):
950 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
951 "l3-protocol-data"
952 ]
953 ip_profile_dest_data = {}
954 if "ip-version" in ip_profile_source_data:
955 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
956 "ip-version"
957 ]
958 if "cidr" in ip_profile_source_data:
959 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
960 "cidr"
961 ]
962 if "gateway-ip" in ip_profile_source_data:
963 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
964 "gateway-ip"
965 ]
966 if "dhcp-enabled" in ip_profile_source_data:
967 ip_profile_dest_data["dhcp-params"] = {
968 "enabled": ip_profile_source_data["dhcp-enabled"]
969 }
970 vld_params["ip-profile"] = ip_profile_dest_data
971
972 # update vld_params with instantiation params
973 vld_instantiation_params = find_in_list(
974 get_iterable(ns_params, "vld"),
975 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
976 )
977 if vld_instantiation_params:
978 vld_params.update(vld_instantiation_params)
979 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
980 target["ns"]["vld"].append(target_vld)
981 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
982 update_ns_vld_target(target, ns_params)
983
984 for vnfr in db_vnfrs.values():
985 vnfd = find_in_list(
986 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
987 )
988 vnf_params = find_in_list(
989 get_iterable(ns_params, "vnf"),
990 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
991 )
992 target_vnf = deepcopy(vnfr)
993 target_vim = "vim:{}".format(vnfr["vim-account-id"])
994 for vld in target_vnf.get("vld", ()):
995 # check if connected to a ns.vld, to fill target'
996 vnf_cp = find_in_list(
997 vnfd.get("int-virtual-link-desc", ()),
998 lambda cpd: cpd.get("id") == vld["id"],
999 )
1000 if vnf_cp:
1001 ns_cp = "member_vnf:{}.{}".format(
1002 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1003 )
1004 if cp2target.get(ns_cp):
1005 vld["target"] = cp2target[ns_cp]
1006
1007 vld["vim_info"] = {
1008 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1009 }
1010 # check if this network needs SDN assist
1011 target_sdn = None
1012 if vld.get("pci-interfaces"):
1013 db_vim = get_vim_account(vnfr["vim-account-id"])
1014 sdnc_id = db_vim["config"].get("sdn-controller")
1015 if sdnc_id:
1016 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1017 target_sdn = "sdn:{}".format(sdnc_id)
1018 vld["vim_info"][target_sdn] = {
1019 "sdn": True,
1020 "target_vim": target_vim,
1021 "vlds": [sdn_vld],
1022 "type": vld.get("type"),
1023 }
1024
1025 # check at vnfd descriptor, if there is an ip-profile
1026 vld_params = {}
1027 vnfd_vlp = find_in_list(
1028 get_virtual_link_profiles(vnfd),
1029 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1030 )
1031 if (
1032 vnfd_vlp
1033 and vnfd_vlp.get("virtual-link-protocol-data")
1034 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1035 ):
1036 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1037 "l3-protocol-data"
1038 ]
1039 ip_profile_dest_data = {}
1040 if "ip-version" in ip_profile_source_data:
1041 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1042 "ip-version"
1043 ]
1044 if "cidr" in ip_profile_source_data:
1045 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1046 "cidr"
1047 ]
1048 if "gateway-ip" in ip_profile_source_data:
1049 ip_profile_dest_data[
1050 "gateway-address"
1051 ] = ip_profile_source_data["gateway-ip"]
1052 if "dhcp-enabled" in ip_profile_source_data:
1053 ip_profile_dest_data["dhcp-params"] = {
1054 "enabled": ip_profile_source_data["dhcp-enabled"]
1055 }
1056
1057 vld_params["ip-profile"] = ip_profile_dest_data
1058 # update vld_params with instantiation params
1059 if vnf_params:
1060 vld_instantiation_params = find_in_list(
1061 get_iterable(vnf_params, "internal-vld"),
1062 lambda i_vld: i_vld["name"] == vld["id"],
1063 )
1064 if vld_instantiation_params:
1065 vld_params.update(vld_instantiation_params)
1066 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1067
1068 vdur_list = []
1069 for vdur in target_vnf.get("vdur", ()):
1070 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1071 continue # This vdu must not be created
1072 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1073
1074 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1075
1076 if ssh_keys_all:
1077 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1078 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1079 if (
1080 vdu_configuration
1081 and vdu_configuration.get("config-access")
1082 and vdu_configuration.get("config-access").get("ssh-access")
1083 ):
1084 vdur["ssh-keys"] = ssh_keys_all
1085 vdur["ssh-access-required"] = vdu_configuration[
1086 "config-access"
1087 ]["ssh-access"]["required"]
1088 elif (
1089 vnf_configuration
1090 and vnf_configuration.get("config-access")
1091 and vnf_configuration.get("config-access").get("ssh-access")
1092 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1093 ):
1094 vdur["ssh-keys"] = ssh_keys_all
1095 vdur["ssh-access-required"] = vnf_configuration[
1096 "config-access"
1097 ]["ssh-access"]["required"]
1098 elif ssh_keys_instantiation and find_in_list(
1099 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1100 ):
1101 vdur["ssh-keys"] = ssh_keys_instantiation
1102
1103 self.logger.debug("NS > vdur > {}".format(vdur))
1104
1105 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1106 # cloud-init
1107 if vdud.get("cloud-init-file"):
1108 vdur["cloud-init"] = "{}:file:{}".format(
1109 vnfd["_id"], vdud.get("cloud-init-file")
1110 )
1111 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1112 if vdur["cloud-init"] not in target["cloud_init_content"]:
1113 base_folder = vnfd["_admin"]["storage"]
1114 cloud_init_file = "{}/{}/cloud_init/{}".format(
1115 base_folder["folder"],
1116 base_folder["pkg-dir"],
1117 vdud.get("cloud-init-file"),
1118 )
1119 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1120 target["cloud_init_content"][
1121 vdur["cloud-init"]
1122 ] = ci_file.read()
1123 elif vdud.get("cloud-init"):
1124 vdur["cloud-init"] = "{}:vdu:{}".format(
1125 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1126 )
1127 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1128 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1129 "cloud-init"
1130 ]
1131 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1132 deploy_params_vdu = self._format_additional_params(
1133 vdur.get("additionalParams") or {}
1134 )
1135 deploy_params_vdu["OSM"] = get_osm_params(
1136 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1137 )
1138 vdur["additionalParams"] = deploy_params_vdu
1139
1140 # flavor
1141 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1142 if target_vim not in ns_flavor["vim_info"]:
1143 ns_flavor["vim_info"][target_vim] = {}
1144
1145 # deal with images
1146 # in case alternative images are provided we must check if they should be applied
1147 # for the vim_type, modify the vim_type taking into account
1148 ns_image_id = int(vdur["ns-image-id"])
1149 if vdur.get("alt-image-ids"):
1150 db_vim = get_vim_account(vnfr["vim-account-id"])
1151 vim_type = db_vim["vim_type"]
1152 for alt_image_id in vdur.get("alt-image-ids"):
1153 ns_alt_image = target["image"][int(alt_image_id)]
1154 if vim_type == ns_alt_image.get("vim-type"):
1155 # must use alternative image
1156 self.logger.debug(
1157 "use alternative image id: {}".format(alt_image_id)
1158 )
1159 ns_image_id = alt_image_id
1160 vdur["ns-image-id"] = ns_image_id
1161 break
1162 ns_image = target["image"][int(ns_image_id)]
1163 if target_vim not in ns_image["vim_info"]:
1164 ns_image["vim_info"][target_vim] = {}
1165
1166 # Affinity groups
1167 if vdur.get("affinity-or-anti-affinity-group-id"):
1168 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1169 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1170 if target_vim not in ns_ags["vim_info"]:
1171 ns_ags["vim_info"][target_vim] = {}
1172
1173 vdur["vim_info"] = {target_vim: {}}
1174 # instantiation parameters
1175 # if vnf_params:
1176 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
1177 # vdud["id"]), None)
1178 vdur_list.append(vdur)
1179 target_vnf["vdur"] = vdur_list
1180 target["vnf"].append(target_vnf)
1181
1182 desc = await self.RO.deploy(nsr_id, target)
1183 self.logger.debug("RO return > {}".format(desc))
1184 action_id = desc["action_id"]
1185 await self._wait_ng_ro(
1186 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage
1187 )
1188
1189 # Updating NSR
1190 db_nsr_update = {
1191 "_admin.deployed.RO.operational-status": "running",
1192 "detailed-status": " ".join(stage),
1193 }
1194 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1195 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1196 self._write_op_status(nslcmop_id, stage)
1197 self.logger.debug(
1198 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1199 )
1200 return
1201
    async def _wait_ng_ro(
        self,
        nsr_id,
        action_id,
        nslcmop_id=None,
        start_time=None,
        timeout=600,
        stage=None,
    ):
        """
        Poll NG-RO until the given action finishes or the timeout expires.

        :param nsr_id: ns record identity at RO
        :param action_id: RO action to poll (returned by a previous RO.deploy call)
        :param nslcmop_id: if provided together with stage, progress is persisted
            to the nsrs collection and the operation status on every detail change
        :param start_time: epoch seconds when the wait started; defaults to now
        :param timeout: maximum seconds to wait before raising NgRoException
        :param stage: 3-item list of progress strings; item [2] (vim specific)
            is overwritten here with the RO-reported detail
        :raises NgRoException: when RO reports "FAILED" or on timeout
        """
        detailed_status_old = None
        db_nsr_update = {}
        start_time = start_time or time()
        while time() <= start_time + timeout:
            desc_status = await self.RO.status(nsr_id, action_id)
            self.logger.debug("Wait NG RO > {}".format(desc_status))
            if desc_status["status"] == "FAILED":
                raise NgRoException(desc_status["details"])
            elif desc_status["status"] == "BUILD":
                # still in progress: surface the RO detail in the stage text
                if stage:
                    stage[2] = "VIM: ({})".format(desc_status["details"])
            elif desc_status["status"] == "DONE":
                if stage:
                    stage[2] = "Deployed at VIM"
                break
            else:
                assert False, "ROclient.check_ns_status returns unknown {}".format(
                    desc_status["status"]
                )
            # persist progress only when the detail text actually changed
            if stage and nslcmop_id and stage[2] != detailed_status_old:
                detailed_status_old = stage[2]
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
            await asyncio.sleep(15, loop=self.loop)
        else:  # timeout_ns_deploy
            raise NgRoException("Timeout waiting ns to deploy")
1238
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminate an NS at NG-RO: request deletion of all VIM resources, wait
        for completion and finally remove the nsr record from RO.

        :param logging_text: prefix to use at logging
        :param nsr_deployed: content of nsr["_admin"]["deployed"]; not read here,
            kept for signature compatibility with the classic-RO variant
        :param nsr_id: ns record identity
        :param nslcmop_id: ns operation identity, used as the RO action_id
        :param stage: 3-item list of progress strings; item [2] is updated here
        :raises LcmException: when RO reports an error deleting (a 404 "already
            deleted" answer is tolerated and treated as success)
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # deploying an empty target removes every resource of this ns at RO
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage
            )

            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except Exception as e:
            if isinstance(e, NgRoException) and e.http_code == 404:  # not found
                # already gone at RO: record as deleted and continue normally
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )

        # always record the final stage/status, even after a failure
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1305
1306 async def instantiate_RO(
1307 self,
1308 logging_text,
1309 nsr_id,
1310 nsd,
1311 db_nsr,
1312 db_nslcmop,
1313 db_vnfrs,
1314 db_vnfds,
1315 n2vc_key_list,
1316 stage,
1317 ):
1318 """
1319 Instantiate at RO
1320 :param logging_text: preffix text to use at logging
1321 :param nsr_id: nsr identity
1322 :param nsd: database content of ns descriptor
1323 :param db_nsr: database content of ns record
1324 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1325 :param db_vnfrs:
1326 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1327 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1328 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1329 :return: None or exception
1330 """
1331 try:
1332 start_deploy = time()
1333 ns_params = db_nslcmop.get("operationParams")
1334 if ns_params and ns_params.get("timeout_ns_deploy"):
1335 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1336 else:
1337 timeout_ns_deploy = self.timeout.get(
1338 "ns_deploy", self.timeout_ns_deploy
1339 )
1340
1341 # Check for and optionally request placement optimization. Database will be updated if placement activated
1342 stage[2] = "Waiting for Placement."
1343 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1344 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1345 for vnfr in db_vnfrs.values():
1346 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1347 break
1348 else:
1349 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1350
1351 return await self._instantiate_ng_ro(
1352 logging_text,
1353 nsr_id,
1354 nsd,
1355 db_nsr,
1356 db_nslcmop,
1357 db_vnfrs,
1358 db_vnfds,
1359 n2vc_key_list,
1360 stage,
1361 start_deploy,
1362 timeout_ns_deploy,
1363 )
1364 except Exception as e:
1365 stage[2] = "ERROR deploying at VIM"
1366 self.set_vnfr_at_error(db_vnfrs, str(e))
1367 self.logger.error(
1368 "Error deploying at VIM {}".format(e),
1369 exc_info=not isinstance(
1370 e,
1371 (
1372 ROclient.ROClientException,
1373 LcmException,
1374 DbException,
1375 NgRoException,
1376 ),
1377 ),
1378 )
1379 raise
1380
1381 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1382 """
1383 Wait for kdu to be up, get ip address
1384 :param logging_text: prefix use for logging
1385 :param nsr_id:
1386 :param vnfr_id:
1387 :param kdu_name:
1388 :return: IP address
1389 """
1390
1391 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1392 nb_tries = 0
1393
1394 while nb_tries < 360:
1395 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1396 kdur = next(
1397 (
1398 x
1399 for x in get_iterable(db_vnfr, "kdur")
1400 if x.get("kdu-name") == kdu_name
1401 ),
1402 None,
1403 )
1404 if not kdur:
1405 raise LcmException(
1406 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1407 )
1408 if kdur.get("status"):
1409 if kdur["status"] in ("READY", "ENABLED"):
1410 return kdur.get("ip-address")
1411 else:
1412 raise LcmException(
1413 "target KDU={} is in error state".format(kdu_name)
1414 )
1415
1416 await asyncio.sleep(10, loop=self.loop)
1417 nb_tries += 1
1418 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1419
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip addres at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id: ns record identity
        :param vnfr_id: vnf record identity
        :param vdu_id: target vdu id; None means the VNF mgmt address is used
        :param vdu_index: count-index of the target vdu (VDU case only)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        :raises LcmException: on error state, missing vdur, or retries exhausted
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0  # retries of the key-injection call (classic RO path)
        target_vdu_id = None
        ro_retries = 0  # iterations of the outer wait loop

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # pick the vdur owning the VNF management ip
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs have no VM to wait for; otherwise require ACTIVE status
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # key injection is not possible on a physical deployment unit
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: injection is modelled as a deploy action
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600)
                        break
                    else:
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                        else:
                            raise ROclient.ROClientException(
                                "error injecting key: {}".format(
                                    result.get("description")
                                )
                            )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # classic-RO injection may fail transiently; retry up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: the ip address alone is the result
                break

        return ip_address
1596
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: ns record identity
        :param vca_deployed_list: content of nsr["_admin"]["deployed"]["VCA"]
        :param vca_index: index of this VCA inside vca_deployed_list
        :raises LcmException: when a dependency breaks or the wait times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        timeout = 300
        # NOTE(review): timeout is decremented once per 10-second sleep, so the
        # loop allows up to ~300 iterations (~50 minutes), not 300 seconds —
        # confirm whether that is the intended budget.
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # an NS-level VCA (no member-vnf-index) depends on every other
                # VCA; a VNF-level VCA depends only on VCAs of the same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # a dependency is still in progress: stop scanning,
                        # sleep and re-check from the database
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1634
1635 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1636 vca_id = None
1637 if db_vnfr:
1638 vca_id = deep_get(db_vnfr, ("vca-id",))
1639 elif db_nsr:
1640 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1641 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1642 return vca_id
1643
1644 async def instantiate_N2VC(
1645 self,
1646 logging_text,
1647 vca_index,
1648 nsi_id,
1649 db_nsr,
1650 db_vnfr,
1651 vdu_id,
1652 kdu_name,
1653 vdu_index,
1654 config_descriptor,
1655 deploy_params,
1656 base_folder,
1657 nslcmop_id,
1658 stage,
1659 vca_type,
1660 vca_name,
1661 ee_config_descriptor,
1662 ):
1663 nsr_id = db_nsr["_id"]
1664 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1665 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1666 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1667 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1668 db_dict = {
1669 "collection": "nsrs",
1670 "filter": {"_id": nsr_id},
1671 "path": db_update_entry,
1672 }
1673 step = ""
1674 try:
1675
1676 element_type = "NS"
1677 element_under_configuration = nsr_id
1678
1679 vnfr_id = None
1680 if db_vnfr:
1681 vnfr_id = db_vnfr["_id"]
1682 osm_config["osm"]["vnf_id"] = vnfr_id
1683
1684 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1685
1686 if vca_type == "native_charm":
1687 index_number = 0
1688 else:
1689 index_number = vdu_index or 0
1690
1691 if vnfr_id:
1692 element_type = "VNF"
1693 element_under_configuration = vnfr_id
1694 namespace += ".{}-{}".format(vnfr_id, index_number)
1695 if vdu_id:
1696 namespace += ".{}-{}".format(vdu_id, index_number)
1697 element_type = "VDU"
1698 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1699 osm_config["osm"]["vdu_id"] = vdu_id
1700 elif kdu_name:
1701 namespace += ".{}".format(kdu_name)
1702 element_type = "KDU"
1703 element_under_configuration = kdu_name
1704 osm_config["osm"]["kdu_name"] = kdu_name
1705
1706 # Get artifact path
1707 artifact_path = "{}/{}/{}/{}".format(
1708 base_folder["folder"],
1709 base_folder["pkg-dir"],
1710 "charms"
1711 if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1712 else "helm-charts",
1713 vca_name,
1714 )
1715
1716 self.logger.debug("Artifact path > {}".format(artifact_path))
1717
1718 # get initial_config_primitive_list that applies to this element
1719 initial_config_primitive_list = config_descriptor.get(
1720 "initial-config-primitive"
1721 )
1722
1723 self.logger.debug(
1724 "Initial config primitive list > {}".format(
1725 initial_config_primitive_list
1726 )
1727 )
1728
1729 # add config if not present for NS charm
1730 ee_descriptor_id = ee_config_descriptor.get("id")
1731 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1732 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1733 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1734 )
1735
1736 self.logger.debug(
1737 "Initial config primitive list #2 > {}".format(
1738 initial_config_primitive_list
1739 )
1740 )
1741 # n2vc_redesign STEP 3.1
1742 # find old ee_id if exists
1743 ee_id = vca_deployed.get("ee_id")
1744
1745 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1746 # create or register execution environment in VCA
1747 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1748
1749 self._write_configuration_status(
1750 nsr_id=nsr_id,
1751 vca_index=vca_index,
1752 status="CREATING",
1753 element_under_configuration=element_under_configuration,
1754 element_type=element_type,
1755 )
1756
1757 step = "create execution environment"
1758 self.logger.debug(logging_text + step)
1759
1760 ee_id = None
1761 credentials = None
1762 if vca_type == "k8s_proxy_charm":
1763 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1764 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1765 namespace=namespace,
1766 artifact_path=artifact_path,
1767 db_dict=db_dict,
1768 vca_id=vca_id,
1769 )
1770 elif vca_type == "helm" or vca_type == "helm-v3":
1771 ee_id, credentials = await self.vca_map[
1772 vca_type
1773 ].create_execution_environment(
1774 namespace=namespace,
1775 reuse_ee_id=ee_id,
1776 db_dict=db_dict,
1777 config=osm_config,
1778 artifact_path=artifact_path,
1779 vca_type=vca_type,
1780 )
1781 else:
1782 ee_id, credentials = await self.vca_map[
1783 vca_type
1784 ].create_execution_environment(
1785 namespace=namespace,
1786 reuse_ee_id=ee_id,
1787 db_dict=db_dict,
1788 vca_id=vca_id,
1789 )
1790
1791 elif vca_type == "native_charm":
1792 step = "Waiting to VM being up and getting IP address"
1793 self.logger.debug(logging_text + step)
1794 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1795 logging_text,
1796 nsr_id,
1797 vnfr_id,
1798 vdu_id,
1799 vdu_index,
1800 user=None,
1801 pub_key=None,
1802 )
1803 credentials = {"hostname": rw_mgmt_ip}
1804 # get username
1805 username = deep_get(
1806 config_descriptor, ("config-access", "ssh-access", "default-user")
1807 )
1808 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1809 # merged. Meanwhile let's get username from initial-config-primitive
1810 if not username and initial_config_primitive_list:
1811 for config_primitive in initial_config_primitive_list:
1812 for param in config_primitive.get("parameter", ()):
1813 if param["name"] == "ssh-username":
1814 username = param["value"]
1815 break
1816 if not username:
1817 raise LcmException(
1818 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1819 "'config-access.ssh-access.default-user'"
1820 )
1821 credentials["username"] = username
1822 # n2vc_redesign STEP 3.2
1823
1824 self._write_configuration_status(
1825 nsr_id=nsr_id,
1826 vca_index=vca_index,
1827 status="REGISTERING",
1828 element_under_configuration=element_under_configuration,
1829 element_type=element_type,
1830 )
1831
1832 step = "register execution environment {}".format(credentials)
1833 self.logger.debug(logging_text + step)
1834 ee_id = await self.vca_map[vca_type].register_execution_environment(
1835 credentials=credentials,
1836 namespace=namespace,
1837 db_dict=db_dict,
1838 vca_id=vca_id,
1839 )
1840
1841 # for compatibility with MON/POL modules, the need model and application name at database
1842 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1843 ee_id_parts = ee_id.split(".")
1844 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1845 if len(ee_id_parts) >= 2:
1846 model_name = ee_id_parts[0]
1847 application_name = ee_id_parts[1]
1848 db_nsr_update[db_update_entry + "model"] = model_name
1849 db_nsr_update[db_update_entry + "application"] = application_name
1850
1851 # n2vc_redesign STEP 3.3
1852 step = "Install configuration Software"
1853
1854 self._write_configuration_status(
1855 nsr_id=nsr_id,
1856 vca_index=vca_index,
1857 status="INSTALLING SW",
1858 element_under_configuration=element_under_configuration,
1859 element_type=element_type,
1860 other_update=db_nsr_update,
1861 )
1862
1863 # TODO check if already done
1864 self.logger.debug(logging_text + step)
1865 config = None
1866 if vca_type == "native_charm":
1867 config_primitive = next(
1868 (p for p in initial_config_primitive_list if p["name"] == "config"),
1869 None,
1870 )
1871 if config_primitive:
1872 config = self._map_primitive_params(
1873 config_primitive, {}, deploy_params
1874 )
1875 num_units = 1
1876 if vca_type == "lxc_proxy_charm":
1877 if element_type == "NS":
1878 num_units = db_nsr.get("config-units") or 1
1879 elif element_type == "VNF":
1880 num_units = db_vnfr.get("config-units") or 1
1881 elif element_type == "VDU":
1882 for v in db_vnfr["vdur"]:
1883 if vdu_id == v["vdu-id-ref"]:
1884 num_units = v.get("config-units") or 1
1885 break
1886 if vca_type != "k8s_proxy_charm":
1887 await self.vca_map[vca_type].install_configuration_sw(
1888 ee_id=ee_id,
1889 artifact_path=artifact_path,
1890 db_dict=db_dict,
1891 config=config,
1892 num_units=num_units,
1893 vca_id=vca_id,
1894 vca_type=vca_type,
1895 )
1896
1897 # write in db flag of configuration_sw already installed
1898 self.update_db_2(
1899 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1900 )
1901
1902 # add relations for this VCA (wait for other peers related with this VCA)
1903 await self._add_vca_relations(
1904 logging_text=logging_text,
1905 nsr_id=nsr_id,
1906 vca_index=vca_index,
1907 vca_id=vca_id,
1908 vca_type=vca_type,
1909 )
1910
1911 # if SSH access is required, then get execution environment SSH public
1912 # if native charm we have waited already to VM be UP
1913 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1914 pub_key = None
1915 user = None
1916 # self.logger.debug("get ssh key block")
1917 if deep_get(
1918 config_descriptor, ("config-access", "ssh-access", "required")
1919 ):
1920 # self.logger.debug("ssh key needed")
1921 # Needed to inject a ssh key
1922 user = deep_get(
1923 config_descriptor,
1924 ("config-access", "ssh-access", "default-user"),
1925 )
1926 step = "Install configuration Software, getting public ssh key"
1927 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1928 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1929 )
1930
1931 step = "Insert public key into VM user={} ssh_key={}".format(
1932 user, pub_key
1933 )
1934 else:
1935 # self.logger.debug("no need to get ssh key")
1936 step = "Waiting to VM being up and getting IP address"
1937 self.logger.debug(logging_text + step)
1938
1939 # default rw_mgmt_ip to None, avoiding the non definition of the variable
1940 rw_mgmt_ip = None
1941
1942 # n2vc_redesign STEP 5.1
1943 # wait for RO (ip-address) Insert pub_key into VM
1944 if vnfr_id:
1945 if kdu_name:
1946 rw_mgmt_ip = await self.wait_kdu_up(
1947 logging_text, nsr_id, vnfr_id, kdu_name
1948 )
1949
1950 # This verification is needed in order to avoid trying to add a public key
1951 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
1952 # for a KNF and not for its KDUs, the previous verification gives False, and the code
1953 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
1954 # or it is a KNF)
1955 elif db_vnfr.get('vdur'):
1956 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1957 logging_text,
1958 nsr_id,
1959 vnfr_id,
1960 vdu_id,
1961 vdu_index,
1962 user=user,
1963 pub_key=pub_key,
1964 )
1965
1966 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
1967
1968 # store rw_mgmt_ip in deploy params for later replacement
1969 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
1970
1971 # n2vc_redesign STEP 6 Execute initial config primitive
1972 step = "execute initial config primitive"
1973
1974 # wait for dependent primitives execution (NS -> VNF -> VDU)
1975 if initial_config_primitive_list:
1976 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
1977
1978 # stage, in function of element type: vdu, kdu, vnf or ns
1979 my_vca = vca_deployed_list[vca_index]
1980 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1981 # VDU or KDU
1982 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
1983 elif my_vca.get("member-vnf-index"):
1984 # VNF
1985 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
1986 else:
1987 # NS
1988 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
1989
1990 self._write_configuration_status(
1991 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
1992 )
1993
1994 self._write_op_status(op_id=nslcmop_id, stage=stage)
1995
1996 check_if_terminated_needed = True
1997 for initial_config_primitive in initial_config_primitive_list:
1998 # adding information on the vca_deployed if it is a NS execution environment
1999 if not vca_deployed["member-vnf-index"]:
2000 deploy_params["ns_config_info"] = json.dumps(
2001 self._get_ns_config_info(nsr_id)
2002 )
2003 # TODO check if already done
2004 primitive_params_ = self._map_primitive_params(
2005 initial_config_primitive, {}, deploy_params
2006 )
2007
2008 step = "execute primitive '{}' params '{}'".format(
2009 initial_config_primitive["name"], primitive_params_
2010 )
2011 self.logger.debug(logging_text + step)
2012 await self.vca_map[vca_type].exec_primitive(
2013 ee_id=ee_id,
2014 primitive_name=initial_config_primitive["name"],
2015 params_dict=primitive_params_,
2016 db_dict=db_dict,
2017 vca_id=vca_id,
2018 vca_type=vca_type,
2019 )
2020 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2021 if check_if_terminated_needed:
2022 if config_descriptor.get("terminate-config-primitive"):
2023 self.update_db_2(
2024 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2025 )
2026 check_if_terminated_needed = False
2027
2028 # TODO register in database that primitive is done
2029
2030 # STEP 7 Configure metrics
2031 if vca_type == "helm" or vca_type == "helm-v3":
2032 prometheus_jobs = await self.add_prometheus_metrics(
2033 ee_id=ee_id,
2034 artifact_path=artifact_path,
2035 ee_config_descriptor=ee_config_descriptor,
2036 vnfr_id=vnfr_id,
2037 nsr_id=nsr_id,
2038 target_ip=rw_mgmt_ip,
2039 )
2040 if prometheus_jobs:
2041 self.update_db_2(
2042 "nsrs",
2043 nsr_id,
2044 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2045 )
2046
2047 step = "instantiated at VCA"
2048 self.logger.debug(logging_text + step)
2049
2050 self._write_configuration_status(
2051 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2052 )
2053
2054 except Exception as e: # TODO not use Exception but N2VC exception
2055 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2056 if not isinstance(
2057 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2058 ):
2059 self.logger.error(
2060 "Exception while {} : {}".format(step, e), exc_info=True
2061 )
2062 self._write_configuration_status(
2063 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2064 )
2065 raise LcmException("{} {}".format(step, e)) from e
2066
def _write_ns_status(
    self,
    nsr_id: str,
    ns_state: str,
    current_operation: str,
    current_operation_id: str,
    error_description: str = None,
    error_detail: str = None,
    other_update: dict = None,
):
    """
    Persist NS state and current-operation information into the nsrs record.

    :param nsr_id: id of the NS record to update
    :param ns_state: new nsState value; skipped when falsy
    :param current_operation: operation name, "IDLE" when nothing is running
    :param current_operation_id: id of the running nslcmop (or None)
    :param error_description: human readable error summary, if any
    :param error_detail: detailed error text, if any
    :param other_update: extra database changes; this dict is updated in place
        with the status fields and written in the same database operation
    :return: None; DbException is caught and only logged
    """
    try:
        # reuse the caller's dict so its extra keys land in the same write
        db_dict = other_update if other_update else {}
        operation_type = current_operation if current_operation != "IDLE" else None
        db_dict.update(
            {
                # _admin.nslcmop kept for backward compatibility
                "_admin.nslcmop": current_operation_id,
                "_admin.current-operation": current_operation_id,
                "_admin.operation-type": operation_type,
                "currentOperation": current_operation,
                "currentOperationID": current_operation_id,
                "errorDescription": error_description,
                "errorDetail": error_detail,
            }
        )
        if ns_state:
            db_dict["nsState"] = ns_state
        self.update_db_2("nsrs", nsr_id, db_dict)
    except DbException as e:
        self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2107
def _write_op_status(
    self,
    op_id: str,
    stage: list = None,
    error_message: str = None,
    queuePosition: int = 0,
    operation_state: str = None,
    other_update: dict = None,
):
    """
    Persist operation progress fields into the nslcmops record.

    :param op_id: id of the nslcmop record to update
    :param stage: list of strings (first element is 'stage', the joined list
        is 'detailed-status') or any other object stored with str()
    :param error_message: written as errorMessage when not None
    :param queuePosition: position in the HA processing queue
    :param operation_state: when provided, statusEnteredTime is refreshed too
    :param other_update: extra database changes; updated in place
    :return: None; DbException is caught and only logged
    """
    # build the update document first; only the database call below can
    # raise DbException, so it is the only statement kept inside the try
    updates = other_update if other_update else {}
    updates["queuePosition"] = queuePosition
    if stage is not None:
        if isinstance(stage, list):
            updates["stage"] = stage[0]
            updates["detailed-status"] = " ".join(stage)
        else:
            updates["stage"] = str(stage)
    if error_message is not None:
        updates["errorMessage"] = error_message
    if operation_state is not None:
        updates["operationState"] = operation_state
        updates["statusEnteredTime"] = time()
    try:
        self.update_db_2("nslcmops", op_id, updates)
    except DbException as e:
        self.logger.warn(
            "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
        )
2136
def _write_all_config_status(self, db_nsr: dict, status: str):
    """
    Set every non-empty configurationStatus entry of the given NS to status.

    :param db_nsr: nsrs record already read from the database
    :param status: status string written to each entry
    :return: None; DbException is caught and only logged
    """
    nsr_id = db_nsr["_id"]
    try:
        entries = db_nsr.get("configurationStatus")
        if entries:
            # build one dotted-path update per non-empty entry
            db_nsr_update = {}
            for index, entry in enumerate(entries):
                if entry:
                    db_nsr_update[
                        "configurationStatus.{}.status".format(index)
                    ] = status
            # update status
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

    except DbException as e:
        self.logger.warn(
            "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
        )
2155
def _write_configuration_status(
    self,
    nsr_id: str,
    vca_index: int,
    status: str = None,
    element_under_configuration: str = None,
    element_type: str = None,
    other_update: dict = None,
):
    """
    Persist one VCA's configurationStatus entry into the nsrs record.

    :param nsr_id: id of the NS record to update
    :param vca_index: position of this VCA inside configurationStatus
    :param status: new status value; skipped when falsy
    :param element_under_configuration: element id being configured, optional
    :param element_type: element kind (vdu/kdu/vnf/ns), optional
    :param other_update: extra database changes; updated in place
    :return: None; DbException is caught and only logged
    """
    try:
        prefix = "configurationStatus.{}.".format(vca_index)
        db_dict = other_update if other_update else {}
        # only truthy values are written, so absent fields are left untouched
        for field, value in (
            ("status", status),
            ("elementUnderConfiguration", element_under_configuration),
            ("elementType", element_type),
        ):
            if value:
                db_dict[prefix + field] = value
        self.update_db_2("nsrs", nsr_id, db_dict)
    except DbException as e:
        self.logger.warn(
            "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
                status, nsr_id, vca_index, e
            )
        )
2187
async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
    """
    Check and compute the placement (vim account where to deploy).

    When the operation requests the external "PLA" engine, the request is
    sent via kafka and this coroutine polls the database until the result
    appears at nslcmops _admin.pla (database is used because the result can
    be produced by a different LCM worker in case of HA).

    :param logging_text: prefix for logging, with the ns and nslcmop ids
    :param db_nslcmop: database content of nslcmop
    :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index
    :return: True if some modification is done. Modifies database vnfrs and
        parameter db_vnfrs with the computed 'vim-account-id'
    :raises LcmException: when PLA does not answer within the poll window
    """
    modified = False
    nslcmop_id = db_nslcmop["_id"]
    if deep_get(db_nslcmop, ("operationParams", "placement-engine")) != "PLA":
        return modified

    self.logger.debug(logging_text + "Invoke and wait for placement optimization")
    await self.msg.aiowrite(
        "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
    )
    # poll the database every 5 seconds, up to 10 polls (plus the last
    # check at remaining == 0), for the PLA answer
    poll_period = 5
    remaining = poll_period * 10
    pla_result = None
    while not pla_result and remaining >= 0:
        await asyncio.sleep(poll_period)
        remaining -= poll_period
        db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

    if not pla_result:
        raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))

    for pla_vnf in pla_result["vnf"]:
        vim_account = pla_vnf.get("vimAccountId")
        vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
        if not vim_account or not vnfr:
            continue
        modified = True
        self.db.set_one(
            "vnfrs",
            {"_id": vnfr["_id"]},
            {"vim-account-id": vim_account},
        )
        # keep the in-memory db_vnfrs copy aligned with the database
        vnfr["vim-account-id"] = vim_account
    return modified
2236
def update_nsrs_with_pla_result(self, params):
    """
    Store the placement result received from PLA into the nslcmops record.

    :param params: kafka message content; expects the placement data under
        "placement" with its target operation id at placement.nslcmopId
    :return: None; any failure is logged as a warning, never raised
    """
    # pre-bind so the except handler can always format an id: without this,
    # a failure inside deep_get() itself would make the handler raise
    # NameError on 'nslcmop_id', masking the original exception
    nslcmop_id = None
    try:
        nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
        self.update_db_2(
            "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
        )
    except Exception as e:
        self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2245
async def instantiate(self, nsr_id, nslcmop_id):
    """
    Instantiate a NS: read descriptors/records from the database, deploy
    KDUs, deploy the NS at the VIM (RO) and deploy execution environments
    (N2VC), then consolidate the result and notify via kafka.

    :param nsr_id: ns instance to deploy
    :param nslcmop_id: operation to run
    :return: None; progress and final state are written to the database
    """

    # Try to lock HA task here
    task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
    if not task_is_locked_by_me:
        # another LCM worker owns this operation (HA deployment)
        self.logger.debug(
            "instantiate() task is not locked by me, ns={}".format(nsr_id)
        )
        return

    logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
    self.logger.debug(logging_text + "Enter")

    # get all needed from database

    # database nsrs record
    db_nsr = None

    # database nslcmops record
    db_nslcmop = None

    # update operation on nsrs
    db_nsr_update = {}
    # update operation on nslcmops
    db_nslcmop_update = {}

    nslcmop_operation_state = None
    db_vnfrs = {}  # vnf's info indexed by member-index
    # n2vc_info = {}
    tasks_dict_info = {}  # from task to info text
    exc = None
    error_list = []
    stage = [
        "Stage 1/5: preparation of the environment.",
        "Waiting for previous operations to terminate.",
        "",
    ]
    # ^ stage, step, VIM progress
    try:
        # wait for any previous tasks in process
        await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

        # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
        stage[1] = "Reading from database."
        # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
        db_nsr_update["detailed-status"] = "creating"
        db_nsr_update["operational-status"] = "init"
        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state="BUILDING",
            current_operation="INSTANTIATING",
            current_operation_id=nslcmop_id,
            other_update=db_nsr_update,
        )
        self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

        # read from db: operation
        stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
        db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
            # additionalParamsForVnf is stored as a JSON string; decode it
            db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                db_nslcmop["operationParams"]["additionalParamsForVnf"]
            )
        ns_params = db_nslcmop.get("operationParams")
        if ns_params and ns_params.get("timeout_ns_deploy"):
            timeout_ns_deploy = ns_params["timeout_ns_deploy"]
        else:
            timeout_ns_deploy = self.timeout.get(
                "ns_deploy", self.timeout_ns_deploy
            )

        # read from db: ns
        stage[1] = "Getting nsr={} from db.".format(nsr_id)
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
        nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
        self.fs.sync(db_nsr["nsd-id"])
        db_nsr["nsd"] = nsd
        # nsr_name = db_nsr["name"]   # TODO short-name??

        # read from db: vnf's of this ns
        stage[1] = "Getting vnfrs from db."
        self.logger.debug(logging_text + stage[1])
        db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

        # read from db: vnfd's for every vnf
        db_vnfds = []  # every vnfd data

        # for each vnf in ns, read vnfd
        for vnfr in db_vnfrs_list:
            if vnfr.get("kdur"):
                kdur_list = []
                for kdur in vnfr["kdur"]:
                    if kdur.get("additionalParams"):
                        # kdur additionalParams is stored as a JSON string
                        kdur["additionalParams"] = json.loads(
                            kdur["additionalParams"]
                        )
                    kdur_list.append(kdur)
                vnfr["kdur"] = kdur_list

            db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
            vnfd_id = vnfr["vnfd-id"]
            vnfd_ref = vnfr["vnfd-ref"]
            self.fs.sync(vnfd_id)

            # if we haven't this vnfd, read it from db
            # NOTE(review): db_vnfds holds vnfd dicts while vnfd_id is a
            # string, so this membership test never matches and shared vnfds
            # may be read (and appended) more than once — verify
            if vnfd_id not in db_vnfds:
                # read from db
                stage[1] = "Getting vnfd={} id='{}' from db.".format(
                    vnfd_id, vnfd_ref
                )
                self.logger.debug(logging_text + stage[1])
                vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                # store vnfd
                db_vnfds.append(vnfd)

        # Get or generates the _admin.deployed.VCA list
        vca_deployed_list = None
        if db_nsr["_admin"].get("deployed"):
            vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
        if vca_deployed_list is None:
            vca_deployed_list = []
            configuration_status_list = []
            db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
            db_nsr_update["configurationStatus"] = configuration_status_list
            # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
            populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
        elif isinstance(vca_deployed_list, dict):
            # maintain backward compatibility. Change a dict to list at database
            vca_deployed_list = list(vca_deployed_list.values())
            db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
            populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

        if not isinstance(
            deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
        ):
            populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
            db_nsr_update["_admin.deployed.RO.vnfd"] = []

        # set state to INSTANTIATED. When instantiated NBI will not delete directly
        db_nsr_update["_admin.nsState"] = "INSTANTIATED"
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self.db.set_list(
            "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
        )

        # n2vc_redesign STEP 2 Deploy Network Scenario
        stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
        self._write_op_status(op_id=nslcmop_id, stage=stage)

        stage[1] = "Deploying KDUs."
        # self.logger.debug(logging_text + "Before deploy_kdus")
        # Call to deploy_kdus in case exists the "vdu:kdu" param
        await self.deploy_kdus(
            logging_text=logging_text,
            nsr_id=nsr_id,
            nslcmop_id=nslcmop_id,
            db_vnfrs=db_vnfrs,
            db_vnfds=db_vnfds,
            task_instantiation_info=tasks_dict_info,
        )

        stage[1] = "Getting VCA public key."
        # n2vc_redesign STEP 1 Get VCA public ssh-key
        # feature 1429. Add n2vc public key to needed VMs
        n2vc_key = self.n2vc.get_public_key()
        n2vc_key_list = [n2vc_key]
        if self.vca_config.get("public_key"):
            n2vc_key_list.append(self.vca_config["public_key"])

        stage[1] = "Deploying NS at VIM."
        # RO deployment runs concurrently with the N2VC deployment below;
        # it is awaited in the finally block through _wait_for_tasks
        task_ro = asyncio.ensure_future(
            self.instantiate_RO(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nsd=nsd,
                db_nsr=db_nsr,
                db_nslcmop=db_nslcmop,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                n2vc_key_list=n2vc_key_list,
                stage=stage,
            )
        )
        self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
        tasks_dict_info[task_ro] = "Deploying at VIM"

        # n2vc_redesign STEP 3 to 6 Deploy N2VC
        stage[1] = "Deploying Execution Environments."
        self.logger.debug(logging_text + stage[1])

        nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
        for vnf_profile in get_vnf_profiles(nsd):
            vnfd_id = vnf_profile["vnfd-id"]
            vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
            member_vnf_index = str(vnf_profile["id"])
            db_vnfr = db_vnfrs[member_vnf_index]
            base_folder = vnfd["_admin"]["storage"]
            vdu_id = None
            vdu_index = 0
            vdu_name = None
            kdu_name = None

            # Get additional parameters
            deploy_params = {"OSM": get_osm_params(db_vnfr)}
            if db_vnfr.get("additionalParamsForVnf"):
                deploy_params.update(
                    parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                )

            # VNF-level charm, if declared in the descriptor
            descriptor_config = get_configuration(vnfd, vnfd["id"])
            if descriptor_config:
                self._deploy_n2vc(
                    logging_text=logging_text
                    + "member_vnf_index={} ".format(member_vnf_index),
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # Deploy charms for each VDU that supports one.
            for vdud in get_vdu_list(vnfd):
                vdu_id = vdud["id"]
                descriptor_config = get_configuration(vnfd, vdu_id)
                vdur = find_in_list(
                    db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                )

                if vdur.get("additionalParams"):
                    deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                else:
                    # NOTE(review): falls back to the shared VNF-level dict,
                    # so the "OSM" key set below mutates deploy_params too
                    deploy_params_vdu = deploy_params
                deploy_params_vdu["OSM"] = get_osm_params(
                    db_vnfr, vdu_id, vdu_count_index=0
                )
                vdud_count = get_number_of_instances(vnfd, vdu_id)

                self.logger.debug("VDUD > {}".format(vdud))
                self.logger.debug(
                    "Descriptor config > {}".format(descriptor_config)
                )
                if descriptor_config:
                    vdu_name = None
                    kdu_name = None
                    # one execution environment per VDU instance
                    for vdu_index in range(vdud_count):
                        # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                        self._deploy_n2vc(
                            logging_text=logging_text
                            + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                member_vnf_index, vdu_id, vdu_index
                            ),
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_vdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )
            # Deploy charms for each KDU that supports one.
            for kdud in get_kdu_list(vnfd):
                kdu_name = kdud["name"]
                descriptor_config = get_configuration(vnfd, kdu_name)
                if descriptor_config:
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    # raises StopIteration if the kdur is missing from the vnfr
                    kdur = next(
                        x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                    )
                    deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                    if kdur.get("additionalParams"):
                        deploy_params_kdu.update(
                            parse_yaml_strings(kdur["additionalParams"].copy())
                        )

                    self._deploy_n2vc(
                        logging_text=logging_text,
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params_kdu,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

        # Check if this NS has a charm configuration
        descriptor_config = nsd.get("ns-configuration")
        if descriptor_config and descriptor_config.get("juju"):
            vnfd_id = None
            db_vnfr = None
            member_vnf_index = None
            vdu_id = None
            kdu_name = None
            vdu_index = 0
            vdu_name = None

            # Get additional parameters
            deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
            if db_nsr.get("additionalParamsForNs"):
                deploy_params.update(
                    parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                )
            base_folder = nsd["_admin"]["storage"]
            self._deploy_n2vc(
                logging_text=logging_text,
                db_nsr=db_nsr,
                db_vnfr=db_vnfr,
                nslcmop_id=nslcmop_id,
                nsr_id=nsr_id,
                nsi_id=nsi_id,
                vnfd_id=vnfd_id,
                vdu_id=vdu_id,
                kdu_name=kdu_name,
                member_vnf_index=member_vnf_index,
                vdu_index=vdu_index,
                vdu_name=vdu_name,
                deploy_params=deploy_params,
                descriptor_config=descriptor_config,
                base_folder=base_folder,
                task_instantiation_info=tasks_dict_info,
                stage=stage,
            )

        # rest of staff will be done at finally

    except (
        ROclient.ROClientException,
        DbException,
        LcmException,
        N2VCException,
    ) as e:
        self.logger.error(
            logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
        )
        exc = e
    except asyncio.CancelledError:
        self.logger.error(
            logging_text + "Cancelled Exception while '{}'".format(stage[1])
        )
        exc = "Operation was cancelled"
    except Exception as e:
        exc = traceback.format_exc()
        self.logger.critical(
            logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
            exc_info=True,
        )
    finally:
        if exc:
            error_list.append(str(exc))
        try:
            # wait for pending tasks
            # (tasks are only registered after timeout_ns_deploy was set,
            # so the name is always bound when this branch is taken)
            if tasks_dict_info:
                stage[1] = "Waiting for instantiate pending tasks."
                self.logger.debug(logging_text + stage[1])
                error_list += await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            stage[1] = stage[2] = ""
        except asyncio.CancelledError:
            error_list.append("Cancelled")
            # TODO cancel all tasks
        except Exception as exc:
            # NOTE(review): rebinds (and, on handler exit, unbinds) the outer
            # 'exc'; harmless here because 'exc' is not read afterwards
            error_list.append(str(exc))

        # update operation-status
        db_nsr_update["operational-status"] = "running"
        # let's begin with VCA 'configured' status (later we can change it)
        db_nsr_update["config-status"] = "configured"
        for task, task_name in tasks_dict_info.items():
            if not task.done() or task.cancelled() or task.exception():
                if task_name.startswith(self.task_name_deploy_vca):
                    # A N2VC task is pending
                    db_nsr_update["config-status"] = "failed"
                else:
                    # RO or KDU task is pending
                    db_nsr_update["operational-status"] = "failed"

        # update status at database
        if error_list:
            error_detail = ". ".join(error_list)
            self.logger.error(logging_text + error_detail)
            error_description_nslcmop = "{} Detail: {}".format(
                stage[0], error_detail
            )
            error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                nslcmop_id, stage[0]
            )

            db_nsr_update["detailed-status"] = (
                error_description_nsr + " Detail: " + error_detail
            )
            db_nslcmop_update["detailed-status"] = error_detail
            nslcmop_operation_state = "FAILED"
            ns_state = "BROKEN"
        else:
            error_detail = None
            error_description_nsr = error_description_nslcmop = None
            ns_state = "READY"
            db_nsr_update["detailed-status"] = "Done"
            db_nslcmop_update["detailed-status"] = "Done"
            nslcmop_operation_state = "COMPLETED"

        if db_nsr:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=ns_state,
                current_operation="IDLE",
                current_operation_id=None,
                error_description=error_description_nsr,
                error_detail=error_detail,
                other_update=db_nsr_update,
            )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

        # notify the final operation state via kafka (best effort)
        if nslcmop_operation_state:
            try:
                await self.msg.aiowrite(
                    "ns",
                    "instantiated",
                    {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    },
                    loop=self.loop,
                )
            except Exception as e:
                self.logger.error(
                    logging_text + "kafka_write notification Exception {}".format(e)
                )

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2729
async def _add_vca_relations(
    self,
    logging_text,
    nsr_id,
    vca_index: int,
    timeout: int = 3600,
    vca_type: str = None,
    vca_id: str = None,
) -> bool:
    """
    Add the juju relations declared for the VCA at vca_index, waiting for
    the peer VCAs to be ready.

    :param logging_text: prefix for logging
    :param nsr_id: NS record id
    :param vca_index: index of this VCA inside _admin.deployed.VCA
    :param timeout: maximum seconds to wait for peers before giving up
    :param vca_type: connector key in self.vca_map; defaults to
        "lxc_proxy_charm"
    :param vca_id: VCA id passed through to add_relation
    :return: True when all relations were added (or none/only broken peers
        remain); False on timeout or any error
    """

    # steps:
    # 1. find all relations for this VCA
    # 2. wait for other peers related
    # 3. add relations

    try:
        vca_type = vca_type or "lxc_proxy_charm"

        # STEP 1: find all relations for this VCA

        # read nsr record
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

        # this VCA data
        my_vca = deep_get(db_nsr, ("_admin", "deployed", "VCA"))[vca_index]

        # read all ns-configuration relations
        ns_relations = list()
        db_ns_relations = deep_get(nsd, ("ns-configuration", "relation"))
        if db_ns_relations:
            for r in db_ns_relations:
                # check if this VCA is in the relation
                if my_vca.get("member-vnf-index") in (
                    r.get("entities")[0].get("id"),
                    r.get("entities")[1].get("id"),
                ):
                    ns_relations.append(r)

        # read all vnf-configuration relations
        vnf_relations = list()
        db_vnfd_list = db_nsr.get("vnfd-id")
        if db_vnfd_list:
            for vnfd in db_vnfd_list:
                db_vnf_relations = None
                db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
                db_vnf_configuration = get_configuration(db_vnfd, db_vnfd["id"])
                if db_vnf_configuration:
                    db_vnf_relations = db_vnf_configuration.get("relation", [])
                if db_vnf_relations:
                    for r in db_vnf_relations:
                        # check if this VCA is in the relation
                        if my_vca.get("vdu_id") in (
                            r.get("entities")[0].get("id"),
                            r.get("entities")[1].get("id"),
                        ):
                            vnf_relations.append(r)

        # if no relations, terminate
        if not ns_relations and not vnf_relations:
            self.logger.debug(logging_text + " No relations")
            return True

        self.logger.debug(
            logging_text
            + " adding relations\n {}\n {}".format(
                ns_relations, vnf_relations
            )
        )

        # add all relations
        start = time()
        # STEP 2/3: poll until every pending relation has both peers ready,
        # adding each relation as soon as both execution environments exist;
        # note the loop sleeps 5s per pass even when the first pass succeeds
        while True:
            # check timeout
            now = time()
            if now - start >= timeout:
                self.logger.error(logging_text + " : timeout adding relations")
                return False

            # reload nsr from database (we need to update record: _admin.deloyed.VCA)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            # for each defined NS relation, find the VCA's related
            # (iterate over a copy: entries are removed while iterating)
            for r in ns_relations.copy():
                from_vca_ee_id = None
                to_vca_ee_id = None
                from_vca_endpoint = None
                to_vca_endpoint = None
                vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
                for vca in vca_list:
                    if vca.get("member-vnf-index") == r.get("entities")[0].get(
                        "id"
                    ) and vca.get("config_sw_installed"):
                        from_vca_ee_id = vca.get("ee_id")
                        from_vca_endpoint = r.get("entities")[0].get("endpoint")
                    if vca.get("member-vnf-index") == r.get("entities")[1].get(
                        "id"
                    ) and vca.get("config_sw_installed"):
                        to_vca_ee_id = vca.get("ee_id")
                        to_vca_endpoint = r.get("entities")[1].get("endpoint")
                if from_vca_ee_id and to_vca_ee_id:
                    # add relation
                    await self.vca_map[vca_type].add_relation(
                        ee_id_1=from_vca_ee_id,
                        ee_id_2=to_vca_ee_id,
                        endpoint_1=from_vca_endpoint,
                        endpoint_2=to_vca_endpoint,
                        vca_id=vca_id,
                    )
                    # remove entry from relations list
                    ns_relations.remove(r)
                else:
                    # check failed peers
                    try:
                        vca_status_list = db_nsr.get("configurationStatus")
                        if vca_status_list:
                            # vca_status_list is assumed parallel to vca_list
                            for i in range(len(vca_list)):
                                vca = vca_list[i]
                                vca_status = vca_status_list[i]
                                if vca.get("member-vnf-index") == r.get("entities")[
                                    0
                                ].get("id"):
                                    if vca_status.get("status") == "BROKEN":
                                        # peer broken: remove relation from list
                                        ns_relations.remove(r)
                                if vca.get("member-vnf-index") == r.get("entities")[
                                    1
                                ].get("id"):
                                    if vca_status.get("status") == "BROKEN":
                                        # peer broken: remove relation from list
                                        ns_relations.remove(r)
                    except Exception:
                        # ignore
                        pass

            # for each defined VNF relation, find the VCA's related
            for r in vnf_relations.copy():
                from_vca_ee_id = None
                to_vca_ee_id = None
                from_vca_endpoint = None
                to_vca_endpoint = None
                vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
                for vca in vca_list:
                    # match by vdu_id for VDU charms, by vnfd_id for VNF charms
                    key_to_check = "vdu_id"
                    if vca.get("vdu_id") is None:
                        key_to_check = "vnfd_id"
                    if vca.get(key_to_check) == r.get("entities")[0].get(
                        "id"
                    ) and vca.get("config_sw_installed"):
                        from_vca_ee_id = vca.get("ee_id")
                        from_vca_endpoint = r.get("entities")[0].get("endpoint")
                    if vca.get(key_to_check) == r.get("entities")[1].get(
                        "id"
                    ) and vca.get("config_sw_installed"):
                        to_vca_ee_id = vca.get("ee_id")
                        to_vca_endpoint = r.get("entities")[1].get("endpoint")
                if from_vca_ee_id and to_vca_ee_id:
                    # add relation
                    await self.vca_map[vca_type].add_relation(
                        ee_id_1=from_vca_ee_id,
                        ee_id_2=to_vca_ee_id,
                        endpoint_1=from_vca_endpoint,
                        endpoint_2=to_vca_endpoint,
                        vca_id=vca_id,
                    )
                    # remove entry from relations list
                    vnf_relations.remove(r)
                else:
                    # check failed peers
                    try:
                        vca_status_list = db_nsr.get("configurationStatus")
                        if vca_status_list:
                            for i in range(len(vca_list)):
                                vca = vca_list[i]
                                vca_status = vca_status_list[i]
                                if vca.get("vdu_id") == r.get("entities")[0].get(
                                    "id"
                                ):
                                    if vca_status.get("status") == "BROKEN":
                                        # peer broken: remove relation from list
                                        vnf_relations.remove(r)
                                if vca.get("vdu_id") == r.get("entities")[1].get(
                                    "id"
                                ):
                                    if vca_status.get("status") == "BROKEN":
                                        # peer broken: remove relation from list
                                        vnf_relations.remove(r)
                    except Exception:
                        # ignore
                        pass

            # wait for next try
            await asyncio.sleep(5.0)

            if not ns_relations and not vnf_relations:
                self.logger.debug("Relations added")
                break

        return True

    except Exception as e:
        self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
        return False
2933
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its K8s cluster and record the result in the DBs.

        Deploys the KDU (helm chart or juju bundle) described by
        ``k8s_instance_info``, stores the generated kdu-instance name in the
        nsr, publishes the management service IP (if any) in the vnfr, and
        runs the KDU initial-config-primitives when no juju execution
        environment is referenced by the descriptor.

        :param nsr_id: _id of the nsrs record to update
        :param nsr_db_path: dot-path inside the nsr where this KDU deployment
            info lives (e.g. "_admin.deployed.K8s.<index>")
        :param vnfr_data: vnfr record; its kdur.<kdu_index> entries are updated
        :param kdu_index: index of the kdur inside the vnfr
        :param kdud: KDU descriptor (an entry of vnfd["kdu"])
        :param vnfd: VNF descriptor containing the KDU
        :param k8s_instance_info: dict with k8scluster-type, k8scluster-uuid,
            kdu-model, kdu-name, namespace and optional kdu-deployment-name
        :param k8params: parameters forwarded to the K8s connector install
        :param timeout: seconds allowed for the install and for each primitive
        :param vca_id: VCA id, forwarded to the K8s connector
        :return: the kdu_instance name used for the deployment
        :raises Exception: re-raises any failure after best-effort recording of
            the error in the nsr (detailed-status) and vnfr (kdur status ERROR)
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Use the deployment name requested in the descriptor, if any;
            # otherwise let the connector generate a unique instance name
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3131
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch the deployment of every KDU declared in the vnfrs.

        For each kdur of each vnfr: resolve the target K8s cluster (waiting
        for any related k8scluster task first), synchronize its helm repos when
        needed, write the deployment entry under db_nsr "_admin.deployed.K8s",
        and spawn one _install_kdu() asyncio task, registering it in
        task_instantiation_info.

        :param logging_text: prefix for log messages
        :param nsr_id: _id of the nsr being instantiated
        :param nslcmop_id: _id of the current operation (for the task registry)
        :param db_vnfrs: dict of vnfr records (keyed by member-vnf-index)
        :param db_vnfds: list of vnfd records used by this NS
        :param task_instantiation_info: dict mapping created asyncio tasks to
            a human-readable description
        :raises LcmException: on unknown kdu type or non-initialized cluster
        """
        # Launch kdus if present in the descriptor

        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the internal uuid of cluster_id for cluster_type."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage and storage.get(
                            "pkg-dir"
                        ):  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            filename = "{}/{}/{}s/{}".format(
                                storage["folder"],
                                storage["pkg-dir"],
                                k8sclustertype,
                                kdumodel,
                            )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3398
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment of the descriptor.

        For each execution environment in descriptor_config, reuse (or create)
        the matching entry in db_nsr "_admin.deployed.VCA", determine the VCA
        type (lxc/k8s proxy charm, native charm, helm v2/v3), then spawn an
        instantiate_N2VC() task registered under nslcmop_id and annotated in
        task_instantiation_info.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                # proxy charm by default; "cloud: k8s" or "proxy: false" override it
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            # for/else: the else branch runs only when no existing VCA entry matches
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3551
3552 @staticmethod
3553 def _create_nslcmop(nsr_id, operation, params):
3554 """
3555 Creates a ns-lcm-opp content to be stored at database.
3556 :param nsr_id: internal id of the instance
3557 :param operation: instantiate, terminate, scale, action, ...
3558 :param params: user parameters for the operation
3559 :return: dictionary following SOL005 format
3560 """
3561 # Raise exception if invalid arguments
3562 if not (nsr_id and operation and params):
3563 raise LcmException(
3564 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3565 )
3566 now = time()
3567 _id = str(uuid4())
3568 nslcmop = {
3569 "id": _id,
3570 "_id": _id,
3571 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3572 "operationState": "PROCESSING",
3573 "statusEnteredTime": now,
3574 "nsInstanceId": nsr_id,
3575 "lcmOperationType": operation,
3576 "startTime": now,
3577 "isAutomaticInvocation": False,
3578 "operationParams": params,
3579 "isCancelPending": False,
3580 "links": {
3581 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3582 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3583 },
3584 }
3585 return nslcmop
3586
3587 def _format_additional_params(self, params):
3588 params = params or {}
3589 for key, value in params.items():
3590 if str(value).startswith("!!yaml "):
3591 params[key] = yaml.safe_load(value[7:])
3592 return params
3593
3594 def _get_terminate_primitive_params(self, seq, vnf_index):
3595 primitive = seq.get("name")
3596 primitive_params = {}
3597 params = {
3598 "member_vnf_index": vnf_index,
3599 "primitive": primitive,
3600 "primitive_params": primitive_params,
3601 }
3602 desc_params = {}
3603 return self._map_primitive_params(seq, params, desc_params)
3604
3605 # sub-operations
3606
3607 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3608 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3609 if op.get("operationState") == "COMPLETED":
3610 # b. Skip sub-operation
3611 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3612 return self.SUBOPERATION_STATUS_SKIP
3613 else:
3614 # c. retry executing sub-operation
3615 # The sub-operation exists, and operationState != 'COMPLETED'
3616 # Update operationState = 'PROCESSING' to indicate a retry.
3617 operationState = "PROCESSING"
3618 detailed_status = "In progress"
3619 self._update_suboperation_status(
3620 db_nslcmop, op_index, operationState, detailed_status
3621 )
3622 # Return the sub-operation index
3623 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3624 # with arguments extracted from the sub-operation
3625 return op_index
3626
3627 # Find a sub-operation where all keys in a matching dictionary must match
3628 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3629 def _find_suboperation(self, db_nslcmop, match):
3630 if db_nslcmop and match:
3631 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3632 for i, op in enumerate(op_list):
3633 if all(op.get(k) == match[k] for k in match):
3634 return i
3635 return self.SUBOPERATION_STATUS_NOT_FOUND
3636
3637 # Update status for a sub-operation given its index
3638 def _update_suboperation_status(
3639 self, db_nslcmop, op_index, operationState, detailed_status
3640 ):
3641 # Update DB for HA tasks
3642 q_filter = {"_id": db_nslcmop["_id"]}
3643 update_dict = {
3644 "_admin.operations.{}.operationState".format(op_index): operationState,
3645 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3646 }
3647 self.db.set_one(
3648 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3649 )
3650
3651 # Add sub-operation, return the index of the added sub-operation
3652 # Optionally, set operationState, detailed-status, and operationType
3653 # Status and type are currently set for 'scale' sub-operations:
3654 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3655 # 'detailed-status' : status message
3656 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3657 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3658 def _add_suboperation(
3659 self,
3660 db_nslcmop,
3661 vnf_index,
3662 vdu_id,
3663 vdu_count_index,
3664 vdu_name,
3665 primitive,
3666 mapped_primitive_params,
3667 operationState=None,
3668 detailed_status=None,
3669 operationType=None,
3670 RO_nsr_id=None,
3671 RO_scaling_info=None,
3672 ):
3673 if not db_nslcmop:
3674 return self.SUBOPERATION_STATUS_NOT_FOUND
3675 # Get the "_admin.operations" list, if it exists
3676 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3677 op_list = db_nslcmop_admin.get("operations")
3678 # Create or append to the "_admin.operations" list
3679 new_op = {
3680 "member_vnf_index": vnf_index,
3681 "vdu_id": vdu_id,
3682 "vdu_count_index": vdu_count_index,
3683 "primitive": primitive,
3684 "primitive_params": mapped_primitive_params,
3685 }
3686 if operationState:
3687 new_op["operationState"] = operationState
3688 if detailed_status:
3689 new_op["detailed-status"] = detailed_status
3690 if operationType:
3691 new_op["lcmOperationType"] = operationType
3692 if RO_nsr_id:
3693 new_op["RO_nsr_id"] = RO_nsr_id
3694 if RO_scaling_info:
3695 new_op["RO_scaling_info"] = RO_scaling_info
3696 if not op_list:
3697 # No existing operations, create key 'operations' with current operation as first list element
3698 db_nslcmop_admin.update({"operations": [new_op]})
3699 op_list = db_nslcmop_admin.get("operations")
3700 else:
3701 # Existing operations, append operation to list
3702 op_list.append(new_op)
3703
3704 db_nslcmop_update = {"_admin.operations": op_list}
3705 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3706 op_index = len(op_list) - 1
3707 return op_index
3708
3709 # Helper methods for scale() sub-operations
3710
3711 # pre-scale/post-scale:
3712 # Check for 3 different cases:
3713 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3714 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
3715 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
3716 def _check_or_add_scale_suboperation(
3717 self,
3718 db_nslcmop,
3719 vnf_index,
3720 vnf_config_primitive,
3721 primitive_params,
3722 operationType,
3723 RO_nsr_id=None,
3724 RO_scaling_info=None,
3725 ):
3726 # Find this sub-operation
3727 if RO_nsr_id and RO_scaling_info:
3728 operationType = "SCALE-RO"
3729 match = {
3730 "member_vnf_index": vnf_index,
3731 "RO_nsr_id": RO_nsr_id,
3732 "RO_scaling_info": RO_scaling_info,
3733 }
3734 else:
3735 match = {
3736 "member_vnf_index": vnf_index,
3737 "primitive": vnf_config_primitive,
3738 "primitive_params": primitive_params,
3739 "lcmOperationType": operationType,
3740 }
3741 op_index = self._find_suboperation(db_nslcmop, match)
3742 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
3743 # a. New sub-operation
3744 # The sub-operation does not exist, add it.
3745 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
3746 # The following parameters are set to None for all kind of scaling:
3747 vdu_id = None
3748 vdu_count_index = None
3749 vdu_name = None
3750 if RO_nsr_id and RO_scaling_info:
3751 vnf_config_primitive = None
3752 primitive_params = None
3753 else:
3754 RO_nsr_id = None
3755 RO_scaling_info = None
3756 # Initial status for sub-operation
3757 operationState = "PROCESSING"
3758 detailed_status = "In progress"
3759 # Add sub-operation for pre/post-scaling (zero or more operations)
3760 self._add_suboperation(
3761 db_nslcmop,
3762 vnf_index,
3763 vdu_id,
3764 vdu_count_index,
3765 vdu_name,
3766 vnf_config_primitive,
3767 primitive_params,
3768 operationState,
3769 detailed_status,
3770 operationType,
3771 RO_nsr_id,
3772 RO_scaling_info,
3773 )
3774 return self.SUBOPERATION_STATUS_NEW
3775 else:
3776 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
3777 # or op_index (operationState != 'COMPLETED')
3778 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
3779
3780 # Function to return execution_environment id
3781
3782 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
3783 # TODO vdu_index_count
3784 for vca in vca_deployed_list:
3785 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
3786 return vca["ee_id"]
3787
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (only if destroy_ee is True)
        :param logging_text: prefix for log messages
        :param db_nslcmop: current nslcmop record (sub-operations are appended to it)
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy the EE here, because all of them will be destroyed at once later
        :param exec_primitives: False to not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA id, forwarded to the VCA connector calls
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default type when the VCA entry predates the "type" field
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # remove the monitoring jobs attached to this VCA, if any
        if vca_deployed.get("prometheus_jobs") and self.prometheus:
            await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
3892
3893 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
3894 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
3895 namespace = "." + db_nsr["_id"]
3896 try:
3897 await self.n2vc.delete_namespace(
3898 namespace=namespace,
3899 total_timeout=self.timeout_charm_delete,
3900 vca_id=vca_id,
3901 )
3902 except N2VCNotFound: # already deleted. Skip
3903 pass
3904 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
3905
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO (classic, non-NG flavour).

        Deletes, in order: the ns instance at the VIM (waiting for completion),
        the nsd at RO, and each vnfd at RO. 404 answers are treated as
        "already deleted"; any other failure is accumulated in failed_detail
        and raised as a single LcmException at the end.

        :param logging_text: prefix for every log line
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: nsr database id
        :param nslcmop_id: operation database id
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None
        :raises LcmException: if any deletion failed (concatenated details)
        """
        db_nsr_update = {}
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                # RO.delete returns an async action id that must be polled below
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        # still deleting; keep updating progress text
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # deletion finished at the VIM
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # only write to DB when the status text actually changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only if the ns deletion above did not fail)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete each vnfd from RO (only if nothing failed so far)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4105
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate an NS instance in three stages:
          1/3 prepare (lock HA task, load nslcmop/nsr/vnfrs/vnfds from DB),
          2/3 run terminating config primitives on the deployed VCAs,
          3/3 delete all execution environments, KDUs and the VIM deployment.

        Final status (COMPLETED/FAILED) is always written to the DB and
        notified on the kafka bus from the finally block.

        :param nsr_id: nsr database id
        :param nslcmop_id: nslcmop (operation) database id
        :return: None
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps asyncio task -> human readable description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # deepcopy: nsr_deployed is mutated/iterated without touching db_nsr
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}  # cache: vnfd-id -> vnfd document
            db_vnfds_from_member_index = {}  # member-vnf-index -> vnfd document
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # select the configuration descriptor matching the VCA scope
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )
                if ns_state == "NOT_INSTANTIATED":
                    try:
                        # propagate the final state to all VNFRs of this NS
                        self.db.set_list(
                            "vnfrs",
                            {"nsr-id-ref": nsr_id},
                            {"_admin.nsState": "NOT_INSTANTIATED"},
                        )
                    except DbException as e:
                        self.logger.warn(
                            logging_text
                            + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                                nsr_id, e
                            )
                        )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify result on the message bus (best effort)
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4433
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """
        Wait for a set of asyncio tasks, reporting progress and collecting errors.

        Updates stage[1] with a "done/total" counter (plus error details when any
        task failed) and writes the operation status to the DB after each batch
        of completed tasks.

        :param logging_text: prefix for every log line
        :param created_tasks_info: dict mapping task -> human readable description
        :param timeout: overall timeout (seconds) counted from entry
        :param stage: 3-element status list; only index 1 is modified here
        :param nslcmop_id: operation id used to persist progress
        :param nsr_id: if provided, errorDescription/errorDetail are also
            written to the nsr record on failure
        :return: list of error detail strings (empty if all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time budget; shrinks on every iteration
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/expected exception types get a one-line error;
                    # anything else gets a full traceback in the log
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4510
4511 @staticmethod
4512 def _map_primitive_params(primitive_desc, params, instantiation_params):
4513 """
4514 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4515 The default-value is used. If it is between < > it look for a value at instantiation_params
4516 :param primitive_desc: portion of VNFD/NSD that describes primitive
4517 :param params: Params provided by user
4518 :param instantiation_params: Instantiation params provided by user
4519 :return: a dictionary with the calculated params
4520 """
4521 calculated_params = {}
4522 for parameter in primitive_desc.get("parameter", ()):
4523 param_name = parameter["name"]
4524 if param_name in params:
4525 calculated_params[param_name] = params[param_name]
4526 elif "default-value" in parameter or "value" in parameter:
4527 if "value" in parameter:
4528 calculated_params[param_name] = parameter["value"]
4529 else:
4530 calculated_params[param_name] = parameter["default-value"]
4531 if (
4532 isinstance(calculated_params[param_name], str)
4533 and calculated_params[param_name].startswith("<")
4534 and calculated_params[param_name].endswith(">")
4535 ):
4536 if calculated_params[param_name][1:-1] in instantiation_params:
4537 calculated_params[param_name] = instantiation_params[
4538 calculated_params[param_name][1:-1]
4539 ]
4540 else:
4541 raise LcmException(
4542 "Parameter {} needed to execute primitive {} not provided".format(
4543 calculated_params[param_name], primitive_desc["name"]
4544 )
4545 )
4546 else:
4547 raise LcmException(
4548 "Parameter {} needed to execute primitive {} not provided".format(
4549 param_name, primitive_desc["name"]
4550 )
4551 )
4552
4553 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4554 calculated_params[param_name] = yaml.safe_dump(
4555 calculated_params[param_name], default_flow_style=True, width=256
4556 )
4557 elif isinstance(calculated_params[param_name], str) and calculated_params[
4558 param_name
4559 ].startswith("!!yaml "):
4560 calculated_params[param_name] = calculated_params[param_name][7:]
4561 if parameter.get("data-type") == "INTEGER":
4562 try:
4563 calculated_params[param_name] = int(calculated_params[param_name])
4564 except ValueError: # error converting string to int
4565 raise LcmException(
4566 "Parameter {} of primitive {} must be integer".format(
4567 param_name, primitive_desc["name"]
4568 )
4569 )
4570 elif parameter.get("data-type") == "BOOLEAN":
4571 calculated_params[param_name] = not (
4572 (str(calculated_params[param_name])).lower() == "false"
4573 )
4574
4575 # add always ns_config_info if primitive name is config
4576 if primitive_desc["name"] == "config":
4577 if "ns_config_info" in instantiation_params:
4578 calculated_params["ns_config_info"] = instantiation_params[
4579 "ns_config_info"
4580 ]
4581 return calculated_params
4582
4583 def _look_for_deployed_vca(
4584 self,
4585 deployed_vca,
4586 member_vnf_index,
4587 vdu_id,
4588 vdu_count_index,
4589 kdu_name=None,
4590 ee_descriptor_id=None,
4591 ):
4592 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4593 for vca in deployed_vca:
4594 if not vca:
4595 continue
4596 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4597 continue
4598 if (
4599 vdu_count_index is not None
4600 and vdu_count_index != vca["vdu_count_index"]
4601 ):
4602 continue
4603 if kdu_name and kdu_name != vca["kdu_name"]:
4604 continue
4605 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4606 continue
4607 break
4608 else:
4609 # vca_deployed not found
4610 raise LcmException(
4611 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4612 " is not deployed".format(
4613 member_vnf_index,
4614 vdu_id,
4615 vdu_count_index,
4616 kdu_name,
4617 ee_descriptor_id,
4618 )
4619 )
4620 # get ee_id
4621 ee_id = vca.get("ee_id")
4622 vca_type = vca.get(
4623 "type", "lxc_proxy_charm"
4624 ) # default value for backward compatibility - proxy charm
4625 if not ee_id:
4626 raise LcmException(
4627 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4628 "execution environment".format(
4629 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4630 )
4631 )
4632 return ee_id, vca_type
4633
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """
        Execute a primitive on the given execution environment, with retries.

        :param ee_id: execution environment id
        :param primitive: primitive name ("config" params get wrapped in
            {"params": ...})
        :param primitive_params: parameters for the primitive
        :param retries: number of additional attempts after a failure
        :param retries_interval: seconds to wait between attempts
        :param timeout: per-attempt timeout; defaults to self.timeout_primitive
        :param vca_type: key into self.vca_map; defaults to "lxc_proxy_charm"
        :param db_dict: passed through to the connector for status updates
        :param vca_id: id of the VCA to use
        :return: tuple (status, detail): ("COMPLETED", output) on success,
            ("FAILED", error-str) when retries are exhausted, or
            ("FAIL", error-str) on unexpected exceptions.
            NOTE(review): "FAILED" vs "FAIL" looks inconsistent — confirm
            whether callers depend on both spellings before unifying.
        :raises LcmException, asyncio.CancelledError: re-raised to the caller
        """
        try:
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout_primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        return "FAILED", str(e)

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
4692
4693 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4694 """
4695 Updating the vca_status with latest juju information in nsrs record
4696 :param: nsr_id: Id of the nsr
4697 :param: nslcmop_id: Id of the nslcmop
4698 :return: None
4699 """
4700
4701 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4702 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4703 vca_id = self.get_vca_id({}, db_nsr)
4704 if db_nsr["_admin"]["deployed"]["K8s"]:
4705 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4706 cluster_uuid, kdu_instance, cluster_type = (
4707 k8s["k8scluster-uuid"],
4708 k8s["kdu-instance"],
4709 k8s["k8scluster-type"],
4710 )
4711 await self._on_update_k8s_db(
4712 cluster_uuid=cluster_uuid,
4713 kdu_instance=kdu_instance,
4714 filter={"_id": nsr_id},
4715 vca_id=vca_id,
4716 cluster_type=cluster_type,
4717 )
4718 else:
4719 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4720 table, filter = "nsrs", {"_id": nsr_id}
4721 path = "_admin.deployed.VCA.{}.".format(vca_index)
4722 await self._on_update_n2vc_db(table, filter, path, {})
4723
4724 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4725 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4726
4727 async def action(self, nsr_id, nslcmop_id):
4728 # Try to lock HA task here
4729 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4730 if not task_is_locked_by_me:
4731 return
4732
4733 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
4734 self.logger.debug(logging_text + "Enter")
4735 # get all needed from database
4736 db_nsr = None
4737 db_nslcmop = None
4738 db_nsr_update = {}
4739 db_nslcmop_update = {}
4740 nslcmop_operation_state = None
4741 error_description_nslcmop = None
4742 exc = None
4743 try:
4744 # wait for any previous tasks in process
4745 step = "Waiting for previous operations to terminate"
4746 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4747
4748 self._write_ns_status(
4749 nsr_id=nsr_id,
4750 ns_state=None,
4751 current_operation="RUNNING ACTION",
4752 current_operation_id=nslcmop_id,
4753 )
4754
4755 step = "Getting information from database"
4756 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4757 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4758 if db_nslcmop["operationParams"].get("primitive_params"):
4759 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
4760 db_nslcmop["operationParams"]["primitive_params"]
4761 )
4762
4763 nsr_deployed = db_nsr["_admin"].get("deployed")
4764 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
4765 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4766 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
4767 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4768 primitive = db_nslcmop["operationParams"]["primitive"]
4769 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
4770 timeout_ns_action = db_nslcmop["operationParams"].get(
4771 "timeout_ns_action", self.timeout_primitive
4772 )
4773
4774 if vnf_index:
4775 step = "Getting vnfr from database"
4776 db_vnfr = self.db.get_one(
4777 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
4778 )
4779 if db_vnfr.get("kdur"):
4780 kdur_list = []
4781 for kdur in db_vnfr["kdur"]:
4782 if kdur.get("additionalParams"):
4783 kdur["additionalParams"] = json.loads(
4784 kdur["additionalParams"]
4785 )
4786 kdur_list.append(kdur)
4787 db_vnfr["kdur"] = kdur_list
4788 step = "Getting vnfd from database"
4789 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4790 else:
4791 step = "Getting nsd from database"
4792 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4793
4794 vca_id = self.get_vca_id(db_vnfr, db_nsr)
4795 # for backward compatibility
4796 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4797 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4798 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4799 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4800
4801 # look for primitive
4802 config_primitive_desc = descriptor_configuration = None
4803 if vdu_id:
4804 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
4805 elif kdu_name:
4806 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
4807 elif vnf_index:
4808 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
4809 else:
4810 descriptor_configuration = db_nsd.get("ns-configuration")
4811
4812 if descriptor_configuration and descriptor_configuration.get(
4813 "config-primitive"
4814 ):
4815 for config_primitive in descriptor_configuration["config-primitive"]:
4816 if config_primitive["name"] == primitive:
4817 config_primitive_desc = config_primitive
4818 break
4819
4820 if not config_primitive_desc:
4821 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
4822 raise LcmException(
4823 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
4824 primitive
4825 )
4826 )
4827 primitive_name = primitive
4828 ee_descriptor_id = None
4829 else:
4830 primitive_name = config_primitive_desc.get(
4831 "execution-environment-primitive", primitive
4832 )
4833 ee_descriptor_id = config_primitive_desc.get(
4834 "execution-environment-ref"
4835 )
4836
4837 if vnf_index:
4838 if vdu_id:
4839 vdur = next(
4840 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
4841 )
4842 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
4843 elif kdu_name:
4844 kdur = next(
4845 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
4846 )
4847 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
4848 else:
4849 desc_params = parse_yaml_strings(
4850 db_vnfr.get("additionalParamsForVnf")
4851 )
4852 else:
4853 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
4854 if kdu_name and get_configuration(db_vnfd, kdu_name):
4855 kdu_configuration = get_configuration(db_vnfd, kdu_name)
4856 actions = set()
4857 for primitive in kdu_configuration.get("initial-config-primitive", []):
4858 actions.add(primitive["name"])
4859 for primitive in kdu_configuration.get("config-primitive", []):
4860 actions.add(primitive["name"])
4861 kdu_action = True if primitive_name in actions else False
4862
4863 # TODO check if ns is in a proper status
4864 if kdu_name and (
4865 primitive_name in ("upgrade", "rollback", "status") or kdu_action
4866 ):
4867 # kdur and desc_params already set from before
4868 if primitive_params:
4869 desc_params.update(primitive_params)
4870 # TODO Check if we will need something at vnf level
4871 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
4872 if (
4873 kdu_name == kdu["kdu-name"]
4874 and kdu["member-vnf-index"] == vnf_index
4875 ):
4876 break
4877 else:
4878 raise LcmException(
4879 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
4880 )
4881
4882 if kdu.get("k8scluster-type") not in self.k8scluster_map:
4883 msg = "unknown k8scluster-type '{}'".format(
4884 kdu.get("k8scluster-type")
4885 )
4886 raise LcmException(msg)
4887
4888 db_dict = {
4889 "collection": "nsrs",
4890 "filter": {"_id": nsr_id},
4891 "path": "_admin.deployed.K8s.{}".format(index),
4892 }
4893 self.logger.debug(
4894 logging_text
4895 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
4896 )
4897 step = "Executing kdu {}".format(primitive_name)
4898 if primitive_name == "upgrade":
4899 if desc_params.get("kdu_model"):
4900 kdu_model = desc_params.get("kdu_model")
4901 del desc_params["kdu_model"]
4902 else:
4903 kdu_model = kdu.get("kdu-model")
4904 parts = kdu_model.split(sep=":")
4905 if len(parts) == 2:
4906 kdu_model = parts[0]
4907 if desc_params.get("kdu_atomic_upgrade"):
4908 atomic_upgrade = desc_params.get("kdu_atomic_upgrade").lower() in ("yes", "true", "1")
4909 del desc_params["kdu_atomic_upgrade"]
4910 else:
4911 atomic_upgrade = True
4912
4913 detailed_status = await asyncio.wait_for(
4914 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
4915 cluster_uuid=kdu.get("k8scluster-uuid"),
4916 kdu_instance=kdu.get("kdu-instance"),
4917 atomic=atomic_upgrade,
4918 kdu_model=kdu_model,
4919 params=desc_params,
4920 db_dict=db_dict,
4921 timeout=timeout_ns_action,
4922 ),
4923 timeout=timeout_ns_action + 10,
4924 )
4925 self.logger.debug(
4926 logging_text + " Upgrade of kdu {} done".format(detailed_status)
4927 )
4928 elif primitive_name == "rollback":
4929 detailed_status = await asyncio.wait_for(
4930 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
4931 cluster_uuid=kdu.get("k8scluster-uuid"),
4932 kdu_instance=kdu.get("kdu-instance"),
4933 db_dict=db_dict,
4934 ),
4935 timeout=timeout_ns_action,
4936 )
4937 elif primitive_name == "status":
4938 detailed_status = await asyncio.wait_for(
4939 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
4940 cluster_uuid=kdu.get("k8scluster-uuid"),
4941 kdu_instance=kdu.get("kdu-instance"),
4942 vca_id=vca_id,
4943 ),
4944 timeout=timeout_ns_action,
4945 )
4946 else:
4947 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
4948 kdu["kdu-name"], nsr_id
4949 )
4950 params = self._map_primitive_params(
4951 config_primitive_desc, primitive_params, desc_params
4952 )
4953
4954 detailed_status = await asyncio.wait_for(
4955 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
4956 cluster_uuid=kdu.get("k8scluster-uuid"),
4957 kdu_instance=kdu_instance,
4958 primitive_name=primitive_name,
4959 params=params,
4960 db_dict=db_dict,
4961 timeout=timeout_ns_action,
4962 vca_id=vca_id,
4963 ),
4964 timeout=timeout_ns_action,
4965 )
4966
4967 if detailed_status:
4968 nslcmop_operation_state = "COMPLETED"
4969 else:
4970 detailed_status = ""
4971 nslcmop_operation_state = "FAILED"
4972 else:
4973 ee_id, vca_type = self._look_for_deployed_vca(
4974 nsr_deployed["VCA"],
4975 member_vnf_index=vnf_index,
4976 vdu_id=vdu_id,
4977 vdu_count_index=vdu_count_index,
4978 ee_descriptor_id=ee_descriptor_id,
4979 )
4980 for vca_index, vca_deployed in enumerate(
4981 db_nsr["_admin"]["deployed"]["VCA"]
4982 ):
4983 if vca_deployed.get("member-vnf-index") == vnf_index:
4984 db_dict = {
4985 "collection": "nsrs",
4986 "filter": {"_id": nsr_id},
4987 "path": "_admin.deployed.VCA.{}.".format(vca_index),
4988 }
4989 break
4990 (
4991 nslcmop_operation_state,
4992 detailed_status,
4993 ) = await self._ns_execute_primitive(
4994 ee_id,
4995 primitive=primitive_name,
4996 primitive_params=self._map_primitive_params(
4997 config_primitive_desc, primitive_params, desc_params
4998 ),
4999 timeout=timeout_ns_action,
5000 vca_type=vca_type,
5001 db_dict=db_dict,
5002 vca_id=vca_id,
5003 )
5004
5005 db_nslcmop_update["detailed-status"] = detailed_status
5006 error_description_nslcmop = (
5007 detailed_status if nslcmop_operation_state == "FAILED" else ""
5008 )
5009 self.logger.debug(
5010 logging_text
5011 + " task Done with result {} {}".format(
5012 nslcmop_operation_state, detailed_status
5013 )
5014 )
5015 return # database update is called inside finally
5016
5017 except (DbException, LcmException, N2VCException, K8sException) as e:
5018 self.logger.error(logging_text + "Exit Exception {}".format(e))
5019 exc = e
5020 except asyncio.CancelledError:
5021 self.logger.error(
5022 logging_text + "Cancelled Exception while '{}'".format(step)
5023 )
5024 exc = "Operation was cancelled"
5025 except asyncio.TimeoutError:
5026 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5027 exc = "Timeout"
5028 except Exception as e:
5029 exc = traceback.format_exc()
5030 self.logger.critical(
5031 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5032 exc_info=True,
5033 )
5034 finally:
5035 if exc:
5036 db_nslcmop_update[
5037 "detailed-status"
5038 ] = (
5039 detailed_status
5040 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5041 nslcmop_operation_state = "FAILED"
5042 if db_nsr:
5043 self._write_ns_status(
5044 nsr_id=nsr_id,
5045 ns_state=db_nsr[
5046 "nsState"
5047 ], # TODO check if degraded. For the moment use previous status
5048 current_operation="IDLE",
5049 current_operation_id=None,
5050 # error_description=error_description_nsr,
5051 # error_detail=error_detail,
5052 other_update=db_nsr_update,
5053 )
5054
5055 self._write_op_status(
5056 op_id=nslcmop_id,
5057 stage="",
5058 error_message=error_description_nslcmop,
5059 operation_state=nslcmop_operation_state,
5060 other_update=db_nslcmop_update,
5061 )
5062
5063 if nslcmop_operation_state:
5064 try:
5065 await self.msg.aiowrite(
5066 "ns",
5067 "actioned",
5068 {
5069 "nsr_id": nsr_id,
5070 "nslcmop_id": nslcmop_id,
5071 "operationState": nslcmop_operation_state,
5072 },
5073 loop=self.loop,
5074 )
5075 except Exception as e:
5076 self.logger.error(
5077 logging_text + "kafka_write notification Exception {}".format(e)
5078 )
5079 self.logger.debug(logging_text + "Exit")
5080 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5081 return nslcmop_operation_state, detailed_status
5082
5083 async def scale(self, nsr_id, nslcmop_id):
5084 # Try to lock HA task here
5085 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5086 if not task_is_locked_by_me:
5087 return
5088
5089 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
5090 stage = ["", "", ""]
5091 tasks_dict_info = {}
5092 # ^ stage, step, VIM progress
5093 self.logger.debug(logging_text + "Enter")
5094 # get all needed from database
5095 db_nsr = None
5096 db_nslcmop_update = {}
5097 db_nsr_update = {}
5098 exc = None
5099 # in case of error, indicates what part of scale was failed to put nsr at error status
5100 scale_process = None
5101 old_operational_status = ""
5102 old_config_status = ""
5103 nsi_id = None
5104 try:
5105 # wait for any previous tasks in process
5106 step = "Waiting for previous operations to terminate"
5107 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5108 self._write_ns_status(
5109 nsr_id=nsr_id,
5110 ns_state=None,
5111 current_operation="SCALING",
5112 current_operation_id=nslcmop_id,
5113 )
5114
5115 step = "Getting nslcmop from database"
5116 self.logger.debug(
5117 step + " after having waited for previous tasks to be completed"
5118 )
5119 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5120
5121 step = "Getting nsr from database"
5122 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5123 old_operational_status = db_nsr["operational-status"]
5124 old_config_status = db_nsr["config-status"]
5125
5126 step = "Parsing scaling parameters"
5127 db_nsr_update["operational-status"] = "scaling"
5128 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5129 nsr_deployed = db_nsr["_admin"].get("deployed")
5130
5131 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
5132 "scaleByStepData"
5133 ]["member-vnf-index"]
5134 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
5135 "scaleByStepData"
5136 ]["scaling-group-descriptor"]
5137 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
5138 # for backward compatibility
5139 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5140 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5141 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5142 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5143
5144 step = "Getting vnfr from database"
5145 db_vnfr = self.db.get_one(
5146 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5147 )
5148
5149 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5150
5151 step = "Getting vnfd from database"
5152 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
5153
5154 base_folder = db_vnfd["_admin"]["storage"]
5155
5156 step = "Getting scaling-group-descriptor"
5157 scaling_descriptor = find_in_list(
5158 get_scaling_aspect(db_vnfd),
5159 lambda scale_desc: scale_desc["name"] == scaling_group,
5160 )
5161 if not scaling_descriptor:
5162 raise LcmException(
5163 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
5164 "at vnfd:scaling-group-descriptor".format(scaling_group)
5165 )
5166
5167 step = "Sending scale order to VIM"
5168 # TODO check if ns is in a proper status
5169 nb_scale_op = 0
5170 if not db_nsr["_admin"].get("scaling-group"):
5171 self.update_db_2(
5172 "nsrs",
5173 nsr_id,
5174 {
5175 "_admin.scaling-group": [
5176 {"name": scaling_group, "nb-scale-op": 0}
5177 ]
5178 },
5179 )
5180 admin_scale_index = 0
5181 else:
5182 for admin_scale_index, admin_scale_info in enumerate(
5183 db_nsr["_admin"]["scaling-group"]
5184 ):
5185 if admin_scale_info["name"] == scaling_group:
5186 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
5187 break
5188 else: # not found, set index one plus last element and add new entry with the name
5189 admin_scale_index += 1
5190 db_nsr_update[
5191 "_admin.scaling-group.{}.name".format(admin_scale_index)
5192 ] = scaling_group
5193
5194 vca_scaling_info = []
5195 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
5196 if scaling_type == "SCALE_OUT":
5197 if "aspect-delta-details" not in scaling_descriptor:
5198 raise LcmException(
5199 "Aspect delta details not fount in scaling descriptor {}".format(
5200 scaling_descriptor["name"]
5201 )
5202 )
5203 # count if max-instance-count is reached
5204 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
5205
5206 scaling_info["scaling_direction"] = "OUT"
5207 scaling_info["vdu-create"] = {}
5208 scaling_info["kdu-create"] = {}
5209 for delta in deltas:
5210 for vdu_delta in delta.get("vdu-delta", {}):
5211 vdud = get_vdu(db_vnfd, vdu_delta["id"])
5212 # vdu_index also provides the number of instance of the targeted vdu
5213 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
5214 cloud_init_text = self._get_vdu_cloud_init_content(
5215 vdud, db_vnfd
5216 )
5217 if cloud_init_text:
5218 additional_params = (
5219 self._get_vdu_additional_params(db_vnfr, vdud["id"])
5220 or {}
5221 )
5222 cloud_init_list = []
5223
5224 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
5225 max_instance_count = 10
5226 if vdu_profile and "max-number-of-instances" in vdu_profile:
5227 max_instance_count = vdu_profile.get(
5228 "max-number-of-instances", 10
5229 )
5230
5231 default_instance_num = get_number_of_instances(
5232 db_vnfd, vdud["id"]
5233 )
5234 instances_number = vdu_delta.get("number-of-instances", 1)
5235 nb_scale_op += instances_number
5236
5237 new_instance_count = nb_scale_op + default_instance_num
5238 # Control if new count is over max and vdu count is less than max.
5239 # Then assign new instance count
5240 if new_instance_count > max_instance_count > vdu_count:
5241 instances_number = new_instance_count - max_instance_count
5242 else:
5243 instances_number = instances_number
5244
5245 if new_instance_count > max_instance_count:
5246 raise LcmException(
5247 "reached the limit of {} (max-instance-count) "
5248 "scaling-out operations for the "
5249 "scaling-group-descriptor '{}'".format(
5250 nb_scale_op, scaling_group
5251 )
5252 )
5253 for x in range(vdu_delta.get("number-of-instances", 1)):
5254 if cloud_init_text:
5255 # TODO Information of its own ip is not available because db_vnfr is not updated.
5256 additional_params["OSM"] = get_osm_params(
5257 db_vnfr, vdu_delta["id"], vdu_index + x
5258 )
5259 cloud_init_list.append(
5260 self._parse_cloud_init(
5261 cloud_init_text,
5262 additional_params,
5263 db_vnfd["id"],
5264 vdud["id"],
5265 )
5266 )
5267 vca_scaling_info.append(
5268 {
5269 "osm_vdu_id": vdu_delta["id"],
5270 "member-vnf-index": vnf_index,
5271 "type": "create",
5272 "vdu_index": vdu_index + x,
5273 }
5274 )
5275 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
5276 for kdu_delta in delta.get("kdu-resource-delta", {}):
5277 kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
5278 kdu_name = kdu_profile["kdu-name"]
5279 resource_name = kdu_profile["resource-name"]
5280
5281 # Might have different kdus in the same delta
5282 # Should have list for each kdu
5283 if not scaling_info["kdu-create"].get(kdu_name, None):
5284 scaling_info["kdu-create"][kdu_name] = []
5285
5286 kdur = get_kdur(db_vnfr, kdu_name)
5287 if kdur.get("helm-chart"):
5288 k8s_cluster_type = "helm-chart-v3"
5289 self.logger.debug("kdur: {}".format(kdur))
5290 if (
5291 kdur.get("helm-version")
5292 and kdur.get("helm-version") == "v2"
5293 ):
5294 k8s_cluster_type = "helm-chart"
5295 raise NotImplementedError
5296 elif kdur.get("juju-bundle"):
5297 k8s_cluster_type = "juju-bundle"
5298 else:
5299 raise LcmException(
5300 "kdu type for kdu='{}.{}' is neither helm-chart nor "
5301 "juju-bundle. Maybe an old NBI version is running".format(
5302 db_vnfr["member-vnf-index-ref"], kdu_name
5303 )
5304 )
5305
5306 max_instance_count = 10
5307 if kdu_profile and "max-number-of-instances" in kdu_profile:
5308 max_instance_count = kdu_profile.get(
5309 "max-number-of-instances", 10
5310 )
5311
5312 nb_scale_op += kdu_delta.get("number-of-instances", 1)
5313 deployed_kdu, _ = get_deployed_kdu(
5314 nsr_deployed, kdu_name, vnf_index
5315 )
5316 if deployed_kdu is None:
5317 raise LcmException(
5318 "KDU '{}' for vnf '{}' not deployed".format(
5319 kdu_name, vnf_index
5320 )
5321 )
5322 kdu_instance = deployed_kdu.get("kdu-instance")
5323 instance_num = await self.k8scluster_map[
5324 k8s_cluster_type
5325 ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
5326 kdu_replica_count = instance_num + kdu_delta.get(
5327 "number-of-instances", 1
5328 )
5329
5330 # Control if new count is over max and instance_num is less than max.
5331 # Then assign max instance number to kdu replica count
5332 if kdu_replica_count > max_instance_count > instance_num:
5333 kdu_replica_count = max_instance_count
5334 if kdu_replica_count > max_instance_count:
5335 raise LcmException(
5336 "reached the limit of {} (max-instance-count) "
5337 "scaling-out operations for the "
5338 "scaling-group-descriptor '{}'".format(
5339 instance_num, scaling_group
5340 )
5341 )
5342
5343 for x in range(kdu_delta.get("number-of-instances", 1)):
5344 vca_scaling_info.append(
5345 {
5346 "osm_kdu_id": kdu_name,
5347 "member-vnf-index": vnf_index,
5348 "type": "create",
5349 "kdu_index": instance_num + x - 1,
5350 }
5351 )
5352 scaling_info["kdu-create"][kdu_name].append(
5353 {
5354 "member-vnf-index": vnf_index,
5355 "type": "create",
5356 "k8s-cluster-type": k8s_cluster_type,
5357 "resource-name": resource_name,
5358 "scale": kdu_replica_count,
5359 }
5360 )
5361 elif scaling_type == "SCALE_IN":
5362 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
5363
5364 scaling_info["scaling_direction"] = "IN"
5365 scaling_info["vdu-delete"] = {}
5366 scaling_info["kdu-delete"] = {}
5367
5368 for delta in deltas:
5369 for vdu_delta in delta.get("vdu-delta", {}):
5370 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
5371 min_instance_count = 0
5372 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
5373 if vdu_profile and "min-number-of-instances" in vdu_profile:
5374 min_instance_count = vdu_profile["min-number-of-instances"]
5375
5376 default_instance_num = get_number_of_instances(
5377 db_vnfd, vdu_delta["id"]
5378 )
5379 instance_num = vdu_delta.get("number-of-instances", 1)
5380 nb_scale_op -= instance_num
5381
5382 new_instance_count = nb_scale_op + default_instance_num
5383
5384 if new_instance_count < min_instance_count < vdu_count:
5385 instances_number = min_instance_count - new_instance_count
5386 else:
5387 instances_number = instance_num
5388
5389 if new_instance_count < min_instance_count:
5390 raise LcmException(
5391 "reached the limit of {} (min-instance-count) scaling-in operations for the "
5392 "scaling-group-descriptor '{}'".format(
5393 nb_scale_op, scaling_group
5394 )
5395 )
5396 for x in range(vdu_delta.get("number-of-instances", 1)):
5397 vca_scaling_info.append(
5398 {
5399 "osm_vdu_id": vdu_delta["id"],
5400 "member-vnf-index": vnf_index,
5401 "type": "delete",
5402 "vdu_index": vdu_index - 1 - x,
5403 }
5404 )
5405 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
5406 for kdu_delta in delta.get("kdu-resource-delta", {}):
5407 kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
5408 kdu_name = kdu_profile["kdu-name"]
5409 resource_name = kdu_profile["resource-name"]
5410
5411 if not scaling_info["kdu-delete"].get(kdu_name, None):
5412 scaling_info["kdu-delete"][kdu_name] = []
5413
5414 kdur = get_kdur(db_vnfr, kdu_name)
5415 if kdur.get("helm-chart"):
5416 k8s_cluster_type = "helm-chart-v3"
5417 self.logger.debug("kdur: {}".format(kdur))
5418 if (
5419 kdur.get("helm-version")
5420 and kdur.get("helm-version") == "v2"
5421 ):
5422 k8s_cluster_type = "helm-chart"
5423 raise NotImplementedError
5424 elif kdur.get("juju-bundle"):
5425 k8s_cluster_type = "juju-bundle"
5426 else:
5427 raise LcmException(
5428 "kdu type for kdu='{}.{}' is neither helm-chart nor "
5429 "juju-bundle. Maybe an old NBI version is running".format(
5430 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
5431 )
5432 )
5433
5434 min_instance_count = 0
5435 if kdu_profile and "min-number-of-instances" in kdu_profile:
5436 min_instance_count = kdu_profile["min-number-of-instances"]
5437
5438 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
5439 deployed_kdu, _ = get_deployed_kdu(
5440 nsr_deployed, kdu_name, vnf_index
5441 )
5442 if deployed_kdu is None:
5443 raise LcmException(
5444 "KDU '{}' for vnf '{}' not deployed".format(
5445 kdu_name, vnf_index
5446 )
5447 )
5448 kdu_instance = deployed_kdu.get("kdu-instance")
5449 instance_num = await self.k8scluster_map[
5450 k8s_cluster_type
5451 ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
5452 kdu_replica_count = instance_num - kdu_delta.get(
5453 "number-of-instances", 1
5454 )
5455
5456 if kdu_replica_count < min_instance_count < instance_num:
5457 kdu_replica_count = min_instance_count
5458 if kdu_replica_count < min_instance_count:
5459 raise LcmException(
5460 "reached the limit of {} (min-instance-count) scaling-in operations for the "
5461 "scaling-group-descriptor '{}'".format(
5462 instance_num, scaling_group
5463 )
5464 )
5465
5466 for x in range(kdu_delta.get("number-of-instances", 1)):
5467 vca_scaling_info.append(
5468 {
5469 "osm_kdu_id": kdu_name,
5470 "member-vnf-index": vnf_index,
5471 "type": "delete",
5472 "kdu_index": instance_num - x - 1,
5473 }
5474 )
5475 scaling_info["kdu-delete"][kdu_name].append(
5476 {
5477 "member-vnf-index": vnf_index,
5478 "type": "delete",
5479 "k8s-cluster-type": k8s_cluster_type,
5480 "resource-name": resource_name,
5481 "scale": kdu_replica_count,
5482 }
5483 )
5484
5485 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
5486 vdu_delete = copy(scaling_info.get("vdu-delete"))
5487 if scaling_info["scaling_direction"] == "IN":
5488 for vdur in reversed(db_vnfr["vdur"]):
5489 if vdu_delete.get(vdur["vdu-id-ref"]):
5490 vdu_delete[vdur["vdu-id-ref"]] -= 1
5491 scaling_info["vdu"].append(
5492 {
5493 "name": vdur.get("name") or vdur.get("vdu-name"),
5494 "vdu_id": vdur["vdu-id-ref"],
5495 "interface": [],
5496 }
5497 )
5498 for interface in vdur["interfaces"]:
5499 scaling_info["vdu"][-1]["interface"].append(
5500 {
5501 "name": interface["name"],
5502 "ip_address": interface["ip-address"],
5503 "mac_address": interface.get("mac-address"),
5504 }
5505 )
5506 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
5507
5508 # PRE-SCALE BEGIN
5509 step = "Executing pre-scale vnf-config-primitive"
5510 if scaling_descriptor.get("scaling-config-action"):
5511 for scaling_config_action in scaling_descriptor[
5512 "scaling-config-action"
5513 ]:
5514 if (
5515 scaling_config_action.get("trigger") == "pre-scale-in"
5516 and scaling_type == "SCALE_IN"
5517 ) or (
5518 scaling_config_action.get("trigger") == "pre-scale-out"
5519 and scaling_type == "SCALE_OUT"
5520 ):
5521 vnf_config_primitive = scaling_config_action[
5522 "vnf-config-primitive-name-ref"
5523 ]
5524 step = db_nslcmop_update[
5525 "detailed-status"
5526 ] = "executing pre-scale scaling-config-action '{}'".format(
5527 vnf_config_primitive
5528 )
5529
5530 # look for primitive
5531 for config_primitive in (
5532 get_configuration(db_vnfd, db_vnfd["id"]) or {}
5533 ).get("config-primitive", ()):
5534 if config_primitive["name"] == vnf_config_primitive:
5535 break
5536 else:
5537 raise LcmException(
5538 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
5539 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
5540 "primitive".format(scaling_group, vnf_config_primitive)
5541 )
5542
5543 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
5544 if db_vnfr.get("additionalParamsForVnf"):
5545 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
5546
5547 scale_process = "VCA"
5548 db_nsr_update["config-status"] = "configuring pre-scaling"
5549 primitive_params = self._map_primitive_params(
5550 config_primitive, {}, vnfr_params
5551 )
5552
5553 # Pre-scale retry check: Check if this sub-operation has been executed before
5554 op_index = self._check_or_add_scale_suboperation(
5555 db_nslcmop,
5556 vnf_index,
5557 vnf_config_primitive,
5558 primitive_params,
5559 "PRE-SCALE",
5560 )
5561 if op_index == self.SUBOPERATION_STATUS_SKIP:
5562 # Skip sub-operation
5563 result = "COMPLETED"
5564 result_detail = "Done"
5565 self.logger.debug(
5566 logging_text
5567 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
5568 vnf_config_primitive, result, result_detail
5569 )
5570 )
5571 else:
5572 if op_index == self.SUBOPERATION_STATUS_NEW:
5573 # New sub-operation: Get index of this sub-operation
5574 op_index = (
5575 len(db_nslcmop.get("_admin", {}).get("operations"))
5576 - 1
5577 )
5578 self.logger.debug(
5579 logging_text
5580 + "vnf_config_primitive={} New sub-operation".format(
5581 vnf_config_primitive
5582 )
5583 )
5584 else:
5585 # retry: Get registered params for this existing sub-operation
5586 op = db_nslcmop.get("_admin", {}).get("operations", [])[
5587 op_index
5588 ]
5589 vnf_index = op.get("member_vnf_index")
5590 vnf_config_primitive = op.get("primitive")
5591 primitive_params = op.get("primitive_params")
5592 self.logger.debug(
5593 logging_text
5594 + "vnf_config_primitive={} Sub-operation retry".format(
5595 vnf_config_primitive
5596 )
5597 )
5598 # Execute the primitive, either with new (first-time) or registered (reintent) args
5599 ee_descriptor_id = config_primitive.get(
5600 "execution-environment-ref"
5601 )
5602 primitive_name = config_primitive.get(
5603 "execution-environment-primitive", vnf_config_primitive
5604 )
5605 ee_id, vca_type = self._look_for_deployed_vca(
5606 nsr_deployed["VCA"],
5607 member_vnf_index=vnf_index,
5608 vdu_id=None,
5609 vdu_count_index=None,
5610 ee_descriptor_id=ee_descriptor_id,
5611 )
5612 result, result_detail = await self._ns_execute_primitive(
5613 ee_id,
5614 primitive_name,
5615 primitive_params,
5616 vca_type=vca_type,
5617 vca_id=vca_id,
5618 )
5619 self.logger.debug(
5620 logging_text
5621 + "vnf_config_primitive={} Done with result {} {}".format(
5622 vnf_config_primitive, result, result_detail
5623 )
5624 )
5625 # Update operationState = COMPLETED | FAILED
5626 self._update_suboperation_status(
5627 db_nslcmop, op_index, result, result_detail
5628 )
5629
5630 if result == "FAILED":
5631 raise LcmException(result_detail)
5632 db_nsr_update["config-status"] = old_config_status
5633 scale_process = None
5634 # PRE-SCALE END
5635
5636 db_nsr_update[
5637 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
5638 ] = nb_scale_op
5639 db_nsr_update[
5640 "_admin.scaling-group.{}.time".format(admin_scale_index)
5641 ] = time()
5642
5643 # SCALE-IN VCA - BEGIN
5644 if vca_scaling_info:
5645 step = db_nslcmop_update[
5646 "detailed-status"
5647 ] = "Deleting the execution environments"
5648 scale_process = "VCA"
5649 for vca_info in vca_scaling_info:
5650 if vca_info["type"] == "delete":
5651 member_vnf_index = str(vca_info["member-vnf-index"])
5652 self.logger.debug(
5653 logging_text + "vdu info: {}".format(vca_info)
5654 )
5655 if vca_info.get("osm_vdu_id"):
5656 vdu_id = vca_info["osm_vdu_id"]
5657 vdu_index = int(vca_info["vdu_index"])
5658 stage[
5659 1
5660 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
5661 member_vnf_index, vdu_id, vdu_index
5662 )
5663 else:
5664 vdu_index = 0
5665 kdu_id = vca_info["osm_kdu_id"]
5666 stage[
5667 1
5668 ] = "Scaling member_vnf_index={}, kdu_id={}, vdu_index={} ".format(
5669 member_vnf_index, kdu_id, vdu_index
5670 )
5671 stage[2] = step = "Scaling in VCA"
5672 self._write_op_status(op_id=nslcmop_id, stage=stage)
5673 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
5674 config_update = db_nsr["configurationStatus"]
5675 for vca_index, vca in enumerate(vca_update):
5676 if (
5677 (vca or vca.get("ee_id"))
5678 and vca["member-vnf-index"] == member_vnf_index
5679 and vca["vdu_count_index"] == vdu_index
5680 ):
5681 if vca.get("vdu_id"):
5682 config_descriptor = get_configuration(
5683 db_vnfd, vca.get("vdu_id")
5684 )
5685 elif vca.get("kdu_name"):
5686 config_descriptor = get_configuration(
5687 db_vnfd, vca.get("kdu_name")
5688 )
5689 else:
5690 config_descriptor = get_configuration(
5691 db_vnfd, db_vnfd["id"]
5692 )
5693 operation_params = (
5694 db_nslcmop.get("operationParams") or {}
5695 )
5696 exec_terminate_primitives = not operation_params.get(
5697 "skip_terminate_primitives"
5698 ) and vca.get("needed_terminate")
5699 task = asyncio.ensure_future(
5700 asyncio.wait_for(
5701 self.destroy_N2VC(
5702 logging_text,
5703 db_nslcmop,
5704 vca,
5705 config_descriptor,
5706 vca_index,
5707 destroy_ee=True,
5708 exec_primitives=exec_terminate_primitives,
5709 scaling_in=True,
5710 vca_id=vca_id,
5711 ),
5712 timeout=self.timeout_charm_delete,
5713 )
5714 )
5715 tasks_dict_info[task] = "Terminating VCA {}".format(
5716 vca.get("ee_id")
5717 )
5718 del vca_update[vca_index]
5719 del config_update[vca_index]
5720 # wait for pending tasks of terminate primitives
5721 if tasks_dict_info:
5722 self.logger.debug(
5723 logging_text
5724 + "Waiting for tasks {}".format(
5725 list(tasks_dict_info.keys())
5726 )
5727 )
5728 error_list = await self._wait_for_tasks(
5729 logging_text,
5730 tasks_dict_info,
5731 min(
5732 self.timeout_charm_delete, self.timeout_ns_terminate
5733 ),
5734 stage,
5735 nslcmop_id,
5736 )
5737 tasks_dict_info.clear()
5738 if error_list:
5739 raise LcmException("; ".join(error_list))
5740
5741 db_vca_and_config_update = {
5742 "_admin.deployed.VCA": vca_update,
5743 "configurationStatus": config_update,
5744 }
5745 self.update_db_2(
5746 "nsrs", db_nsr["_id"], db_vca_and_config_update
5747 )
5748 scale_process = None
5749 # SCALE-IN VCA - END
5750
5751 # SCALE RO - BEGIN
5752 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
5753 scale_process = "RO"
5754 if self.ro_config.get("ng"):
5755 await self._scale_ng_ro(
5756 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
5757 )
5758 scaling_info.pop("vdu-create", None)
5759 scaling_info.pop("vdu-delete", None)
5760
5761 scale_process = None
5762 # SCALE RO - END
5763
5764 # SCALE KDU - BEGIN
5765 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
5766 scale_process = "KDU"
5767 await self._scale_kdu(
5768 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
5769 )
5770 scaling_info.pop("kdu-create", None)
5771 scaling_info.pop("kdu-delete", None)
5772
5773 scale_process = None
5774 # SCALE KDU - END
5775
5776 if db_nsr_update:
5777 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5778
5779 # SCALE-UP VCA - BEGIN
5780 if vca_scaling_info:
5781 step = db_nslcmop_update[
5782 "detailed-status"
5783 ] = "Creating new execution environments"
5784 scale_process = "VCA"
5785 for vca_info in vca_scaling_info:
5786 if vca_info["type"] == "create":
5787 member_vnf_index = str(vca_info["member-vnf-index"])
5788 self.logger.debug(
5789 logging_text + "vdu info: {}".format(vca_info)
5790 )
5791 vnfd_id = db_vnfr["vnfd-ref"]
5792 if vca_info.get("osm_vdu_id"):
5793 vdu_index = int(vca_info["vdu_index"])
5794 deploy_params = {"OSM": get_osm_params(db_vnfr)}
5795 if db_vnfr.get("additionalParamsForVnf"):
5796 deploy_params.update(
5797 parse_yaml_strings(
5798 db_vnfr["additionalParamsForVnf"].copy()
5799 )
5800 )
5801 descriptor_config = get_configuration(
5802 db_vnfd, db_vnfd["id"]
5803 )
5804 if descriptor_config:
5805 vdu_id = None
5806 vdu_name = None
5807 kdu_name = None
5808 self._deploy_n2vc(
5809 logging_text=logging_text
5810 + "member_vnf_index={} ".format(member_vnf_index),
5811 db_nsr=db_nsr,
5812 db_vnfr=db_vnfr,
5813 nslcmop_id=nslcmop_id,
5814 nsr_id=nsr_id,
5815 nsi_id=nsi_id,
5816 vnfd_id=vnfd_id,
5817 vdu_id=vdu_id,
5818 kdu_name=kdu_name,
5819 member_vnf_index=member_vnf_index,
5820 vdu_index=vdu_index,
5821 vdu_name=vdu_name,
5822 deploy_params=deploy_params,
5823 descriptor_config=descriptor_config,
5824 base_folder=base_folder,
5825 task_instantiation_info=tasks_dict_info,
5826 stage=stage,
5827 )
5828 vdu_id = vca_info["osm_vdu_id"]
5829 vdur = find_in_list(
5830 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
5831 )
5832 descriptor_config = get_configuration(db_vnfd, vdu_id)
5833 if vdur.get("additionalParams"):
5834 deploy_params_vdu = parse_yaml_strings(
5835 vdur["additionalParams"]
5836 )
5837 else:
5838 deploy_params_vdu = deploy_params
5839 deploy_params_vdu["OSM"] = get_osm_params(
5840 db_vnfr, vdu_id, vdu_count_index=vdu_index
5841 )
5842 if descriptor_config:
5843 vdu_name = None
5844 kdu_name = None
5845 stage[
5846 1
5847 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
5848 member_vnf_index, vdu_id, vdu_index
5849 )
5850 stage[2] = step = "Scaling out VCA"
5851 self._write_op_status(op_id=nslcmop_id, stage=stage)
5852 self._deploy_n2vc(
5853 logging_text=logging_text
5854 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
5855 member_vnf_index, vdu_id, vdu_index
5856 ),
5857 db_nsr=db_nsr,
5858 db_vnfr=db_vnfr,
5859 nslcmop_id=nslcmop_id,
5860 nsr_id=nsr_id,
5861 nsi_id=nsi_id,
5862 vnfd_id=vnfd_id,
5863 vdu_id=vdu_id,
5864 kdu_name=kdu_name,
5865 member_vnf_index=member_vnf_index,
5866 vdu_index=vdu_index,
5867 vdu_name=vdu_name,
5868 deploy_params=deploy_params_vdu,
5869 descriptor_config=descriptor_config,
5870 base_folder=base_folder,
5871 task_instantiation_info=tasks_dict_info,
5872 stage=stage,
5873 )
5874 else:
5875 kdu_name = vca_info["osm_kdu_id"]
5876 descriptor_config = get_configuration(db_vnfd, kdu_name)
5877 if descriptor_config:
5878 vdu_id = None
5879 kdu_index = int(vca_info["kdu_index"])
5880 vdu_name = None
5881 kdur = next(
5882 x
5883 for x in db_vnfr["kdur"]
5884 if x["kdu-name"] == kdu_name
5885 )
5886 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
5887 if kdur.get("additionalParams"):
5888 deploy_params_kdu = parse_yaml_strings(
5889 kdur["additionalParams"]
5890 )
5891
5892 self._deploy_n2vc(
5893 logging_text=logging_text,
5894 db_nsr=db_nsr,
5895 db_vnfr=db_vnfr,
5896 nslcmop_id=nslcmop_id,
5897 nsr_id=nsr_id,
5898 nsi_id=nsi_id,
5899 vnfd_id=vnfd_id,
5900 vdu_id=vdu_id,
5901 kdu_name=kdu_name,
5902 member_vnf_index=member_vnf_index,
5903 vdu_index=kdu_index,
5904 vdu_name=vdu_name,
5905 deploy_params=deploy_params_kdu,
5906 descriptor_config=descriptor_config,
5907 base_folder=base_folder,
5908 task_instantiation_info=tasks_dict_info,
5909 stage=stage,
5910 )
5911 # SCALE-UP VCA - END
5912 scale_process = None
5913
5914 # POST-SCALE BEGIN
5915 # execute primitive service POST-SCALING
5916 step = "Executing post-scale vnf-config-primitive"
5917 if scaling_descriptor.get("scaling-config-action"):
5918 for scaling_config_action in scaling_descriptor[
5919 "scaling-config-action"
5920 ]:
5921 if (
5922 scaling_config_action.get("trigger") == "post-scale-in"
5923 and scaling_type == "SCALE_IN"
5924 ) or (
5925 scaling_config_action.get("trigger") == "post-scale-out"
5926 and scaling_type == "SCALE_OUT"
5927 ):
5928 vnf_config_primitive = scaling_config_action[
5929 "vnf-config-primitive-name-ref"
5930 ]
5931 step = db_nslcmop_update[
5932 "detailed-status"
5933 ] = "executing post-scale scaling-config-action '{}'".format(
5934 vnf_config_primitive
5935 )
5936
5937 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
5938 if db_vnfr.get("additionalParamsForVnf"):
5939 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
5940
5941 # look for primitive
5942 for config_primitive in (
5943 get_configuration(db_vnfd, db_vnfd["id"]) or {}
5944 ).get("config-primitive", ()):
5945 if config_primitive["name"] == vnf_config_primitive:
5946 break
5947 else:
5948 raise LcmException(
5949 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
5950 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
5951 "config-primitive".format(
5952 scaling_group, vnf_config_primitive
5953 )
5954 )
5955 scale_process = "VCA"
5956 db_nsr_update["config-status"] = "configuring post-scaling"
5957 primitive_params = self._map_primitive_params(
5958 config_primitive, {}, vnfr_params
5959 )
5960
5961 # Post-scale retry check: Check if this sub-operation has been executed before
5962 op_index = self._check_or_add_scale_suboperation(
5963 db_nslcmop,
5964 vnf_index,
5965 vnf_config_primitive,
5966 primitive_params,
5967 "POST-SCALE",
5968 )
5969 if op_index == self.SUBOPERATION_STATUS_SKIP:
5970 # Skip sub-operation
5971 result = "COMPLETED"
5972 result_detail = "Done"
5973 self.logger.debug(
5974 logging_text
5975 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
5976 vnf_config_primitive, result, result_detail
5977 )
5978 )
5979 else:
5980 if op_index == self.SUBOPERATION_STATUS_NEW:
5981 # New sub-operation: Get index of this sub-operation
5982 op_index = (
5983 len(db_nslcmop.get("_admin", {}).get("operations"))
5984 - 1
5985 )
5986 self.logger.debug(
5987 logging_text
5988 + "vnf_config_primitive={} New sub-operation".format(
5989 vnf_config_primitive
5990 )
5991 )
5992 else:
5993 # retry: Get registered params for this existing sub-operation
5994 op = db_nslcmop.get("_admin", {}).get("operations", [])[
5995 op_index
5996 ]
5997 vnf_index = op.get("member_vnf_index")
5998 vnf_config_primitive = op.get("primitive")
5999 primitive_params = op.get("primitive_params")
6000 self.logger.debug(
6001 logging_text
6002 + "vnf_config_primitive={} Sub-operation retry".format(
6003 vnf_config_primitive
6004 )
6005 )
6006 # Execute the primitive, either with new (first-time) or registered (reintent) args
6007 ee_descriptor_id = config_primitive.get(
6008 "execution-environment-ref"
6009 )
6010 primitive_name = config_primitive.get(
6011 "execution-environment-primitive", vnf_config_primitive
6012 )
6013 ee_id, vca_type = self._look_for_deployed_vca(
6014 nsr_deployed["VCA"],
6015 member_vnf_index=vnf_index,
6016 vdu_id=None,
6017 vdu_count_index=None,
6018 ee_descriptor_id=ee_descriptor_id,
6019 )
6020 result, result_detail = await self._ns_execute_primitive(
6021 ee_id,
6022 primitive_name,
6023 primitive_params,
6024 vca_type=vca_type,
6025 vca_id=vca_id,
6026 )
6027 self.logger.debug(
6028 logging_text
6029 + "vnf_config_primitive={} Done with result {} {}".format(
6030 vnf_config_primitive, result, result_detail
6031 )
6032 )
6033 # Update operationState = COMPLETED | FAILED
6034 self._update_suboperation_status(
6035 db_nslcmop, op_index, result, result_detail
6036 )
6037
6038 if result == "FAILED":
6039 raise LcmException(result_detail)
6040 db_nsr_update["config-status"] = old_config_status
6041 scale_process = None
6042 # POST-SCALE END
6043
6044 db_nsr_update[
6045 "detailed-status"
6046 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6047 db_nsr_update["operational-status"] = (
6048 "running"
6049 if old_operational_status == "failed"
6050 else old_operational_status
6051 )
6052 db_nsr_update["config-status"] = old_config_status
6053 return
6054 except (
6055 ROclient.ROClientException,
6056 DbException,
6057 LcmException,
6058 NgRoException,
6059 ) as e:
6060 self.logger.error(logging_text + "Exit Exception {}".format(e))
6061 exc = e
6062 except asyncio.CancelledError:
6063 self.logger.error(
6064 logging_text + "Cancelled Exception while '{}'".format(step)
6065 )
6066 exc = "Operation was cancelled"
6067 except Exception as e:
6068 exc = traceback.format_exc()
6069 self.logger.critical(
6070 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6071 exc_info=True,
6072 )
6073 finally:
6074 self._write_ns_status(
6075 nsr_id=nsr_id,
6076 ns_state=None,
6077 current_operation="IDLE",
6078 current_operation_id=None,
6079 )
6080 if tasks_dict_info:
6081 stage[1] = "Waiting for instantiate pending tasks."
6082 self.logger.debug(logging_text + stage[1])
6083 exc = await self._wait_for_tasks(
6084 logging_text,
6085 tasks_dict_info,
6086 self.timeout_ns_deploy,
6087 stage,
6088 nslcmop_id,
6089 nsr_id=nsr_id,
6090 )
6091 if exc:
6092 db_nslcmop_update[
6093 "detailed-status"
6094 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6095 nslcmop_operation_state = "FAILED"
6096 if db_nsr:
6097 db_nsr_update["operational-status"] = old_operational_status
6098 db_nsr_update["config-status"] = old_config_status
6099 db_nsr_update["detailed-status"] = ""
6100 if scale_process:
6101 if "VCA" in scale_process:
6102 db_nsr_update["config-status"] = "failed"
6103 if "RO" in scale_process:
6104 db_nsr_update["operational-status"] = "failed"
6105 db_nsr_update[
6106 "detailed-status"
6107 ] = "FAILED scaling nslcmop={} {}: {}".format(
6108 nslcmop_id, step, exc
6109 )
6110 else:
6111 error_description_nslcmop = None
6112 nslcmop_operation_state = "COMPLETED"
6113 db_nslcmop_update["detailed-status"] = "Done"
6114
6115 self._write_op_status(
6116 op_id=nslcmop_id,
6117 stage="",
6118 error_message=error_description_nslcmop,
6119 operation_state=nslcmop_operation_state,
6120 other_update=db_nslcmop_update,
6121 )
6122 if db_nsr:
6123 self._write_ns_status(
6124 nsr_id=nsr_id,
6125 ns_state=None,
6126 current_operation="IDLE",
6127 current_operation_id=None,
6128 other_update=db_nsr_update,
6129 )
6130
6131 if nslcmop_operation_state:
6132 try:
6133 msg = {
6134 "nsr_id": nsr_id,
6135 "nslcmop_id": nslcmop_id,
6136 "operationState": nslcmop_operation_state,
6137 }
6138 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
6139 except Exception as e:
6140 self.logger.error(
6141 logging_text + "kafka_write notification Exception {}".format(e)
6142 )
6143 self.logger.debug(logging_text + "Exit")
6144 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
6145
6146 async def _scale_kdu(
6147 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6148 ):
6149 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
6150 for kdu_name in _scaling_info:
6151 for kdu_scaling_info in _scaling_info[kdu_name]:
6152 deployed_kdu, index = get_deployed_kdu(
6153 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
6154 )
6155 cluster_uuid = deployed_kdu["k8scluster-uuid"]
6156 kdu_instance = deployed_kdu["kdu-instance"]
6157 scale = int(kdu_scaling_info["scale"])
6158 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
6159
6160 db_dict = {
6161 "collection": "nsrs",
6162 "filter": {"_id": nsr_id},
6163 "path": "_admin.deployed.K8s.{}".format(index),
6164 }
6165
6166 step = "scaling application {}".format(
6167 kdu_scaling_info["resource-name"]
6168 )
6169 self.logger.debug(logging_text + step)
6170
6171 if kdu_scaling_info["type"] == "delete":
6172 kdu_config = get_configuration(db_vnfd, kdu_name)
6173 if (
6174 kdu_config
6175 and kdu_config.get("terminate-config-primitive")
6176 and get_juju_ee_ref(db_vnfd, kdu_name) is None
6177 ):
6178 terminate_config_primitive_list = kdu_config.get(
6179 "terminate-config-primitive"
6180 )
6181 terminate_config_primitive_list.sort(
6182 key=lambda val: int(val["seq"])
6183 )
6184
6185 for (
6186 terminate_config_primitive
6187 ) in terminate_config_primitive_list:
6188 primitive_params_ = self._map_primitive_params(
6189 terminate_config_primitive, {}, {}
6190 )
6191 step = "execute terminate config primitive"
6192 self.logger.debug(logging_text + step)
6193 await asyncio.wait_for(
6194 self.k8scluster_map[k8s_cluster_type].exec_primitive(
6195 cluster_uuid=cluster_uuid,
6196 kdu_instance=kdu_instance,
6197 primitive_name=terminate_config_primitive["name"],
6198 params=primitive_params_,
6199 db_dict=db_dict,
6200 vca_id=vca_id,
6201 ),
6202 timeout=600,
6203 )
6204
6205 await asyncio.wait_for(
6206 self.k8scluster_map[k8s_cluster_type].scale(
6207 kdu_instance,
6208 scale,
6209 kdu_scaling_info["resource-name"],
6210 vca_id=vca_id,
6211 ),
6212 timeout=self.timeout_vca_on_error,
6213 )
6214
6215 if kdu_scaling_info["type"] == "create":
6216 kdu_config = get_configuration(db_vnfd, kdu_name)
6217 if (
6218 kdu_config
6219 and kdu_config.get("initial-config-primitive")
6220 and get_juju_ee_ref(db_vnfd, kdu_name) is None
6221 ):
6222 initial_config_primitive_list = kdu_config.get(
6223 "initial-config-primitive"
6224 )
6225 initial_config_primitive_list.sort(
6226 key=lambda val: int(val["seq"])
6227 )
6228
6229 for initial_config_primitive in initial_config_primitive_list:
6230 primitive_params_ = self._map_primitive_params(
6231 initial_config_primitive, {}, {}
6232 )
6233 step = "execute initial config primitive"
6234 self.logger.debug(logging_text + step)
6235 await asyncio.wait_for(
6236 self.k8scluster_map[k8s_cluster_type].exec_primitive(
6237 cluster_uuid=cluster_uuid,
6238 kdu_instance=kdu_instance,
6239 primitive_name=initial_config_primitive["name"],
6240 params=primitive_params_,
6241 db_dict=db_dict,
6242 vca_id=vca_id,
6243 ),
6244 timeout=600,
6245 )
6246
6247 async def _scale_ng_ro(
6248 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
6249 ):
6250 nsr_id = db_nslcmop["nsInstanceId"]
6251 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
6252 db_vnfrs = {}
6253
6254 # read from db: vnfd's for every vnf
6255 db_vnfds = []
6256
6257 # for each vnf in ns, read vnfd
6258 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
6259 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
6260 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
6261 # if we haven't this vnfd, read it from db
6262 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
6263 # read from db
6264 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
6265 db_vnfds.append(vnfd)
6266 n2vc_key = self.n2vc.get_public_key()
6267 n2vc_key_list = [n2vc_key]
6268 self.scale_vnfr(
6269 db_vnfr,
6270 vdu_scaling_info.get("vdu-create"),
6271 vdu_scaling_info.get("vdu-delete"),
6272 mark_delete=True,
6273 )
6274 # db_vnfr has been updated, update db_vnfrs to use it
6275 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
6276 await self._instantiate_ng_ro(
6277 logging_text,
6278 nsr_id,
6279 db_nsd,
6280 db_nsr,
6281 db_nslcmop,
6282 db_vnfrs,
6283 db_vnfds,
6284 n2vc_key_list,
6285 stage=stage,
6286 start_deploy=time(),
6287 timeout_ns_deploy=self.timeout_ns_deploy,
6288 )
6289 if vdu_scaling_info.get("vdu-delete"):
6290 self.scale_vnfr(
6291 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
6292 )
6293
6294 async def add_prometheus_metrics(
6295 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
6296 ):
6297 if not self.prometheus:
6298 return
6299 # look if exist a file called 'prometheus*.j2' and
6300 artifact_content = self.fs.dir_ls(artifact_path)
6301 job_file = next(
6302 (
6303 f
6304 for f in artifact_content
6305 if f.startswith("prometheus") and f.endswith(".j2")
6306 ),
6307 None,
6308 )
6309 if not job_file:
6310 return
6311 with self.fs.file_open((artifact_path, job_file), "r") as f:
6312 job_data = f.read()
6313
6314 # TODO get_service
6315 _, _, service = ee_id.partition(".") # remove prefix "namespace."
6316 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
6317 host_port = "80"
6318 vnfr_id = vnfr_id.replace("-", "")
6319 variables = {
6320 "JOB_NAME": vnfr_id,
6321 "TARGET_IP": target_ip,
6322 "EXPORTER_POD_IP": host_name,
6323 "EXPORTER_POD_PORT": host_port,
6324 }
6325 job_list = self.prometheus.parse_job(job_data, variables)
6326 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
6327 for job in job_list:
6328 if (
6329 not isinstance(job.get("job_name"), str)
6330 or vnfr_id not in job["job_name"]
6331 ):
6332 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
6333 job["nsr_id"] = nsr_id
6334 job_dict = {jl["job_name"]: jl for jl in job_list}
6335 if await self.prometheus.update(job_dict):
6336 return list(job_dict.keys())
6337
6338 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6339 """
6340 Get VCA Cloud and VCA Cloud Credentials for the VIM account
6341
6342 :param: vim_account_id: VIM Account ID
6343
6344 :return: (cloud_name, cloud_credential)
6345 """
6346 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6347 return config.get("vca_cloud"), config.get("vca_cloud_credential")
6348
6349 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6350 """
6351 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
6352
6353 :param: vim_account_id: VIM Account ID
6354
6355 :return: (cloud_name, cloud_credential)
6356 """
6357 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6358 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")