a3cdb74f7ee15601d017e98bbf8ff45f22bd99ff
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmExceptionNoMgmtIP,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 vld_to_ro_ip_profile,
65 )
66 from osm_lcm.data_utils.nsd import (
67 get_ns_configuration_relation_list,
68 get_vnf_profile,
69 get_vnf_profiles,
70 )
71 from osm_lcm.data_utils.vnfd import (
72 get_kdu,
73 get_kdu_services,
74 get_relation_list,
75 get_vdu_list,
76 get_vdu_profile,
77 get_ee_sorted_initial_config_primitive_list,
78 get_ee_sorted_terminate_config_primitive_list,
79 get_kdu_list,
80 get_virtual_link_profiles,
81 get_vdu,
82 get_configuration,
83 get_vdu_index,
84 get_scaling_aspect,
85 get_number_of_instances,
86 get_juju_ee_ref,
87 get_kdu_resource_profile,
88 find_software_version,
89 check_helm_ee_in_ns,
90 )
91 from osm_lcm.data_utils.list_utils import find_in_list
92 from osm_lcm.data_utils.vnfr import (
93 get_osm_params,
94 get_vdur_index,
95 get_kdur,
96 get_volumes_from_instantiation_params,
97 )
98 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
99 from osm_lcm.data_utils.database.vim_account import VimAccountDB
100 from n2vc.definitions import RelationEndpoint
101 from n2vc.k8s_helm_conn import K8sHelmConnector
102 from n2vc.k8s_helm3_conn import K8sHelm3Connector
103 from n2vc.k8s_juju_conn import K8sJujuConnector
104
105 from osm_common.dbbase import DbException
106 from osm_common.fsbase import FsException
107
108 from osm_lcm.data_utils.database.database import Database
109 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
110 from osm_lcm.data_utils.wim import (
111 get_sdn_ports,
112 get_target_wim_attrs,
113 select_feasible_wim_account,
114 )
115
116 from n2vc.n2vc_juju_conn import N2VCJujuConnector
117 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
118
119 from osm_lcm.lcm_helm_conn import LCMHelmConn
120 from osm_lcm.osm_config import OsmConfigBuilder
121 from osm_lcm.prometheus import parse_job
122
123 from copy import copy, deepcopy
124 from time import time
125 from uuid import uuid4
126
127 from random import SystemRandom
128
129 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
130
131
class NsLcm(LcmBase):
    """Lifecycle manager for Network Service (NS) instances.

    Coordinates the RO client, the N2VC/juju connector and the k8s
    (helm2/helm3/juju) connectors, and keeps the "nsrs"/"vnfrs" database
    records up to date while NS operations progress.
    """

    # Sentinel values returned when searching for a sub-operation
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Identifier for the execution-environment TLS artifact
    # NOTE(review): presumably a certificate/secret name — confirm at usage sites
    EE_TLS_NAME = "ee-tls"
    # Human-readable task name used when reporting VCA deployment progress
    task_name_deploy_vca = "Deploying VCA"
    # Relation requirement mnemonics -> Python comparison operator strings
    rel_operation_types = {
        "GE": ">=",
        "LE": "<=",
        "GT": ">",
        "LT": "<",
        "EQ": "==",
        "NE": "!=",
    }
146
147 def __init__(self, msg, lcm_tasks, config: LcmCfg):
148 """
149 Init, Connect to database, filesystem storage, and messaging
150 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
151 :return: None
152 """
153 super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))
154
155 self.db = Database().instance.db
156 self.fs = Filesystem().instance.fs
157 self.lcm_tasks = lcm_tasks
158 self.timeout = config.timeout
159 self.ro_config = config.RO
160 self.vca_config = config.VCA
161
162 # create N2VC connector
163 self.n2vc = N2VCJujuConnector(
164 log=self.logger,
165 on_update_db=self._on_update_n2vc_db,
166 fs=self.fs,
167 db=self.db,
168 )
169
170 self.conn_helm_ee = LCMHelmConn(
171 log=self.logger,
172 vca_config=self.vca_config,
173 on_update_db=self._on_update_n2vc_db,
174 )
175
176 self.k8sclusterhelm2 = K8sHelmConnector(
177 kubectl_command=self.vca_config.kubectlpath,
178 helm_command=self.vca_config.helmpath,
179 log=self.logger,
180 on_update_db=None,
181 fs=self.fs,
182 db=self.db,
183 )
184
185 self.k8sclusterhelm3 = K8sHelm3Connector(
186 kubectl_command=self.vca_config.kubectlpath,
187 helm_command=self.vca_config.helm3path,
188 fs=self.fs,
189 log=self.logger,
190 db=self.db,
191 on_update_db=None,
192 )
193
194 self.k8sclusterjuju = K8sJujuConnector(
195 kubectl_command=self.vca_config.kubectlpath,
196 juju_command=self.vca_config.jujupath,
197 log=self.logger,
198 on_update_db=self._on_update_k8s_db,
199 fs=self.fs,
200 db=self.db,
201 )
202
203 self.k8scluster_map = {
204 "helm-chart": self.k8sclusterhelm2,
205 "helm-chart-v3": self.k8sclusterhelm3,
206 "chart": self.k8sclusterhelm3,
207 "juju-bundle": self.k8sclusterjuju,
208 "juju": self.k8sclusterjuju,
209 }
210
211 self.vca_map = {
212 "lxc_proxy_charm": self.n2vc,
213 "native_charm": self.n2vc,
214 "k8s_proxy_charm": self.n2vc,
215 "helm": self.conn_helm_ee,
216 "helm-v3": self.conn_helm_ee,
217 }
218
219 # create RO client
220 self.RO = NgRoClient(**self.ro_config.to_dict())
221
222 self.op_status_map = {
223 "instantiation": self.RO.status,
224 "termination": self.RO.status,
225 "migrate": self.RO.status,
226 "healing": self.RO.recreate_status,
227 "verticalscale": self.RO.status,
228 "start_stop_rebuild": self.RO.status,
229 }
230
231 @staticmethod
232 def increment_ip_mac(ip_mac, vm_index=1):
233 if not isinstance(ip_mac, str):
234 return ip_mac
235 try:
236 # try with ipv4 look for last dot
237 i = ip_mac.rfind(".")
238 if i > 0:
239 i += 1
240 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
241 # try with ipv6 or mac look for last colon. Operate in hex
242 i = ip_mac.rfind(":")
243 if i > 0:
244 i += 1
245 # format in hex, len can be 2 for mac or 4 for ipv6
246 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
247 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
248 )
249 except Exception:
250 pass
251 return None
252
253 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
254 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
255
256 try:
257 # TODO filter RO descriptor fields...
258
259 # write to database
260 db_dict = dict()
261 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
262 db_dict["deploymentStatus"] = ro_descriptor
263 self.update_db_2("nsrs", nsrs_id, db_dict)
264
265 except Exception as e:
266 self.logger.warn(
267 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
268 )
269
270 async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
271 # remove last dot from path (if exists)
272 if path.endswith("."):
273 path = path[:-1]
274
275 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
276 # .format(table, filter, path, updated_data))
277 try:
278 nsr_id = filter.get("_id")
279
280 # read ns record from database
281 nsr = self.db.get_one(table="nsrs", q_filter=filter)
282 current_ns_status = nsr.get("nsState")
283
284 # get vca status for NS
285 status_dict = await self.n2vc.get_status(
286 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
287 )
288
289 # vcaStatus
290 db_dict = dict()
291 db_dict["vcaStatus"] = status_dict
292
293 # update configurationStatus for this VCA
294 try:
295 vca_index = int(path[path.rfind(".") + 1 :])
296
297 vca_list = deep_get(
298 target_dict=nsr, key_list=("_admin", "deployed", "VCA")
299 )
300 vca_status = vca_list[vca_index].get("status")
301
302 configuration_status_list = nsr.get("configurationStatus")
303 config_status = configuration_status_list[vca_index].get("status")
304
305 if config_status == "BROKEN" and vca_status != "failed":
306 db_dict["configurationStatus"][vca_index] = "READY"
307 elif config_status != "BROKEN" and vca_status == "failed":
308 db_dict["configurationStatus"][vca_index] = "BROKEN"
309 except Exception as e:
310 # not update configurationStatus
311 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
312
313 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
314 # if nsState = 'DEGRADED' check if all is OK
315 is_degraded = False
316 if current_ns_status in ("READY", "DEGRADED"):
317 error_description = ""
318 # check machines
319 if status_dict.get("machines"):
320 for machine_id in status_dict.get("machines"):
321 machine = status_dict.get("machines").get(machine_id)
322 # check machine agent-status
323 if machine.get("agent-status"):
324 s = machine.get("agent-status").get("status")
325 if s != "started":
326 is_degraded = True
327 error_description += (
328 "machine {} agent-status={} ; ".format(
329 machine_id, s
330 )
331 )
332 # check machine instance status
333 if machine.get("instance-status"):
334 s = machine.get("instance-status").get("status")
335 if s != "running":
336 is_degraded = True
337 error_description += (
338 "machine {} instance-status={} ; ".format(
339 machine_id, s
340 )
341 )
342 # check applications
343 if status_dict.get("applications"):
344 for app_id in status_dict.get("applications"):
345 app = status_dict.get("applications").get(app_id)
346 # check application status
347 if app.get("status"):
348 s = app.get("status").get("status")
349 if s != "active":
350 is_degraded = True
351 error_description += (
352 "application {} status={} ; ".format(app_id, s)
353 )
354
355 if error_description:
356 db_dict["errorDescription"] = error_description
357 if current_ns_status == "READY" and is_degraded:
358 db_dict["nsState"] = "DEGRADED"
359 if current_ns_status == "DEGRADED" and not is_degraded:
360 db_dict["nsState"] = "READY"
361
362 # write to database
363 self.update_db_2("nsrs", nsr_id, db_dict)
364
365 except (asyncio.CancelledError, asyncio.TimeoutError):
366 raise
367 except Exception as e:
368 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
369
370 async def _on_update_k8s_db(
371 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
372 ):
373 """
374 Updating vca status in NSR record
375 :param cluster_uuid: UUID of a k8s cluster
376 :param kdu_instance: The unique name of the KDU instance
377 :param filter: To get nsr_id
378 :cluster_type: The cluster type (juju, k8s)
379 :return: none
380 """
381
382 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
383 # .format(cluster_uuid, kdu_instance, filter))
384
385 nsr_id = filter.get("_id")
386 try:
387 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
388 cluster_uuid=cluster_uuid,
389 kdu_instance=kdu_instance,
390 yaml_format=False,
391 complete_status=True,
392 vca_id=vca_id,
393 )
394
395 # vcaStatus
396 db_dict = dict()
397 db_dict["vcaStatus"] = {nsr_id: vca_status}
398
399 self.logger.debug(
400 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
401 )
402
403 # write to database
404 self.update_db_2("nsrs", nsr_id, db_dict)
405 except (asyncio.CancelledError, asyncio.TimeoutError):
406 raise
407 except Exception as e:
408 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
409
410 @staticmethod
411 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
412 try:
413 env = Environment(
414 undefined=StrictUndefined,
415 autoescape=select_autoescape(default_for_string=True, default=True),
416 )
417 template = env.from_string(cloud_init_text)
418 return template.render(additional_params or {})
419 except UndefinedError as e:
420 raise LcmException(
421 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
422 "file, must be provided in the instantiation parameters inside the "
423 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
424 )
425 except (TemplateError, TemplateNotFound) as e:
426 raise LcmException(
427 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
428 vnfd_id, vdu_id, e
429 )
430 )
431
432 def _get_vdu_cloud_init_content(self, vdu, vnfd):
433 cloud_init_content = cloud_init_file = None
434 try:
435 if vdu.get("cloud-init-file"):
436 base_folder = vnfd["_admin"]["storage"]
437 if base_folder["pkg-dir"]:
438 cloud_init_file = "{}/{}/cloud_init/{}".format(
439 base_folder["folder"],
440 base_folder["pkg-dir"],
441 vdu["cloud-init-file"],
442 )
443 else:
444 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
445 base_folder["folder"],
446 vdu["cloud-init-file"],
447 )
448 with self.fs.file_open(cloud_init_file, "r") as ci_file:
449 cloud_init_content = ci_file.read()
450 elif vdu.get("cloud-init"):
451 cloud_init_content = vdu["cloud-init"]
452
453 return cloud_init_content
454 except FsException as e:
455 raise LcmException(
456 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
457 vnfd["id"], vdu["id"], cloud_init_file, e
458 )
459 )
460
461 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
462 vdur = next(
463 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
464 )
465 additional_params = vdur.get("additionalParams")
466 return parse_yaml_strings(additional_params)
467
468 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
469 """
470 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
471 :param vnfd: input vnfd
472 :param new_id: overrides vnf id if provided
473 :param additionalParams: Instantiation params for VNFs provided
474 :param nsrId: Id of the NSR
475 :return: copy of vnfd
476 """
477 vnfd_RO = deepcopy(vnfd)
478 # remove unused by RO configuration, monitoring, scaling and internal keys
479 vnfd_RO.pop("_id", None)
480 vnfd_RO.pop("_admin", None)
481 vnfd_RO.pop("monitoring-param", None)
482 vnfd_RO.pop("scaling-group-descriptor", None)
483 vnfd_RO.pop("kdu", None)
484 vnfd_RO.pop("k8s-cluster", None)
485 if new_id:
486 vnfd_RO["id"] = new_id
487
488 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
489 for vdu in get_iterable(vnfd_RO, "vdu"):
490 vdu.pop("cloud-init-file", None)
491 vdu.pop("cloud-init", None)
492 return vnfd_RO
493
494 @staticmethod
495 def ip_profile_2_RO(ip_profile):
496 RO_ip_profile = deepcopy(ip_profile)
497 if "dns-server" in RO_ip_profile:
498 if isinstance(RO_ip_profile["dns-server"], list):
499 RO_ip_profile["dns-address"] = []
500 for ds in RO_ip_profile.pop("dns-server"):
501 RO_ip_profile["dns-address"].append(ds["address"])
502 else:
503 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
504 if RO_ip_profile.get("ip-version") == "ipv4":
505 RO_ip_profile["ip-version"] = "IPv4"
506 if RO_ip_profile.get("ip-version") == "ipv6":
507 RO_ip_profile["ip-version"] = "IPv6"
508 if "dhcp-params" in RO_ip_profile:
509 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
510 return RO_ip_profile
511
512 def _get_ro_vim_id_for_vim_account(self, vim_account):
513 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
514 if db_vim["_admin"]["operationalState"] != "ENABLED":
515 raise LcmException(
516 "VIM={} is not available. operationalState={}".format(
517 vim_account, db_vim["_admin"]["operationalState"]
518 )
519 )
520 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
521 return RO_vim_id
522
523 def get_ro_wim_id_for_wim_account(self, wim_account):
524 if isinstance(wim_account, str):
525 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
526 if db_wim["_admin"]["operationalState"] != "ENABLED":
527 raise LcmException(
528 "WIM={} is not available. operationalState={}".format(
529 wim_account, db_wim["_admin"]["operationalState"]
530 )
531 )
532 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
533 return RO_wim_id
534 else:
535 return wim_account
536
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Add or remove vdur entries of a VNFR in the database (scale out/in).

        :param db_vnfr: vnfr content; modified in place (its "vdur" list is
            re-read from the database at the end)
        :param vdu_create: dict vdu-id -> number of instances to add
        :param vdu_delete: dict vdu-id -> number of instances to remove
        :param mark_delete: when True, removed vdurs are only marked as
            "DELETING" instead of being pulled from the database
        :raises LcmException: scaling out with neither an existing vdur nor a
            saved vdur-template
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # use the last existing vdur of this vdu as the copy source
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # each new instance is a copy of the source vdur with a
                    # fresh id, reset runtime state and shifted count-index
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per replica; dynamic
                        # ones are cleared so the VIM re-assigns them
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                # keep the last vdur as a template so a later scale-out works
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the newest vdu_count instances of this vdu as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        # a None push_list means "nothing to push" for self.db.set_one
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
647
648 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
649 """
650 Updates database nsr with the RO info for the created vld
651 :param ns_update_nsr: dictionary to be filled with the updated info
652 :param db_nsr: content of db_nsr. This is also modified
653 :param nsr_desc_RO: nsr descriptor from RO
654 :return: Nothing, LcmException is raised on errors
655 """
656
657 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
658 for net_RO in get_iterable(nsr_desc_RO, "nets"):
659 if vld["id"] != net_RO.get("ns_net_osm_id"):
660 continue
661 vld["vim-id"] = net_RO.get("vim_net_id")
662 vld["name"] = net_RO.get("vim_name")
663 vld["status"] = net_RO.get("status")
664 vld["status-detailed"] = net_RO.get("error_msg")
665 ns_update_nsr["vld.{}".format(vld_index)] = vld
666 break
667 else:
668 raise LcmException(
669 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
670 )
671
672 def set_vnfr_at_error(self, db_vnfrs, error_text):
673 try:
674 for db_vnfr in db_vnfrs.values():
675 vnfr_update = {"status": "ERROR"}
676 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
677 if "status" not in vdur:
678 vdur["status"] = "ERROR"
679 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
680 if error_text:
681 vdur["status-detailed"] = str(error_text)
682 vnfr_update[
683 "vdur.{}.status-detailed".format(vdu_index)
684 ] = "ERROR"
685 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
686 except DbException as e:
687 self.logger.error("Cannot update vnf. {}".format(e))
688
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf matching this member-vnf-index; the for/else
            # raises when RO reported no such vnf
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may return several addresses separated by ';'
                    # keep only the first one as the management IP
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    # RO "vms" are matched by vdu id AND replica number:
                    # vdur_RO_count_index counts how many same-id vms were
                    # skipped so the Nth vdur pairs with the Nth RO vm
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # physical DUs are not deployed by RO; skip
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy per-interface addresses reported by the VIM
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # copy vnf-internal vld info (vim net ids, status) from RO
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
785
786 def _get_ns_config_info(self, nsr_id):
787 """
788 Generates a mapping between vnf,vdu elements and the N2VC id
789 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
790 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
791 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
792 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
793 """
794 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
795 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
796 mapping = {}
797 ns_config_info = {"osm-config-mapping": mapping}
798 for vca in vca_deployed_list:
799 if not vca["member-vnf-index"]:
800 continue
801 if not vca["vdu_id"]:
802 mapping[vca["member-vnf-index"]] = vca["application"]
803 else:
804 mapping[
805 "{}.{}.{}".format(
806 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
807 )
808 ] = vca["application"]
809 return ns_config_info
810
811 async def _instantiate_ng_ro(
812 self,
813 logging_text,
814 nsr_id,
815 nsd,
816 db_nsr,
817 db_nslcmop,
818 db_vnfrs,
819 db_vnfds,
820 n2vc_key_list,
821 stage,
822 start_deploy,
823 timeout_ns_deploy,
824 ):
825 db_vims = {}
826
827 def get_vim_account(vim_account_id):
828 nonlocal db_vims
829 if vim_account_id in db_vims:
830 return db_vims[vim_account_id]
831 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
832 db_vims[vim_account_id] = db_vim
833 return db_vim
834
835 # modify target_vld info with instantiation parameters
836 def parse_vld_instantiation_params(
837 target_vim, target_vld, vld_params, target_sdn
838 ):
839 if vld_params.get("ip-profile"):
840 target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
841 vld_params["ip-profile"]
842 )
843 if vld_params.get("provider-network"):
844 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
845 "provider-network"
846 ]
847 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
848 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
849 "provider-network"
850 ]["sdn-ports"]
851
852 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
853 # if wim_account_id is specified in vld_params, validate if it is feasible.
854 wim_account_id, db_wim = select_feasible_wim_account(
855 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
856 )
857
858 if wim_account_id:
859 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
860 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
861 # update vld_params with correct WIM account Id
862 vld_params["wimAccountId"] = wim_account_id
863
864 target_wim = "wim:{}".format(wim_account_id)
865 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
866 sdn_ports = get_sdn_ports(vld_params, db_wim)
867 if len(sdn_ports) > 0:
868 target_vld["vim_info"][target_wim] = target_wim_attrs
869 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
870
871 self.logger.debug(
872 "Target VLD with WIM data: {:s}".format(str(target_vld))
873 )
874
875 for param in ("vim-network-name", "vim-network-id"):
876 if vld_params.get(param):
877 if isinstance(vld_params[param], dict):
878 for vim, vim_net in vld_params[param].items():
879 other_target_vim = "vim:" + vim
880 populate_dict(
881 target_vld["vim_info"],
882 (other_target_vim, param.replace("-", "_")),
883 vim_net,
884 )
885 else: # isinstance str
886 target_vld["vim_info"][target_vim][
887 param.replace("-", "_")
888 ] = vld_params[param]
889 if vld_params.get("common_id"):
890 target_vld["common_id"] = vld_params.get("common_id")
891
892 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
893 def update_ns_vld_target(target, ns_params):
894 for vnf_params in ns_params.get("vnf", ()):
895 if vnf_params.get("vimAccountId"):
896 target_vnf = next(
897 (
898 vnfr
899 for vnfr in db_vnfrs.values()
900 if vnf_params["member-vnf-index"]
901 == vnfr["member-vnf-index-ref"]
902 ),
903 None,
904 )
905 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
906 if not vdur:
907 continue
908 for a_index, a_vld in enumerate(target["ns"]["vld"]):
909 target_vld = find_in_list(
910 get_iterable(vdur, "interfaces"),
911 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
912 )
913
914 vld_params = find_in_list(
915 get_iterable(ns_params, "vld"),
916 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
917 )
918 if target_vld:
919 if vnf_params.get("vimAccountId") not in a_vld.get(
920 "vim_info", {}
921 ):
922 target_vim_network_list = [
923 v for _, v in a_vld.get("vim_info").items()
924 ]
925 target_vim_network_name = next(
926 (
927 item.get("vim_network_name", "")
928 for item in target_vim_network_list
929 ),
930 "",
931 )
932
933 target["ns"]["vld"][a_index].get("vim_info").update(
934 {
935 "vim:{}".format(vnf_params["vimAccountId"]): {
936 "vim_network_name": target_vim_network_name,
937 }
938 }
939 )
940
941 if vld_params:
942 for param in ("vim-network-name", "vim-network-id"):
943 if vld_params.get(param) and isinstance(
944 vld_params[param], dict
945 ):
946 for vim, vim_net in vld_params[
947 param
948 ].items():
949 other_target_vim = "vim:" + vim
950 populate_dict(
951 target["ns"]["vld"][a_index].get(
952 "vim_info"
953 ),
954 (
955 other_target_vim,
956 param.replace("-", "_"),
957 ),
958 vim_net,
959 )
960
961 nslcmop_id = db_nslcmop["_id"]
962 target = {
963 "name": db_nsr["name"],
964 "ns": {"vld": []},
965 "vnf": [],
966 "image": deepcopy(db_nsr["image"]),
967 "flavor": deepcopy(db_nsr["flavor"]),
968 "action_id": nslcmop_id,
969 "cloud_init_content": {},
970 }
971 for image in target["image"]:
972 image["vim_info"] = {}
973 for flavor in target["flavor"]:
974 flavor["vim_info"] = {}
975 if db_nsr.get("shared-volumes"):
976 target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
977 for shared_volumes in target["shared-volumes"]:
978 shared_volumes["vim_info"] = {}
979 if db_nsr.get("affinity-or-anti-affinity-group"):
980 target["affinity-or-anti-affinity-group"] = deepcopy(
981 db_nsr["affinity-or-anti-affinity-group"]
982 )
983 for affinity_or_anti_affinity_group in target[
984 "affinity-or-anti-affinity-group"
985 ]:
986 affinity_or_anti_affinity_group["vim_info"] = {}
987
988 if db_nslcmop.get("lcmOperationType") != "instantiate":
989 # get parameters of instantiation:
990 db_nslcmop_instantiate = self.db.get_list(
991 "nslcmops",
992 {
993 "nsInstanceId": db_nslcmop["nsInstanceId"],
994 "lcmOperationType": "instantiate",
995 },
996 )[-1]
997 ns_params = db_nslcmop_instantiate.get("operationParams")
998 else:
999 ns_params = db_nslcmop.get("operationParams")
1000 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
1001 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
1002
1003 cp2target = {}
1004 for vld_index, vld in enumerate(db_nsr.get("vld")):
1005 target_vim = "vim:{}".format(ns_params["vimAccountId"])
1006 target_vld = {
1007 "id": vld["id"],
1008 "name": vld["name"],
1009 "mgmt-network": vld.get("mgmt-network", False),
1010 "type": vld.get("type"),
1011 "vim_info": {
1012 target_vim: {
1013 "vim_network_name": vld.get("vim-network-name"),
1014 "vim_account_id": ns_params["vimAccountId"],
1015 }
1016 },
1017 }
1018 # check if this network needs SDN assist
1019 if vld.get("pci-interfaces"):
1020 db_vim = get_vim_account(ns_params["vimAccountId"])
1021 if vim_config := db_vim.get("config"):
1022 if sdnc_id := vim_config.get("sdn-controller"):
1023 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1024 target_sdn = "sdn:{}".format(sdnc_id)
1025 target_vld["vim_info"][target_sdn] = {
1026 "sdn": True,
1027 "target_vim": target_vim,
1028 "vlds": [sdn_vld],
1029 "type": vld.get("type"),
1030 }
1031
1032 nsd_vnf_profiles = get_vnf_profiles(nsd)
1033 for nsd_vnf_profile in nsd_vnf_profiles:
1034 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1035 if cp["virtual-link-profile-id"] == vld["id"]:
1036 cp2target[
1037 "member_vnf:{}.{}".format(
1038 cp["constituent-cpd-id"][0][
1039 "constituent-base-element-id"
1040 ],
1041 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1042 )
1043 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1044
1045 # check at nsd descriptor, if there is an ip-profile
1046 vld_params = {}
1047 nsd_vlp = find_in_list(
1048 get_virtual_link_profiles(nsd),
1049 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1050 == vld["id"],
1051 )
1052 if (
1053 nsd_vlp
1054 and nsd_vlp.get("virtual-link-protocol-data")
1055 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1056 ):
1057 vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
1058 "l3-protocol-data"
1059 ]
1060
1061 # update vld_params with instantiation params
1062 vld_instantiation_params = find_in_list(
1063 get_iterable(ns_params, "vld"),
1064 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1065 )
1066 if vld_instantiation_params:
1067 vld_params.update(vld_instantiation_params)
1068 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1069 target["ns"]["vld"].append(target_vld)
1070 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1071 update_ns_vld_target(target, ns_params)
1072
1073 for vnfr in db_vnfrs.values():
1074 vnfd = find_in_list(
1075 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1076 )
1077 vnf_params = find_in_list(
1078 get_iterable(ns_params, "vnf"),
1079 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1080 )
1081 target_vnf = deepcopy(vnfr)
1082 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1083 for vld in target_vnf.get("vld", ()):
1084 # check if connected to a ns.vld, to fill target'
1085 vnf_cp = find_in_list(
1086 vnfd.get("int-virtual-link-desc", ()),
1087 lambda cpd: cpd.get("id") == vld["id"],
1088 )
1089 if vnf_cp:
1090 ns_cp = "member_vnf:{}.{}".format(
1091 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1092 )
1093 if cp2target.get(ns_cp):
1094 vld["target"] = cp2target[ns_cp]
1095
1096 vld["vim_info"] = {
1097 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1098 }
1099 # check if this network needs SDN assist
1100 target_sdn = None
1101 if vld.get("pci-interfaces"):
1102 db_vim = get_vim_account(vnfr["vim-account-id"])
1103 sdnc_id = db_vim["config"].get("sdn-controller")
1104 if sdnc_id:
1105 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1106 target_sdn = "sdn:{}".format(sdnc_id)
1107 vld["vim_info"][target_sdn] = {
1108 "sdn": True,
1109 "target_vim": target_vim,
1110 "vlds": [sdn_vld],
1111 "type": vld.get("type"),
1112 }
1113
1114 # check at vnfd descriptor, if there is an ip-profile
1115 vld_params = {}
1116 vnfd_vlp = find_in_list(
1117 get_virtual_link_profiles(vnfd),
1118 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1119 )
1120 if (
1121 vnfd_vlp
1122 and vnfd_vlp.get("virtual-link-protocol-data")
1123 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1124 ):
1125 vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
1126 "l3-protocol-data"
1127 ]
1128 # update vld_params with instantiation params
1129 if vnf_params:
1130 vld_instantiation_params = find_in_list(
1131 get_iterable(vnf_params, "internal-vld"),
1132 lambda i_vld: i_vld["name"] == vld["id"],
1133 )
1134 if vld_instantiation_params:
1135 vld_params.update(vld_instantiation_params)
1136 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1137
1138 vdur_list = []
1139 for vdur in target_vnf.get("vdur", ()):
1140 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1141 continue # This vdu must not be created
1142 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1143
1144 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1145
1146 if ssh_keys_all:
1147 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1148 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1149 if (
1150 vdu_configuration
1151 and vdu_configuration.get("config-access")
1152 and vdu_configuration.get("config-access").get("ssh-access")
1153 ):
1154 vdur["ssh-keys"] = ssh_keys_all
1155 vdur["ssh-access-required"] = vdu_configuration[
1156 "config-access"
1157 ]["ssh-access"]["required"]
1158 elif (
1159 vnf_configuration
1160 and vnf_configuration.get("config-access")
1161 and vnf_configuration.get("config-access").get("ssh-access")
1162 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1163 ):
1164 vdur["ssh-keys"] = ssh_keys_all
1165 vdur["ssh-access-required"] = vnf_configuration[
1166 "config-access"
1167 ]["ssh-access"]["required"]
1168 elif ssh_keys_instantiation and find_in_list(
1169 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1170 ):
1171 vdur["ssh-keys"] = ssh_keys_instantiation
1172
1173 self.logger.debug("NS > vdur > {}".format(vdur))
1174
1175 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1176 # cloud-init
1177 if vdud.get("cloud-init-file"):
1178 vdur["cloud-init"] = "{}:file:{}".format(
1179 vnfd["_id"], vdud.get("cloud-init-file")
1180 )
1181 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1182 if vdur["cloud-init"] not in target["cloud_init_content"]:
1183 base_folder = vnfd["_admin"]["storage"]
1184 if base_folder["pkg-dir"]:
1185 cloud_init_file = "{}/{}/cloud_init/{}".format(
1186 base_folder["folder"],
1187 base_folder["pkg-dir"],
1188 vdud.get("cloud-init-file"),
1189 )
1190 else:
1191 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1192 base_folder["folder"],
1193 vdud.get("cloud-init-file"),
1194 )
1195 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1196 target["cloud_init_content"][
1197 vdur["cloud-init"]
1198 ] = ci_file.read()
1199 elif vdud.get("cloud-init"):
1200 vdur["cloud-init"] = "{}:vdu:{}".format(
1201 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1202 )
1203 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1204 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1205 "cloud-init"
1206 ]
1207 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1208 deploy_params_vdu = self._format_additional_params(
1209 vdur.get("additionalParams") or {}
1210 )
1211 deploy_params_vdu["OSM"] = get_osm_params(
1212 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1213 )
1214 vdur["additionalParams"] = deploy_params_vdu
1215
1216 # flavor
1217 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1218 if target_vim not in ns_flavor["vim_info"]:
1219 ns_flavor["vim_info"][target_vim] = {}
1220
1221 # deal with images
1222 # in case alternative images are provided we must check if they should be applied
1223 # for the vim_type, modify the vim_type taking into account
1224 ns_image_id = int(vdur["ns-image-id"])
1225 if vdur.get("alt-image-ids"):
1226 db_vim = get_vim_account(vnfr["vim-account-id"])
1227 vim_type = db_vim["vim_type"]
1228 for alt_image_id in vdur.get("alt-image-ids"):
1229 ns_alt_image = target["image"][int(alt_image_id)]
1230 if vim_type == ns_alt_image.get("vim-type"):
1231 # must use alternative image
1232 self.logger.debug(
1233 "use alternative image id: {}".format(alt_image_id)
1234 )
1235 ns_image_id = alt_image_id
1236 vdur["ns-image-id"] = ns_image_id
1237 break
1238 ns_image = target["image"][int(ns_image_id)]
1239 if target_vim not in ns_image["vim_info"]:
1240 ns_image["vim_info"][target_vim] = {}
1241
1242 # Affinity groups
1243 if vdur.get("affinity-or-anti-affinity-group-id"):
1244 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1245 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1246 if target_vim not in ns_ags["vim_info"]:
1247 ns_ags["vim_info"][target_vim] = {}
1248
1249 # shared-volumes
1250 if vdur.get("shared-volumes-id"):
1251 for sv_id in vdur["shared-volumes-id"]:
1252 ns_sv = find_in_list(
1253 target["shared-volumes"], lambda sv: sv_id in sv["id"]
1254 )
1255 if ns_sv:
1256 ns_sv["vim_info"][target_vim] = {}
1257
1258 vdur["vim_info"] = {target_vim: {}}
1259 # instantiation parameters
1260 if vnf_params:
1261 vdu_instantiation_params = find_in_list(
1262 get_iterable(vnf_params, "vdu"),
1263 lambda i_vdu: i_vdu["id"] == vdud["id"],
1264 )
1265 if vdu_instantiation_params:
1266 # Parse the vdu_volumes from the instantiation params
1267 vdu_volumes = get_volumes_from_instantiation_params(
1268 vdu_instantiation_params, vdud
1269 )
1270 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1271 vdur["additionalParams"]["OSM"][
1272 "vim_flavor_id"
1273 ] = vdu_instantiation_params.get("vim-flavor-id")
1274 vdur_list.append(vdur)
1275 target_vnf["vdur"] = vdur_list
1276 target["vnf"].append(target_vnf)
1277
1278 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1279 desc = await self.RO.deploy(nsr_id, target)
1280 self.logger.debug("RO return > {}".format(desc))
1281 action_id = desc["action_id"]
1282 await self._wait_ng_ro(
1283 nsr_id,
1284 action_id,
1285 nslcmop_id,
1286 start_deploy,
1287 timeout_ns_deploy,
1288 stage,
1289 operation="instantiation",
1290 )
1291
1292 # Updating NSR
1293 db_nsr_update = {
1294 "_admin.deployed.RO.operational-status": "running",
1295 "detailed-status": " ".join(stage),
1296 }
1297 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1298 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1299 self._write_op_status(nslcmop_id, stage)
1300 self.logger.debug(
1301 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1302 )
1303 return
1304
1305 async def _wait_ng_ro(
1306 self,
1307 nsr_id,
1308 action_id,
1309 nslcmop_id=None,
1310 start_time=None,
1311 timeout=600,
1312 stage=None,
1313 operation=None,
1314 ):
1315 detailed_status_old = None
1316 db_nsr_update = {}
1317 start_time = start_time or time()
1318 while time() <= start_time + timeout:
1319 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1320 self.logger.debug("Wait NG RO > {}".format(desc_status))
1321 if desc_status["status"] == "FAILED":
1322 raise NgRoException(desc_status["details"])
1323 elif desc_status["status"] == "BUILD":
1324 if stage:
1325 stage[2] = "VIM: ({})".format(desc_status["details"])
1326 elif desc_status["status"] == "DONE":
1327 if stage:
1328 stage[2] = "Deployed at VIM"
1329 break
1330 else:
1331 assert False, "ROclient.check_ns_status returns unknown {}".format(
1332 desc_status["status"]
1333 )
1334 if stage and nslcmop_id and stage[2] != detailed_status_old:
1335 detailed_status_old = stage[2]
1336 db_nsr_update["detailed-status"] = " ".join(stage)
1337 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1338 self._write_op_status(nslcmop_id, stage)
1339 await asyncio.sleep(15)
1340 else: # timeout_ns_deploy
1341 raise NgRoException("Timeout waiting ns to deploy")
1342
1343 async def _terminate_ng_ro(
1344 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1345 ):
1346 db_nsr_update = {}
1347 failed_detail = []
1348 action_id = None
1349 start_deploy = time()
1350 try:
1351 target = {
1352 "ns": {"vld": []},
1353 "vnf": [],
1354 "image": [],
1355 "flavor": [],
1356 "action_id": nslcmop_id,
1357 }
1358 desc = await self.RO.deploy(nsr_id, target)
1359 action_id = desc["action_id"]
1360 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1361 self.logger.debug(
1362 logging_text
1363 + "ns terminate action at RO. action_id={}".format(action_id)
1364 )
1365
1366 # wait until done
1367 delete_timeout = 20 * 60 # 20 minutes
1368 await self._wait_ng_ro(
1369 nsr_id,
1370 action_id,
1371 nslcmop_id,
1372 start_deploy,
1373 delete_timeout,
1374 stage,
1375 operation="termination",
1376 )
1377 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1378 # delete all nsr
1379 await self.RO.delete(nsr_id)
1380 except NgRoException as e:
1381 if e.http_code == 404: # not found
1382 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1383 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1384 self.logger.debug(
1385 logging_text + "RO_action_id={} already deleted".format(action_id)
1386 )
1387 elif e.http_code == 409: # conflict
1388 failed_detail.append("delete conflict: {}".format(e))
1389 self.logger.debug(
1390 logging_text
1391 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1392 )
1393 else:
1394 failed_detail.append("delete error: {}".format(e))
1395 self.logger.error(
1396 logging_text
1397 + "RO_action_id={} delete error: {}".format(action_id, e)
1398 )
1399 except Exception as e:
1400 failed_detail.append("delete error: {}".format(e))
1401 self.logger.error(
1402 logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
1403 )
1404
1405 if failed_detail:
1406 stage[2] = "Error deleting from VIM"
1407 else:
1408 stage[2] = "Deleted from VIM"
1409 db_nsr_update["detailed-status"] = " ".join(stage)
1410 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1411 self._write_op_status(nslcmop_id, stage)
1412
1413 if failed_detail:
1414 raise LcmException("; ".join(failed_detail))
1415 return
1416
1417 async def instantiate_RO(
1418 self,
1419 logging_text,
1420 nsr_id,
1421 nsd,
1422 db_nsr,
1423 db_nslcmop,
1424 db_vnfrs,
1425 db_vnfds,
1426 n2vc_key_list,
1427 stage,
1428 ):
1429 """
1430 Instantiate at RO
1431 :param logging_text: preffix text to use at logging
1432 :param nsr_id: nsr identity
1433 :param nsd: database content of ns descriptor
1434 :param db_nsr: database content of ns record
1435 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1436 :param db_vnfrs:
1437 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1438 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1439 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1440 :return: None or exception
1441 """
1442 try:
1443 start_deploy = time()
1444 ns_params = db_nslcmop.get("operationParams")
1445 if ns_params and ns_params.get("timeout_ns_deploy"):
1446 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1447 else:
1448 timeout_ns_deploy = self.timeout.ns_deploy
1449
1450 # Check for and optionally request placement optimization. Database will be updated if placement activated
1451 stage[2] = "Waiting for Placement."
1452 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1453 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1454 for vnfr in db_vnfrs.values():
1455 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1456 break
1457 else:
1458 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1459
1460 return await self._instantiate_ng_ro(
1461 logging_text,
1462 nsr_id,
1463 nsd,
1464 db_nsr,
1465 db_nslcmop,
1466 db_vnfrs,
1467 db_vnfds,
1468 n2vc_key_list,
1469 stage,
1470 start_deploy,
1471 timeout_ns_deploy,
1472 )
1473 except Exception as e:
1474 stage[2] = "ERROR deploying at VIM"
1475 self.set_vnfr_at_error(db_vnfrs, str(e))
1476 self.logger.error(
1477 "Error deploying at VIM {}".format(e),
1478 exc_info=not isinstance(
1479 e,
1480 (
1481 ROclient.ROClientException,
1482 LcmException,
1483 DbException,
1484 NgRoException,
1485 ),
1486 ),
1487 )
1488 raise
1489
1490 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1491 """
1492 Wait for kdu to be up, get ip address
1493 :param logging_text: prefix use for logging
1494 :param nsr_id:
1495 :param vnfr_id:
1496 :param kdu_name:
1497 :return: IP address, K8s services
1498 """
1499
1500 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1501 nb_tries = 0
1502
1503 while nb_tries < 360:
1504 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1505 kdur = next(
1506 (
1507 x
1508 for x in get_iterable(db_vnfr, "kdur")
1509 if x.get("kdu-name") == kdu_name
1510 ),
1511 None,
1512 )
1513 if not kdur:
1514 raise LcmException(
1515 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1516 )
1517 if kdur.get("status"):
1518 if kdur["status"] in ("READY", "ENABLED"):
1519 return kdur.get("ip-address"), kdur.get("services")
1520 else:
1521 raise LcmException(
1522 "target KDU={} is in error state".format(kdu_name)
1523 )
1524
1525 await asyncio.sleep(10)
1526 nb_tries += 1
1527 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1528
1529 async def wait_vm_up_insert_key_ro(
1530 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1531 ):
1532 """
1533 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1534 :param logging_text: prefix use for logging
1535 :param nsr_id:
1536 :param vnfr_id:
1537 :param vdu_id:
1538 :param vdu_index:
1539 :param pub_key: public ssh key to inject, None to skip
1540 :param user: user to apply the public ssh key
1541 :return: IP address
1542 """
1543
1544 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1545 ip_address = None
1546 target_vdu_id = None
1547 ro_retries = 0
1548
1549 while True:
1550 ro_retries += 1
1551 if ro_retries >= 360: # 1 hour
1552 raise LcmException(
1553 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1554 )
1555
1556 await asyncio.sleep(10)
1557
1558 # get ip address
1559 if not target_vdu_id:
1560 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1561
1562 if not vdu_id: # for the VNF case
1563 if db_vnfr.get("status") == "ERROR":
1564 raise LcmException(
1565 "Cannot inject ssh-key because target VNF is in error state"
1566 )
1567 ip_address = db_vnfr.get("ip-address")
1568 if not ip_address:
1569 continue
1570 vdur = next(
1571 (
1572 x
1573 for x in get_iterable(db_vnfr, "vdur")
1574 if x.get("ip-address") == ip_address
1575 ),
1576 None,
1577 )
1578 else: # VDU case
1579 vdur = next(
1580 (
1581 x
1582 for x in get_iterable(db_vnfr, "vdur")
1583 if x.get("vdu-id-ref") == vdu_id
1584 and x.get("count-index") == vdu_index
1585 ),
1586 None,
1587 )
1588
1589 if (
1590 not vdur and len(db_vnfr.get("vdur", ())) == 1
1591 ): # If only one, this should be the target vdu
1592 vdur = db_vnfr["vdur"][0]
1593 if not vdur:
1594 raise LcmException(
1595 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1596 vnfr_id, vdu_id, vdu_index
1597 )
1598 )
1599 # New generation RO stores information at "vim_info"
1600 ng_ro_status = None
1601 target_vim = None
1602 if vdur.get("vim_info"):
1603 target_vim = next(
1604 t for t in vdur["vim_info"]
1605 ) # there should be only one key
1606 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1607 if (
1608 vdur.get("pdu-type")
1609 or vdur.get("status") == "ACTIVE"
1610 or ng_ro_status == "ACTIVE"
1611 ):
1612 ip_address = vdur.get("ip-address")
1613 if not ip_address:
1614 continue
1615 target_vdu_id = vdur["vdu-id-ref"]
1616 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1617 raise LcmException(
1618 "Cannot inject ssh-key because target VM is in error state"
1619 )
1620
1621 if not target_vdu_id:
1622 continue
1623
1624 # inject public key into machine
1625 if pub_key and user:
1626 self.logger.debug(logging_text + "Inserting RO key")
1627 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1628 if vdur.get("pdu-type"):
1629 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1630 return ip_address
1631 try:
1632 target = {
1633 "action": {
1634 "action": "inject_ssh_key",
1635 "key": pub_key,
1636 "user": user,
1637 },
1638 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1639 }
1640 desc = await self.RO.deploy(nsr_id, target)
1641 action_id = desc["action_id"]
1642 await self._wait_ng_ro(
1643 nsr_id, action_id, timeout=600, operation="instantiation"
1644 )
1645 break
1646 except NgRoException as e:
1647 raise LcmException(
1648 "Reaching max tries injecting key. Error: {}".format(e)
1649 )
1650 else:
1651 break
1652
1653 return ip_address
1654
1655 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1656 """
1657 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1658 """
1659 my_vca = vca_deployed_list[vca_index]
1660 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1661 # vdu or kdu: no dependencies
1662 return
1663 timeout = 300
1664 while timeout >= 0:
1665 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1666 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1667 configuration_status_list = db_nsr["configurationStatus"]
1668 for index, vca_deployed in enumerate(configuration_status_list):
1669 if index == vca_index:
1670 # myself
1671 continue
1672 if not my_vca.get("member-vnf-index") or (
1673 vca_deployed.get("member-vnf-index")
1674 == my_vca.get("member-vnf-index")
1675 ):
1676 internal_status = configuration_status_list[index].get("status")
1677 if internal_status == "READY":
1678 continue
1679 elif internal_status == "BROKEN":
1680 raise LcmException(
1681 "Configuration aborted because dependent charm/s has failed"
1682 )
1683 else:
1684 break
1685 else:
1686 # no dependencies, return
1687 return
1688 await asyncio.sleep(10)
1689 timeout -= 1
1690
1691 raise LcmException("Configuration aborted because dependent charm/s timeout")
1692
1693 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1694 vca_id = None
1695 if db_vnfr:
1696 vca_id = deep_get(db_vnfr, ("vca-id",))
1697 elif db_nsr:
1698 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1699 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1700 return vca_id
1701
1702 async def instantiate_N2VC(
1703 self,
1704 logging_text,
1705 vca_index,
1706 nsi_id,
1707 db_nsr,
1708 db_vnfr,
1709 vdu_id,
1710 kdu_name,
1711 vdu_index,
1712 kdu_index,
1713 config_descriptor,
1714 deploy_params,
1715 base_folder,
1716 nslcmop_id,
1717 stage,
1718 vca_type,
1719 vca_name,
1720 ee_config_descriptor,
1721 ):
1722 nsr_id = db_nsr["_id"]
1723 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1724 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1725 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1726 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1727 db_dict = {
1728 "collection": "nsrs",
1729 "filter": {"_id": nsr_id},
1730 "path": db_update_entry,
1731 }
1732 step = ""
1733 try:
1734 element_type = "NS"
1735 element_under_configuration = nsr_id
1736
1737 vnfr_id = None
1738 if db_vnfr:
1739 vnfr_id = db_vnfr["_id"]
1740 osm_config["osm"]["vnf_id"] = vnfr_id
1741
1742 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1743
1744 if vca_type == "native_charm":
1745 index_number = 0
1746 else:
1747 index_number = vdu_index or 0
1748
1749 if vnfr_id:
1750 element_type = "VNF"
1751 element_under_configuration = vnfr_id
1752 namespace += ".{}-{}".format(vnfr_id, index_number)
1753 if vdu_id:
1754 namespace += ".{}-{}".format(vdu_id, index_number)
1755 element_type = "VDU"
1756 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1757 osm_config["osm"]["vdu_id"] = vdu_id
1758 elif kdu_name:
1759 namespace += ".{}".format(kdu_name)
1760 element_type = "KDU"
1761 element_under_configuration = kdu_name
1762 osm_config["osm"]["kdu_name"] = kdu_name
1763
1764 # Get artifact path
1765 if base_folder["pkg-dir"]:
1766 artifact_path = "{}/{}/{}/{}".format(
1767 base_folder["folder"],
1768 base_folder["pkg-dir"],
1769 "charms"
1770 if vca_type
1771 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1772 else "helm-charts",
1773 vca_name,
1774 )
1775 else:
1776 artifact_path = "{}/Scripts/{}/{}/".format(
1777 base_folder["folder"],
1778 "charms"
1779 if vca_type
1780 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1781 else "helm-charts",
1782 vca_name,
1783 )
1784
1785 self.logger.debug("Artifact path > {}".format(artifact_path))
1786
1787 # get initial_config_primitive_list that applies to this element
1788 initial_config_primitive_list = config_descriptor.get(
1789 "initial-config-primitive"
1790 )
1791
1792 self.logger.debug(
1793 "Initial config primitive list > {}".format(
1794 initial_config_primitive_list
1795 )
1796 )
1797
1798 # add config if not present for NS charm
1799 ee_descriptor_id = ee_config_descriptor.get("id")
1800 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1801 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1802 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1803 )
1804
1805 self.logger.debug(
1806 "Initial config primitive list #2 > {}".format(
1807 initial_config_primitive_list
1808 )
1809 )
1810 # n2vc_redesign STEP 3.1
1811 # find old ee_id if exists
1812 ee_id = vca_deployed.get("ee_id")
1813
1814 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1815 # create or register execution environment in VCA
1816 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1817 self._write_configuration_status(
1818 nsr_id=nsr_id,
1819 vca_index=vca_index,
1820 status="CREATING",
1821 element_under_configuration=element_under_configuration,
1822 element_type=element_type,
1823 )
1824
1825 step = "create execution environment"
1826 self.logger.debug(logging_text + step)
1827
1828 ee_id = None
1829 credentials = None
1830 if vca_type == "k8s_proxy_charm":
1831 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1832 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1833 namespace=namespace,
1834 artifact_path=artifact_path,
1835 db_dict=db_dict,
1836 vca_id=vca_id,
1837 )
1838 elif vca_type == "helm" or vca_type == "helm-v3":
1839 ee_id, credentials = await self.vca_map[
1840 vca_type
1841 ].create_execution_environment(
1842 namespace=nsr_id,
1843 reuse_ee_id=ee_id,
1844 db_dict=db_dict,
1845 config=osm_config,
1846 artifact_path=artifact_path,
1847 chart_model=vca_name,
1848 vca_type=vca_type,
1849 )
1850 else:
1851 ee_id, credentials = await self.vca_map[
1852 vca_type
1853 ].create_execution_environment(
1854 namespace=namespace,
1855 reuse_ee_id=ee_id,
1856 db_dict=db_dict,
1857 vca_id=vca_id,
1858 )
1859
1860 elif vca_type == "native_charm":
1861 step = "Waiting to VM being up and getting IP address"
1862 self.logger.debug(logging_text + step)
1863 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1864 logging_text,
1865 nsr_id,
1866 vnfr_id,
1867 vdu_id,
1868 vdu_index,
1869 user=None,
1870 pub_key=None,
1871 )
1872 credentials = {"hostname": rw_mgmt_ip}
1873 # get username
1874 username = deep_get(
1875 config_descriptor, ("config-access", "ssh-access", "default-user")
1876 )
1877 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1878 # merged. Meanwhile let's get username from initial-config-primitive
1879 if not username and initial_config_primitive_list:
1880 for config_primitive in initial_config_primitive_list:
1881 for param in config_primitive.get("parameter", ()):
1882 if param["name"] == "ssh-username":
1883 username = param["value"]
1884 break
1885 if not username:
1886 raise LcmException(
1887 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1888 "'config-access.ssh-access.default-user'"
1889 )
1890 credentials["username"] = username
1891 # n2vc_redesign STEP 3.2
1892
1893 self._write_configuration_status(
1894 nsr_id=nsr_id,
1895 vca_index=vca_index,
1896 status="REGISTERING",
1897 element_under_configuration=element_under_configuration,
1898 element_type=element_type,
1899 )
1900
1901 step = "register execution environment {}".format(credentials)
1902 self.logger.debug(logging_text + step)
1903 ee_id = await self.vca_map[vca_type].register_execution_environment(
1904 credentials=credentials,
1905 namespace=namespace,
1906 db_dict=db_dict,
1907 vca_id=vca_id,
1908 )
1909
1910 # for compatibility with MON/POL modules, the need model and application name at database
1911 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1912 ee_id_parts = ee_id.split(".")
1913 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1914 if len(ee_id_parts) >= 2:
1915 model_name = ee_id_parts[0]
1916 application_name = ee_id_parts[1]
1917 db_nsr_update[db_update_entry + "model"] = model_name
1918 db_nsr_update[db_update_entry + "application"] = application_name
1919
1920 # n2vc_redesign STEP 3.3
1921 step = "Install configuration Software"
1922
1923 self._write_configuration_status(
1924 nsr_id=nsr_id,
1925 vca_index=vca_index,
1926 status="INSTALLING SW",
1927 element_under_configuration=element_under_configuration,
1928 element_type=element_type,
1929 other_update=db_nsr_update,
1930 )
1931
1932 # TODO check if already done
1933 self.logger.debug(logging_text + step)
1934 config = None
1935 if vca_type == "native_charm":
1936 config_primitive = next(
1937 (p for p in initial_config_primitive_list if p["name"] == "config"),
1938 None,
1939 )
1940 if config_primitive:
1941 config = self._map_primitive_params(
1942 config_primitive, {}, deploy_params
1943 )
1944 num_units = 1
1945 if vca_type == "lxc_proxy_charm":
1946 if element_type == "NS":
1947 num_units = db_nsr.get("config-units") or 1
1948 elif element_type == "VNF":
1949 num_units = db_vnfr.get("config-units") or 1
1950 elif element_type == "VDU":
1951 for v in db_vnfr["vdur"]:
1952 if vdu_id == v["vdu-id-ref"]:
1953 num_units = v.get("config-units") or 1
1954 break
1955 if vca_type != "k8s_proxy_charm":
1956 await self.vca_map[vca_type].install_configuration_sw(
1957 ee_id=ee_id,
1958 artifact_path=artifact_path,
1959 db_dict=db_dict,
1960 config=config,
1961 num_units=num_units,
1962 vca_id=vca_id,
1963 vca_type=vca_type,
1964 )
1965
1966 # write in db flag of configuration_sw already installed
1967 self.update_db_2(
1968 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1969 )
1970
1971 # add relations for this VCA (wait for other peers related with this VCA)
1972 is_relation_added = await self._add_vca_relations(
1973 logging_text=logging_text,
1974 nsr_id=nsr_id,
1975 vca_type=vca_type,
1976 vca_index=vca_index,
1977 )
1978
1979 if not is_relation_added:
1980 raise LcmException("Relations could not be added to VCA.")
1981
1982 # if SSH access is required, then get execution environment SSH public
1983 # if native charm we have waited already to VM be UP
1984 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1985 pub_key = None
1986 user = None
1987 # self.logger.debug("get ssh key block")
1988 if deep_get(
1989 config_descriptor, ("config-access", "ssh-access", "required")
1990 ):
1991 # self.logger.debug("ssh key needed")
1992 # Needed to inject a ssh key
1993 user = deep_get(
1994 config_descriptor,
1995 ("config-access", "ssh-access", "default-user"),
1996 )
1997 step = "Install configuration Software, getting public ssh key"
1998 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1999 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2000 )
2001
2002 step = "Insert public key into VM user={} ssh_key={}".format(
2003 user, pub_key
2004 )
2005 else:
2006 # self.logger.debug("no need to get ssh key")
2007 step = "Waiting to VM being up and getting IP address"
2008 self.logger.debug(logging_text + step)
2009
2010 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2011 rw_mgmt_ip = None
2012
2013 # n2vc_redesign STEP 5.1
2014 # wait for RO (ip-address) Insert pub_key into VM
2015 if vnfr_id:
2016 if kdu_name:
2017 rw_mgmt_ip, services = await self.wait_kdu_up(
2018 logging_text, nsr_id, vnfr_id, kdu_name
2019 )
2020 vnfd = self.db.get_one(
2021 "vnfds_revisions",
2022 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2023 )
2024 kdu = get_kdu(vnfd, kdu_name)
2025 kdu_services = [
2026 service["name"] for service in get_kdu_services(kdu)
2027 ]
2028 exposed_services = []
2029 for service in services:
2030 if any(s in service["name"] for s in kdu_services):
2031 exposed_services.append(service)
2032 await self.vca_map[vca_type].exec_primitive(
2033 ee_id=ee_id,
2034 primitive_name="config",
2035 params_dict={
2036 "osm-config": json.dumps(
2037 OsmConfigBuilder(
2038 k8s={"services": exposed_services}
2039 ).build()
2040 )
2041 },
2042 vca_id=vca_id,
2043 )
2044
2045 # This verification is needed in order to avoid trying to add a public key
2046 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2047 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2048 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2049 # or it is a KNF)
2050 elif db_vnfr.get("vdur"):
2051 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2052 logging_text,
2053 nsr_id,
2054 vnfr_id,
2055 vdu_id,
2056 vdu_index,
2057 user=user,
2058 pub_key=pub_key,
2059 )
2060
2061 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2062
2063 # store rw_mgmt_ip in deploy params for later replacement
2064 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2065
2066 # n2vc_redesign STEP 6 Execute initial config primitive
2067 step = "execute initial config primitive"
2068
2069 # wait for dependent primitives execution (NS -> VNF -> VDU)
2070 if initial_config_primitive_list:
2071 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2072
2073 # stage, in function of element type: vdu, kdu, vnf or ns
2074 my_vca = vca_deployed_list[vca_index]
2075 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2076 # VDU or KDU
2077 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2078 elif my_vca.get("member-vnf-index"):
2079 # VNF
2080 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2081 else:
2082 # NS
2083 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2084
2085 self._write_configuration_status(
2086 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2087 )
2088
2089 self._write_op_status(op_id=nslcmop_id, stage=stage)
2090
2091 check_if_terminated_needed = True
2092 for initial_config_primitive in initial_config_primitive_list:
2093 # adding information on the vca_deployed if it is a NS execution environment
2094 if not vca_deployed["member-vnf-index"]:
2095 deploy_params["ns_config_info"] = json.dumps(
2096 self._get_ns_config_info(nsr_id)
2097 )
2098 # TODO check if already done
2099 primitive_params_ = self._map_primitive_params(
2100 initial_config_primitive, {}, deploy_params
2101 )
2102
2103 step = "execute primitive '{}' params '{}'".format(
2104 initial_config_primitive["name"], primitive_params_
2105 )
2106 self.logger.debug(logging_text + step)
2107 await self.vca_map[vca_type].exec_primitive(
2108 ee_id=ee_id,
2109 primitive_name=initial_config_primitive["name"],
2110 params_dict=primitive_params_,
2111 db_dict=db_dict,
2112 vca_id=vca_id,
2113 vca_type=vca_type,
2114 )
2115 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2116 if check_if_terminated_needed:
2117 if config_descriptor.get("terminate-config-primitive"):
2118 self.update_db_2(
2119 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2120 )
2121 check_if_terminated_needed = False
2122
2123 # TODO register in database that primitive is done
2124
2125 # STEP 7 Configure metrics
2126 if vca_type == "helm" or vca_type == "helm-v3":
2127 # TODO: review for those cases where the helm chart is a reference and
2128 # is not part of the NF package
2129 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2130 ee_id=ee_id,
2131 artifact_path=artifact_path,
2132 ee_config_descriptor=ee_config_descriptor,
2133 vnfr_id=vnfr_id,
2134 nsr_id=nsr_id,
2135 target_ip=rw_mgmt_ip,
2136 element_type=element_type,
2137 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
2138 vdu_id=vdu_id,
2139 vdu_index=vdu_index,
2140 kdu_name=kdu_name,
2141 kdu_index=kdu_index,
2142 )
2143 if prometheus_jobs:
2144 self.update_db_2(
2145 "nsrs",
2146 nsr_id,
2147 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2148 )
2149
2150 for job in prometheus_jobs:
2151 self.db.set_one(
2152 "prometheus_jobs",
2153 {"job_name": job["job_name"]},
2154 job,
2155 upsert=True,
2156 fail_on_empty=False,
2157 )
2158
2159 step = "instantiated at VCA"
2160 self.logger.debug(logging_text + step)
2161
2162 self._write_configuration_status(
2163 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2164 )
2165
2166 except Exception as e: # TODO not use Exception but N2VC exception
2167 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2168 if not isinstance(
2169 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2170 ):
2171 self.logger.error(
2172 "Exception while {} : {}".format(step, e), exc_info=True
2173 )
2174 self._write_configuration_status(
2175 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2176 )
2177 raise LcmException("{}. {}".format(step, e)) from e
2178
2179 def _write_ns_status(
2180 self,
2181 nsr_id: str,
2182 ns_state: str,
2183 current_operation: str,
2184 current_operation_id: str,
2185 error_description: str = None,
2186 error_detail: str = None,
2187 other_update: dict = None,
2188 ):
2189 """
2190 Update db_nsr fields.
2191 :param nsr_id:
2192 :param ns_state:
2193 :param current_operation:
2194 :param current_operation_id:
2195 :param error_description:
2196 :param error_detail:
2197 :param other_update: Other required changes at database if provided, will be cleared
2198 :return:
2199 """
2200 try:
2201 db_dict = other_update or {}
2202 db_dict[
2203 "_admin.nslcmop"
2204 ] = current_operation_id # for backward compatibility
2205 db_dict["_admin.current-operation"] = current_operation_id
2206 db_dict["_admin.operation-type"] = (
2207 current_operation if current_operation != "IDLE" else None
2208 )
2209 db_dict["currentOperation"] = current_operation
2210 db_dict["currentOperationID"] = current_operation_id
2211 db_dict["errorDescription"] = error_description
2212 db_dict["errorDetail"] = error_detail
2213
2214 if ns_state:
2215 db_dict["nsState"] = ns_state
2216 self.update_db_2("nsrs", nsr_id, db_dict)
2217 except DbException as e:
2218 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2219
2220 def _write_op_status(
2221 self,
2222 op_id: str,
2223 stage: list = None,
2224 error_message: str = None,
2225 queuePosition: int = 0,
2226 operation_state: str = None,
2227 other_update: dict = None,
2228 ):
2229 try:
2230 db_dict = other_update or {}
2231 db_dict["queuePosition"] = queuePosition
2232 if isinstance(stage, list):
2233 db_dict["stage"] = stage[0]
2234 db_dict["detailed-status"] = " ".join(stage)
2235 elif stage is not None:
2236 db_dict["stage"] = str(stage)
2237
2238 if error_message is not None:
2239 db_dict["errorMessage"] = error_message
2240 if operation_state is not None:
2241 db_dict["operationState"] = operation_state
2242 db_dict["statusEnteredTime"] = time()
2243 self.update_db_2("nslcmops", op_id, db_dict)
2244 except DbException as e:
2245 self.logger.warn(
2246 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2247 )
2248
2249 def _write_all_config_status(self, db_nsr: dict, status: str):
2250 try:
2251 nsr_id = db_nsr["_id"]
2252 # configurationStatus
2253 config_status = db_nsr.get("configurationStatus")
2254 if config_status:
2255 db_nsr_update = {
2256 "configurationStatus.{}.status".format(index): status
2257 for index, v in enumerate(config_status)
2258 if v
2259 }
2260 # update status
2261 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2262
2263 except DbException as e:
2264 self.logger.warn(
2265 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2266 )
2267
2268 def _write_configuration_status(
2269 self,
2270 nsr_id: str,
2271 vca_index: int,
2272 status: str = None,
2273 element_under_configuration: str = None,
2274 element_type: str = None,
2275 other_update: dict = None,
2276 ):
2277 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2278 # .format(vca_index, status))
2279
2280 try:
2281 db_path = "configurationStatus.{}.".format(vca_index)
2282 db_dict = other_update or {}
2283 if status:
2284 db_dict[db_path + "status"] = status
2285 if element_under_configuration:
2286 db_dict[
2287 db_path + "elementUnderConfiguration"
2288 ] = element_under_configuration
2289 if element_type:
2290 db_dict[db_path + "elementType"] = element_type
2291 self.update_db_2("nsrs", nsr_id, db_dict)
2292 except DbException as e:
2293 self.logger.warn(
2294 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2295 status, nsr_id, vca_index, e
2296 )
2297 )
2298
2299 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2300 """
2301 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2302 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2303 Database is used because the result can be obtained from a different LCM worker in case of HA.
2304 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2305 :param db_nslcmop: database content of nslcmop
2306 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2307 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2308 computed 'vim-account-id'
2309 """
2310 modified = False
2311 nslcmop_id = db_nslcmop["_id"]
2312 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2313 if placement_engine == "PLA":
2314 self.logger.debug(
2315 logging_text + "Invoke and wait for placement optimization"
2316 )
2317 await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
2318 db_poll_interval = 5
2319 wait = db_poll_interval * 10
2320 pla_result = None
2321 while not pla_result and wait >= 0:
2322 await asyncio.sleep(db_poll_interval)
2323 wait -= db_poll_interval
2324 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2325 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2326
2327 if not pla_result:
2328 raise LcmException(
2329 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2330 )
2331
2332 for pla_vnf in pla_result["vnf"]:
2333 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2334 if not pla_vnf.get("vimAccountId") or not vnfr:
2335 continue
2336 modified = True
2337 self.db.set_one(
2338 "vnfrs",
2339 {"_id": vnfr["_id"]},
2340 {"vim-account-id": pla_vnf["vimAccountId"]},
2341 )
2342 # Modifies db_vnfrs
2343 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2344 return modified
2345
2346 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2347 alerts = []
2348 nsr_id = vnfr["nsr-id-ref"]
2349 df = vnfd.get("df", [{}])[0]
2350 # Checking for auto-healing configuration
2351 if "healing-aspect" in df:
2352 healing_aspects = df["healing-aspect"]
2353 for healing in healing_aspects:
2354 for healing_policy in healing.get("healing-policy", ()):
2355 vdu_id = healing_policy["vdu-id"]
2356 vdur = next(
2357 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2358 {},
2359 )
2360 if not vdur:
2361 continue
2362 metric_name = "vm_status"
2363 vdu_name = vdur.get("name")
2364 vnf_member_index = vnfr["member-vnf-index-ref"]
2365 uuid = str(uuid4())
2366 name = f"healing_{uuid}"
2367 action = healing_policy
2368 # action_on_recovery = healing.get("action-on-recovery")
2369 # cooldown_time = healing.get("cooldown-time")
2370 # day1 = healing.get("day1")
2371 alert = {
2372 "uuid": uuid,
2373 "name": name,
2374 "metric": metric_name,
2375 "tags": {
2376 "ns_id": nsr_id,
2377 "vnf_member_index": vnf_member_index,
2378 "vdu_name": vdu_name,
2379 },
2380 "alarm_status": "ok",
2381 "action_type": "healing",
2382 "action": action,
2383 }
2384 alerts.append(alert)
2385 return alerts
2386
2387 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2388 alerts = []
2389 nsr_id = vnfr["nsr-id-ref"]
2390 df = vnfd.get("df", [{}])[0]
2391 # Checking for auto-scaling configuration
2392 if "scaling-aspect" in df:
2393 scaling_aspects = df["scaling-aspect"]
2394 all_vnfd_monitoring_params = {}
2395 for ivld in vnfd.get("int-virtual-link-desc", ()):
2396 for mp in ivld.get("monitoring-parameters", ()):
2397 all_vnfd_monitoring_params[mp.get("id")] = mp
2398 for vdu in vnfd.get("vdu", ()):
2399 for mp in vdu.get("monitoring-parameter", ()):
2400 all_vnfd_monitoring_params[mp.get("id")] = mp
2401 for df in vnfd.get("df", ()):
2402 for mp in df.get("monitoring-parameter", ()):
2403 all_vnfd_monitoring_params[mp.get("id")] = mp
2404 for scaling_aspect in scaling_aspects:
2405 scaling_group_name = scaling_aspect.get("name", "")
2406 # Get monitored VDUs
2407 all_monitored_vdus = set()
2408 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2409 "deltas", ()
2410 ):
2411 for vdu_delta in delta.get("vdu-delta", ()):
2412 all_monitored_vdus.add(vdu_delta.get("id"))
2413 monitored_vdurs = list(
2414 filter(
2415 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2416 vnfr["vdur"],
2417 )
2418 )
2419 if not monitored_vdurs:
2420 self.logger.error(
2421 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2422 )
2423 continue
2424 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2425 if scaling_policy["scaling-type"] != "automatic":
2426 continue
2427 threshold_time = scaling_policy.get("threshold-time", "1")
2428 cooldown_time = scaling_policy.get("cooldown-time", "0")
2429 for scaling_criteria in scaling_policy["scaling-criteria"]:
2430 monitoring_param_ref = scaling_criteria.get(
2431 "vnf-monitoring-param-ref"
2432 )
2433 vnf_monitoring_param = all_vnfd_monitoring_params[
2434 monitoring_param_ref
2435 ]
2436 for vdur in monitored_vdurs:
2437 vdu_id = vdur["vdu-id-ref"]
2438 metric_name = vnf_monitoring_param.get("performance-metric")
2439 metric_name = f"osm_{metric_name}"
2440 vnf_member_index = vnfr["member-vnf-index-ref"]
2441 scalein_threshold = scaling_criteria.get(
2442 "scale-in-threshold"
2443 )
2444 scaleout_threshold = scaling_criteria.get(
2445 "scale-out-threshold"
2446 )
2447 # Looking for min/max-number-of-instances
2448 instances_min_number = 1
2449 instances_max_number = 1
2450 vdu_profile = df["vdu-profile"]
2451 if vdu_profile:
2452 profile = next(
2453 item for item in vdu_profile if item["id"] == vdu_id
2454 )
2455 instances_min_number = profile.get(
2456 "min-number-of-instances", 1
2457 )
2458 instances_max_number = profile.get(
2459 "max-number-of-instances", 1
2460 )
2461
2462 if scalein_threshold:
2463 uuid = str(uuid4())
2464 name = f"scalein_{uuid}"
2465 operation = scaling_criteria[
2466 "scale-in-relational-operation"
2467 ]
2468 rel_operator = self.rel_operation_types.get(
2469 operation, "<="
2470 )
2471 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2472 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2473 labels = {
2474 "ns_id": nsr_id,
2475 "vnf_member_index": vnf_member_index,
2476 "vdu_id": vdu_id,
2477 }
2478 prom_cfg = {
2479 "alert": name,
2480 "expr": expression,
2481 "for": str(threshold_time) + "m",
2482 "labels": labels,
2483 }
2484 action = scaling_policy
2485 action = {
2486 "scaling-group": scaling_group_name,
2487 "cooldown-time": cooldown_time,
2488 }
2489 alert = {
2490 "uuid": uuid,
2491 "name": name,
2492 "metric": metric_name,
2493 "tags": {
2494 "ns_id": nsr_id,
2495 "vnf_member_index": vnf_member_index,
2496 "vdu_id": vdu_id,
2497 },
2498 "alarm_status": "ok",
2499 "action_type": "scale_in",
2500 "action": action,
2501 "prometheus_config": prom_cfg,
2502 }
2503 alerts.append(alert)
2504
2505 if scaleout_threshold:
2506 uuid = str(uuid4())
2507 name = f"scaleout_{uuid}"
2508 operation = scaling_criteria[
2509 "scale-out-relational-operation"
2510 ]
2511 rel_operator = self.rel_operation_types.get(
2512 operation, "<="
2513 )
2514 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2515 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2516 labels = {
2517 "ns_id": nsr_id,
2518 "vnf_member_index": vnf_member_index,
2519 "vdu_id": vdu_id,
2520 }
2521 prom_cfg = {
2522 "alert": name,
2523 "expr": expression,
2524 "for": str(threshold_time) + "m",
2525 "labels": labels,
2526 }
2527 action = scaling_policy
2528 action = {
2529 "scaling-group": scaling_group_name,
2530 "cooldown-time": cooldown_time,
2531 }
2532 alert = {
2533 "uuid": uuid,
2534 "name": name,
2535 "metric": metric_name,
2536 "tags": {
2537 "ns_id": nsr_id,
2538 "vnf_member_index": vnf_member_index,
2539 "vdu_id": vdu_id,
2540 },
2541 "alarm_status": "ok",
2542 "action_type": "scale_out",
2543 "action": action,
2544 "prometheus_config": prom_cfg,
2545 }
2546 alerts.append(alert)
2547 return alerts
2548
2549 def _gather_vnfr_alarm_alerts(self, vnfr, vnfd):
2550 alerts = []
2551 nsr_id = vnfr["nsr-id-ref"]
2552 vnf_member_index = vnfr["member-vnf-index-ref"]
2553
2554 # Checking for VNF alarm configuration
2555 for vdur in vnfr["vdur"]:
2556 vdu_id = vdur["vdu-id-ref"]
2557 vdu = next(filter(lambda vdu: vdu["id"] == vdu_id, vnfd["vdu"]))
2558 if "alarm" in vdu:
2559 # Get VDU monitoring params, since alerts are based on them
2560 vdu_monitoring_params = {}
2561 for mp in vdu.get("monitoring-parameter", []):
2562 vdu_monitoring_params[mp.get("id")] = mp
2563 if not vdu_monitoring_params:
2564 self.logger.error(
2565 "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
2566 )
2567 continue
2568 # Get alarms in the VDU
2569 alarm_descriptors = vdu["alarm"]
2570 # Create VDU alarms for each alarm in the VDU
2571 for alarm_descriptor in alarm_descriptors:
2572 # Check that the VDU alarm refers to a proper monitoring param
2573 alarm_monitoring_param = alarm_descriptor.get(
2574 "vnf-monitoring-param-ref", ""
2575 )
2576 vdu_specific_monitoring_param = vdu_monitoring_params.get(
2577 alarm_monitoring_param, {}
2578 )
2579 if not vdu_specific_monitoring_param:
2580 self.logger.error(
2581 "VDU alarm refers to a VDU monitoring param not present in the VDU"
2582 )
2583 continue
2584 metric_name = vdu_specific_monitoring_param.get(
2585 "performance-metric"
2586 )
2587 if not metric_name:
2588 self.logger.error(
2589 "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
2590 )
2591 continue
2592 # Set params of the alarm to be created in Prometheus
2593 metric_name = f"osm_{metric_name}"
2594 metric_threshold = alarm_descriptor.get("value")
2595 uuid = str(uuid4())
2596 alert_name = f"vdu_alarm_{uuid}"
2597 operation = alarm_descriptor["operation"]
2598 rel_operator = self.rel_operation_types.get(operation, "<=")
2599 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2600 expression = f"{metric_selector} {rel_operator} {metric_threshold}"
2601 labels = {
2602 "ns_id": nsr_id,
2603 "vnf_member_index": vnf_member_index,
2604 "vdu_id": vdu_id,
2605 "vdu_name": "{{ $labels.vdu_name }}",
2606 }
2607 prom_cfg = {
2608 "alert": alert_name,
2609 "expr": expression,
2610 "for": "1m", # default value. Ideally, this should be related to an IM param, but there is not such param
2611 "labels": labels,
2612 }
2613 alarm_action = dict()
2614 for action_type in ["ok", "insufficient-data", "alarm"]:
2615 if (
2616 "actions" in alarm_descriptor
2617 and action_type in alarm_descriptor["actions"]
2618 ):
2619 alarm_action[action_type] = alarm_descriptor["actions"][
2620 action_type
2621 ]
2622 alert = {
2623 "uuid": uuid,
2624 "name": alert_name,
2625 "metric": metric_name,
2626 "tags": {
2627 "ns_id": nsr_id,
2628 "vnf_member_index": vnf_member_index,
2629 "vdu_id": vdu_id,
2630 },
2631 "alarm_status": "ok",
2632 "action_type": "vdu_alarm",
2633 "action": alarm_action,
2634 "prometheus_config": prom_cfg,
2635 }
2636 alerts.append(alert)
2637 return alerts
2638
2639 def update_nsrs_with_pla_result(self, params):
2640 try:
2641 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2642 self.update_db_2(
2643 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2644 )
2645 except Exception as e:
2646 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2647
2648 async def instantiate(self, nsr_id, nslcmop_id):
2649 """
2650
2651 :param nsr_id: ns instance to deploy
2652 :param nslcmop_id: operation to run
2653 :return:
2654 """
2655
2656 # Try to lock HA task here
2657 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2658 if not task_is_locked_by_me:
2659 self.logger.debug(
2660 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2661 )
2662 return
2663
2664 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2665 self.logger.debug(logging_text + "Enter")
2666
2667 # get all needed from database
2668
2669 # database nsrs record
2670 db_nsr = None
2671
2672 # database nslcmops record
2673 db_nslcmop = None
2674
2675 # update operation on nsrs
2676 db_nsr_update = {}
2677 # update operation on nslcmops
2678 db_nslcmop_update = {}
2679
2680 timeout_ns_deploy = self.timeout.ns_deploy
2681
2682 nslcmop_operation_state = None
2683 db_vnfrs = {} # vnf's info indexed by member-index
2684 # n2vc_info = {}
2685 tasks_dict_info = {} # from task to info text
2686 exc = None
2687 error_list = []
2688 stage = [
2689 "Stage 1/5: preparation of the environment.",
2690 "Waiting for previous operations to terminate.",
2691 "",
2692 ]
2693 # ^ stage, step, VIM progress
2694 try:
2695 # wait for any previous tasks in process
2696 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2697
2698 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2699 stage[1] = "Reading from database."
2700 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2701 db_nsr_update["detailed-status"] = "creating"
2702 db_nsr_update["operational-status"] = "init"
2703 self._write_ns_status(
2704 nsr_id=nsr_id,
2705 ns_state="BUILDING",
2706 current_operation="INSTANTIATING",
2707 current_operation_id=nslcmop_id,
2708 other_update=db_nsr_update,
2709 )
2710 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2711
2712 # read from db: operation
2713 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2714 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2715 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2716 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2717 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2718 )
2719 ns_params = db_nslcmop.get("operationParams")
2720 if ns_params and ns_params.get("timeout_ns_deploy"):
2721 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2722
2723 # read from db: ns
2724 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2725 self.logger.debug(logging_text + stage[1])
2726 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2727 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2728 self.logger.debug(logging_text + stage[1])
2729 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2730 self.fs.sync(db_nsr["nsd-id"])
2731 db_nsr["nsd"] = nsd
2732 # nsr_name = db_nsr["name"] # TODO short-name??
2733
2734 # read from db: vnf's of this ns
2735 stage[1] = "Getting vnfrs from db."
2736 self.logger.debug(logging_text + stage[1])
2737 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2738
2739 # read from db: vnfd's for every vnf
2740 db_vnfds = [] # every vnfd data
2741
2742 # for each vnf in ns, read vnfd
2743 for vnfr in db_vnfrs_list:
2744 if vnfr.get("kdur"):
2745 kdur_list = []
2746 for kdur in vnfr["kdur"]:
2747 if kdur.get("additionalParams"):
2748 kdur["additionalParams"] = json.loads(
2749 kdur["additionalParams"]
2750 )
2751 kdur_list.append(kdur)
2752 vnfr["kdur"] = kdur_list
2753
2754 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2755 vnfd_id = vnfr["vnfd-id"]
2756 vnfd_ref = vnfr["vnfd-ref"]
2757 self.fs.sync(vnfd_id)
2758
2759 # if we haven't this vnfd, read it from db
2760 if vnfd_id not in db_vnfds:
2761 # read from db
2762 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2763 vnfd_id, vnfd_ref
2764 )
2765 self.logger.debug(logging_text + stage[1])
2766 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2767
2768 # store vnfd
2769 db_vnfds.append(vnfd)
2770
2771 # Get or generates the _admin.deployed.VCA list
2772 vca_deployed_list = None
2773 if db_nsr["_admin"].get("deployed"):
2774 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2775 if vca_deployed_list is None:
2776 vca_deployed_list = []
2777 configuration_status_list = []
2778 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2779 db_nsr_update["configurationStatus"] = configuration_status_list
2780 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2781 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2782 elif isinstance(vca_deployed_list, dict):
2783 # maintain backward compatibility. Change a dict to list at database
2784 vca_deployed_list = list(vca_deployed_list.values())
2785 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2786 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2787
2788 if not isinstance(
2789 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2790 ):
2791 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2792 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2793
2794 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2795 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2796 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2797 self.db.set_list(
2798 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2799 )
2800
2801 # n2vc_redesign STEP 2 Deploy Network Scenario
2802 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2803 self._write_op_status(op_id=nslcmop_id, stage=stage)
2804
2805 stage[1] = "Deploying KDUs."
2806 # self.logger.debug(logging_text + "Before deploy_kdus")
2807 # Call to deploy_kdus in case exists the "vdu:kdu" param
2808 await self.deploy_kdus(
2809 logging_text=logging_text,
2810 nsr_id=nsr_id,
2811 nslcmop_id=nslcmop_id,
2812 db_vnfrs=db_vnfrs,
2813 db_vnfds=db_vnfds,
2814 task_instantiation_info=tasks_dict_info,
2815 )
2816
2817 stage[1] = "Getting VCA public key."
2818 # n2vc_redesign STEP 1 Get VCA public ssh-key
2819 # feature 1429. Add n2vc public key to needed VMs
2820 n2vc_key = self.n2vc.get_public_key()
2821 n2vc_key_list = [n2vc_key]
2822 if self.vca_config.public_key:
2823 n2vc_key_list.append(self.vca_config.public_key)
2824
2825 stage[1] = "Deploying NS at VIM."
2826 task_ro = asyncio.ensure_future(
2827 self.instantiate_RO(
2828 logging_text=logging_text,
2829 nsr_id=nsr_id,
2830 nsd=nsd,
2831 db_nsr=db_nsr,
2832 db_nslcmop=db_nslcmop,
2833 db_vnfrs=db_vnfrs,
2834 db_vnfds=db_vnfds,
2835 n2vc_key_list=n2vc_key_list,
2836 stage=stage,
2837 )
2838 )
2839 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2840 tasks_dict_info[task_ro] = "Deploying at VIM"
2841
2842 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2843 stage[1] = "Deploying Execution Environments."
2844 self.logger.debug(logging_text + stage[1])
2845
2846 # create namespace and certificate if any helm based EE is present in the NS
2847 if check_helm_ee_in_ns(db_vnfds):
2848 await self.vca_map["helm-v3"].setup_ns_namespace(
2849 name=nsr_id,
2850 )
2851 # create TLS certificates
2852 await self.vca_map["helm-v3"].create_tls_certificate(
2853 secret_name=self.EE_TLS_NAME,
2854 dns_prefix="*",
2855 nsr_id=nsr_id,
2856 usage="server auth",
2857 namespace=nsr_id,
2858 )
2859
2860 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2861 for vnf_profile in get_vnf_profiles(nsd):
2862 vnfd_id = vnf_profile["vnfd-id"]
2863 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2864 member_vnf_index = str(vnf_profile["id"])
2865 db_vnfr = db_vnfrs[member_vnf_index]
2866 base_folder = vnfd["_admin"]["storage"]
2867 vdu_id = None
2868 vdu_index = 0
2869 vdu_name = None
2870 kdu_name = None
2871 kdu_index = None
2872
2873 # Get additional parameters
2874 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2875 if db_vnfr.get("additionalParamsForVnf"):
2876 deploy_params.update(
2877 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2878 )
2879
2880 descriptor_config = get_configuration(vnfd, vnfd["id"])
2881 if descriptor_config:
2882 self._deploy_n2vc(
2883 logging_text=logging_text
2884 + "member_vnf_index={} ".format(member_vnf_index),
2885 db_nsr=db_nsr,
2886 db_vnfr=db_vnfr,
2887 nslcmop_id=nslcmop_id,
2888 nsr_id=nsr_id,
2889 nsi_id=nsi_id,
2890 vnfd_id=vnfd_id,
2891 vdu_id=vdu_id,
2892 kdu_name=kdu_name,
2893 member_vnf_index=member_vnf_index,
2894 vdu_index=vdu_index,
2895 kdu_index=kdu_index,
2896 vdu_name=vdu_name,
2897 deploy_params=deploy_params,
2898 descriptor_config=descriptor_config,
2899 base_folder=base_folder,
2900 task_instantiation_info=tasks_dict_info,
2901 stage=stage,
2902 )
2903
2904 # Deploy charms for each VDU that supports one.
2905 for vdud in get_vdu_list(vnfd):
2906 vdu_id = vdud["id"]
2907 descriptor_config = get_configuration(vnfd, vdu_id)
2908 vdur = find_in_list(
2909 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2910 )
2911
2912 if vdur.get("additionalParams"):
2913 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2914 else:
2915 deploy_params_vdu = deploy_params
2916 deploy_params_vdu["OSM"] = get_osm_params(
2917 db_vnfr, vdu_id, vdu_count_index=0
2918 )
2919 vdud_count = get_number_of_instances(vnfd, vdu_id)
2920
2921 self.logger.debug("VDUD > {}".format(vdud))
2922 self.logger.debug(
2923 "Descriptor config > {}".format(descriptor_config)
2924 )
2925 if descriptor_config:
2926 vdu_name = None
2927 kdu_name = None
2928 kdu_index = None
2929 for vdu_index in range(vdud_count):
2930 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2931 self._deploy_n2vc(
2932 logging_text=logging_text
2933 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2934 member_vnf_index, vdu_id, vdu_index
2935 ),
2936 db_nsr=db_nsr,
2937 db_vnfr=db_vnfr,
2938 nslcmop_id=nslcmop_id,
2939 nsr_id=nsr_id,
2940 nsi_id=nsi_id,
2941 vnfd_id=vnfd_id,
2942 vdu_id=vdu_id,
2943 kdu_name=kdu_name,
2944 kdu_index=kdu_index,
2945 member_vnf_index=member_vnf_index,
2946 vdu_index=vdu_index,
2947 vdu_name=vdu_name,
2948 deploy_params=deploy_params_vdu,
2949 descriptor_config=descriptor_config,
2950 base_folder=base_folder,
2951 task_instantiation_info=tasks_dict_info,
2952 stage=stage,
2953 )
2954 for kdud in get_kdu_list(vnfd):
2955 kdu_name = kdud["name"]
2956 descriptor_config = get_configuration(vnfd, kdu_name)
2957 if descriptor_config:
2958 vdu_id = None
2959 vdu_index = 0
2960 vdu_name = None
2961 kdu_index, kdur = next(
2962 x
2963 for x in enumerate(db_vnfr["kdur"])
2964 if x[1]["kdu-name"] == kdu_name
2965 )
2966 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2967 if kdur.get("additionalParams"):
2968 deploy_params_kdu.update(
2969 parse_yaml_strings(kdur["additionalParams"].copy())
2970 )
2971
2972 self._deploy_n2vc(
2973 logging_text=logging_text,
2974 db_nsr=db_nsr,
2975 db_vnfr=db_vnfr,
2976 nslcmop_id=nslcmop_id,
2977 nsr_id=nsr_id,
2978 nsi_id=nsi_id,
2979 vnfd_id=vnfd_id,
2980 vdu_id=vdu_id,
2981 kdu_name=kdu_name,
2982 member_vnf_index=member_vnf_index,
2983 vdu_index=vdu_index,
2984 kdu_index=kdu_index,
2985 vdu_name=vdu_name,
2986 deploy_params=deploy_params_kdu,
2987 descriptor_config=descriptor_config,
2988 base_folder=base_folder,
2989 task_instantiation_info=tasks_dict_info,
2990 stage=stage,
2991 )
2992
2993 # Check if each vnf has exporter for metric collection if so update prometheus job records
2994 if "exporters-endpoints" in vnfd.get("df")[0]:
2995 exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
2996 self.logger.debug("exporter config :{}".format(exporter_config))
2997 artifact_path = "{}/{}/{}".format(
2998 base_folder["folder"],
2999 base_folder["pkg-dir"],
3000 "exporter-endpoint",
3001 )
3002 ee_id = None
3003 ee_config_descriptor = exporter_config
3004 vnfr_id = db_vnfr["id"]
3005 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
3006 logging_text,
3007 nsr_id,
3008 vnfr_id,
3009 vdu_id=None,
3010 vdu_index=None,
3011 user=None,
3012 pub_key=None,
3013 )
3014 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
3015 self.logger.debug("Artifact_path:{}".format(artifact_path))
3016 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
3017 vdu_id_for_prom = None
3018 vdu_index_for_prom = None
3019 for x in get_iterable(db_vnfr, "vdur"):
3020 vdu_id_for_prom = x.get("vdu-id-ref")
3021 vdu_index_for_prom = x.get("count-index")
3022 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
3023 ee_id=ee_id,
3024 artifact_path=artifact_path,
3025 ee_config_descriptor=ee_config_descriptor,
3026 vnfr_id=vnfr_id,
3027 nsr_id=nsr_id,
3028 target_ip=rw_mgmt_ip,
3029 element_type="VDU",
3030 vdu_id=vdu_id_for_prom,
3031 vdu_index=vdu_index_for_prom,
3032 )
3033
3034 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
3035 if prometheus_jobs:
3036 db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
3037 self.update_db_2(
3038 "nsrs",
3039 nsr_id,
3040 db_nsr_update,
3041 )
3042
3043 for job in prometheus_jobs:
3044 self.db.set_one(
3045 "prometheus_jobs",
3046 {"job_name": job["job_name"]},
3047 job,
3048 upsert=True,
3049 fail_on_empty=False,
3050 )
3051
3052 # Check if this NS has a charm configuration
3053 descriptor_config = nsd.get("ns-configuration")
3054 if descriptor_config and descriptor_config.get("juju"):
3055 vnfd_id = None
3056 db_vnfr = None
3057 member_vnf_index = None
3058 vdu_id = None
3059 kdu_name = None
3060 kdu_index = None
3061 vdu_index = 0
3062 vdu_name = None
3063
3064 # Get additional parameters
3065 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
3066 if db_nsr.get("additionalParamsForNs"):
3067 deploy_params.update(
3068 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
3069 )
3070 base_folder = nsd["_admin"]["storage"]
3071 self._deploy_n2vc(
3072 logging_text=logging_text,
3073 db_nsr=db_nsr,
3074 db_vnfr=db_vnfr,
3075 nslcmop_id=nslcmop_id,
3076 nsr_id=nsr_id,
3077 nsi_id=nsi_id,
3078 vnfd_id=vnfd_id,
3079 vdu_id=vdu_id,
3080 kdu_name=kdu_name,
3081 member_vnf_index=member_vnf_index,
3082 vdu_index=vdu_index,
3083 kdu_index=kdu_index,
3084 vdu_name=vdu_name,
3085 deploy_params=deploy_params,
3086 descriptor_config=descriptor_config,
3087 base_folder=base_folder,
3088 task_instantiation_info=tasks_dict_info,
3089 stage=stage,
3090 )
3091
3092 # rest of staff will be done at finally
3093
3094 except (
3095 ROclient.ROClientException,
3096 DbException,
3097 LcmException,
3098 N2VCException,
3099 ) as e:
3100 self.logger.error(
3101 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
3102 )
3103 exc = e
3104 except asyncio.CancelledError:
3105 self.logger.error(
3106 logging_text + "Cancelled Exception while '{}'".format(stage[1])
3107 )
3108 exc = "Operation was cancelled"
3109 except Exception as e:
3110 exc = traceback.format_exc()
3111 self.logger.critical(
3112 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
3113 exc_info=True,
3114 )
3115 finally:
3116 if exc:
3117 error_list.append(str(exc))
3118 try:
3119 # wait for pending tasks
3120 if tasks_dict_info:
3121 stage[1] = "Waiting for instantiate pending tasks."
3122 self.logger.debug(logging_text + stage[1])
3123 error_list += await self._wait_for_tasks(
3124 logging_text,
3125 tasks_dict_info,
3126 timeout_ns_deploy,
3127 stage,
3128 nslcmop_id,
3129 nsr_id=nsr_id,
3130 )
3131 stage[1] = stage[2] = ""
3132 except asyncio.CancelledError:
3133 error_list.append("Cancelled")
3134 # TODO cancel all tasks
3135 except Exception as exc:
3136 error_list.append(str(exc))
3137
3138 # update operation-status
3139 db_nsr_update["operational-status"] = "running"
3140 # let's begin with VCA 'configured' status (later we can change it)
3141 db_nsr_update["config-status"] = "configured"
3142 for task, task_name in tasks_dict_info.items():
3143 if not task.done() or task.cancelled() or task.exception():
3144 if task_name.startswith(self.task_name_deploy_vca):
3145 # A N2VC task is pending
3146 db_nsr_update["config-status"] = "failed"
3147 else:
3148 # RO or KDU task is pending
3149 db_nsr_update["operational-status"] = "failed"
3150
3151 # update status at database
3152 if error_list:
3153 error_detail = ". ".join(error_list)
3154 self.logger.error(logging_text + error_detail)
3155 error_description_nslcmop = "{} Detail: {}".format(
3156 stage[0], error_detail
3157 )
3158 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
3159 nslcmop_id, stage[0]
3160 )
3161
3162 db_nsr_update["detailed-status"] = (
3163 error_description_nsr + " Detail: " + error_detail
3164 )
3165 db_nslcmop_update["detailed-status"] = error_detail
3166 nslcmop_operation_state = "FAILED"
3167 ns_state = "BROKEN"
3168 else:
3169 error_detail = None
3170 error_description_nsr = error_description_nslcmop = None
3171 ns_state = "READY"
3172 db_nsr_update["detailed-status"] = "Done"
3173 db_nslcmop_update["detailed-status"] = "Done"
3174 nslcmop_operation_state = "COMPLETED"
3175 # Gather auto-healing and auto-scaling alerts for each vnfr
3176 healing_alerts = []
3177 scaling_alerts = []
3178 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3179 vnfd = next(
3180 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3181 )
3182 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3183 for alert in healing_alerts:
3184 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3185 self.db.create("alerts", alert)
3186
3187 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3188 for alert in scaling_alerts:
3189 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3190 self.db.create("alerts", alert)
3191
3192 alarm_alerts = self._gather_vnfr_alarm_alerts(vnfr, vnfd)
3193 for alert in alarm_alerts:
3194 self.logger.info(f"Storing VNF alarm alert in MongoDB: {alert}")
3195 self.db.create("alerts", alert)
3196 if db_nsr:
3197 self._write_ns_status(
3198 nsr_id=nsr_id,
3199 ns_state=ns_state,
3200 current_operation="IDLE",
3201 current_operation_id=None,
3202 error_description=error_description_nsr,
3203 error_detail=error_detail,
3204 other_update=db_nsr_update,
3205 )
3206 self._write_op_status(
3207 op_id=nslcmop_id,
3208 stage="",
3209 error_message=error_description_nslcmop,
3210 operation_state=nslcmop_operation_state,
3211 other_update=db_nslcmop_update,
3212 )
3213
3214 if nslcmop_operation_state:
3215 try:
3216 await self.msg.aiowrite(
3217 "ns",
3218 "instantiated",
3219 {
3220 "nsr_id": nsr_id,
3221 "nslcmop_id": nslcmop_id,
3222 "operationState": nslcmop_operation_state,
3223 },
3224 )
3225 except Exception as e:
3226 self.logger.error(
3227 logging_text + "kafka_write notification Exception {}".format(e)
3228 )
3229
3230 self.logger.debug(logging_text + "Exit")
3231 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3232
3233 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
3234 if vnfd_id not in cached_vnfds:
3235 cached_vnfds[vnfd_id] = self.db.get_one(
3236 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3237 )
3238 return cached_vnfds[vnfd_id]
3239
3240 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3241 if vnf_profile_id not in cached_vnfrs:
3242 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3243 "vnfrs",
3244 {
3245 "member-vnf-index-ref": vnf_profile_id,
3246 "nsr-id-ref": nsr_id,
3247 },
3248 )
3249 return cached_vnfrs[vnf_profile_id]
3250
3251 def _is_deployed_vca_in_relation(
3252 self, vca: DeployedVCA, relation: Relation
3253 ) -> bool:
3254 found = False
3255 for endpoint in (relation.provider, relation.requirer):
3256 if endpoint["kdu-resource-profile-id"]:
3257 continue
3258 found = (
3259 vca.vnf_profile_id == endpoint.vnf_profile_id
3260 and vca.vdu_profile_id == endpoint.vdu_profile_id
3261 and vca.execution_environment_ref == endpoint.execution_environment_ref
3262 )
3263 if found:
3264 break
3265 return found
3266
3267 def _update_ee_relation_data_with_implicit_data(
3268 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
3269 ):
3270 ee_relation_data = safe_get_ee_relation(
3271 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
3272 )
3273 ee_relation_level = EELevel.get_level(ee_relation_data)
3274 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
3275 "execution-environment-ref"
3276 ]:
3277 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
3278 vnfd_id = vnf_profile["vnfd-id"]
3279 project = nsd["_admin"]["projects_read"][0]
3280 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3281 entity_id = (
3282 vnfd_id
3283 if ee_relation_level == EELevel.VNF
3284 else ee_relation_data["vdu-profile-id"]
3285 )
3286 ee = get_juju_ee_ref(db_vnfd, entity_id)
3287 if not ee:
3288 raise Exception(
3289 f"not execution environments found for ee_relation {ee_relation_data}"
3290 )
3291 ee_relation_data["execution-environment-ref"] = ee["id"]
3292 return ee_relation_data
3293
3294 def _get_ns_relations(
3295 self,
3296 nsr_id: str,
3297 nsd: Dict[str, Any],
3298 vca: DeployedVCA,
3299 cached_vnfds: Dict[str, Any],
3300 ) -> List[Relation]:
3301 relations = []
3302 db_ns_relations = get_ns_configuration_relation_list(nsd)
3303 for r in db_ns_relations:
3304 provider_dict = None
3305 requirer_dict = None
3306 if all(key in r for key in ("provider", "requirer")):
3307 provider_dict = r["provider"]
3308 requirer_dict = r["requirer"]
3309 elif "entities" in r:
3310 provider_id = r["entities"][0]["id"]
3311 provider_dict = {
3312 "nsr-id": nsr_id,
3313 "endpoint": r["entities"][0]["endpoint"],
3314 }
3315 if provider_id != nsd["id"]:
3316 provider_dict["vnf-profile-id"] = provider_id
3317 requirer_id = r["entities"][1]["id"]
3318 requirer_dict = {
3319 "nsr-id": nsr_id,
3320 "endpoint": r["entities"][1]["endpoint"],
3321 }
3322 if requirer_id != nsd["id"]:
3323 requirer_dict["vnf-profile-id"] = requirer_id
3324 else:
3325 raise Exception(
3326 "provider/requirer or entities must be included in the relation."
3327 )
3328 relation_provider = self._update_ee_relation_data_with_implicit_data(
3329 nsr_id, nsd, provider_dict, cached_vnfds
3330 )
3331 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3332 nsr_id, nsd, requirer_dict, cached_vnfds
3333 )
3334 provider = EERelation(relation_provider)
3335 requirer = EERelation(relation_requirer)
3336 relation = Relation(r["name"], provider, requirer)
3337 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3338 if vca_in_relation:
3339 relations.append(relation)
3340 return relations
3341
3342 def _get_vnf_relations(
3343 self,
3344 nsr_id: str,
3345 nsd: Dict[str, Any],
3346 vca: DeployedVCA,
3347 cached_vnfds: Dict[str, Any],
3348 ) -> List[Relation]:
3349 relations = []
3350 if vca.target_element == "ns":
3351 self.logger.debug("VCA is a NS charm, not a VNF.")
3352 return relations
3353 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3354 vnf_profile_id = vnf_profile["id"]
3355 vnfd_id = vnf_profile["vnfd-id"]
3356 project = nsd["_admin"]["projects_read"][0]
3357 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3358 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3359 for r in db_vnf_relations:
3360 provider_dict = None
3361 requirer_dict = None
3362 if all(key in r for key in ("provider", "requirer")):
3363 provider_dict = r["provider"]
3364 requirer_dict = r["requirer"]
3365 elif "entities" in r:
3366 provider_id = r["entities"][0]["id"]
3367 provider_dict = {
3368 "nsr-id": nsr_id,
3369 "vnf-profile-id": vnf_profile_id,
3370 "endpoint": r["entities"][0]["endpoint"],
3371 }
3372 if provider_id != vnfd_id:
3373 provider_dict["vdu-profile-id"] = provider_id
3374 requirer_id = r["entities"][1]["id"]
3375 requirer_dict = {
3376 "nsr-id": nsr_id,
3377 "vnf-profile-id": vnf_profile_id,
3378 "endpoint": r["entities"][1]["endpoint"],
3379 }
3380 if requirer_id != vnfd_id:
3381 requirer_dict["vdu-profile-id"] = requirer_id
3382 else:
3383 raise Exception(
3384 "provider/requirer or entities must be included in the relation."
3385 )
3386 relation_provider = self._update_ee_relation_data_with_implicit_data(
3387 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3388 )
3389 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3390 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3391 )
3392 provider = EERelation(relation_provider)
3393 requirer = EERelation(relation_requirer)
3394 relation = Relation(r["name"], provider, requirer)
3395 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3396 if vca_in_relation:
3397 relations.append(relation)
3398 return relations
3399
3400 def _get_kdu_resource_data(
3401 self,
3402 ee_relation: EERelation,
3403 db_nsr: Dict[str, Any],
3404 cached_vnfds: Dict[str, Any],
3405 ) -> DeployedK8sResource:
3406 nsd = get_nsd(db_nsr)
3407 vnf_profiles = get_vnf_profiles(nsd)
3408 vnfd_id = find_in_list(
3409 vnf_profiles,
3410 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3411 )["vnfd-id"]
3412 project = nsd["_admin"]["projects_read"][0]
3413 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3414 kdu_resource_profile = get_kdu_resource_profile(
3415 db_vnfd, ee_relation.kdu_resource_profile_id
3416 )
3417 kdu_name = kdu_resource_profile["kdu-name"]
3418 deployed_kdu, _ = get_deployed_kdu(
3419 db_nsr.get("_admin", ()).get("deployed", ()),
3420 kdu_name,
3421 ee_relation.vnf_profile_id,
3422 )
3423 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3424 return deployed_kdu
3425
3426 def _get_deployed_component(
3427 self,
3428 ee_relation: EERelation,
3429 db_nsr: Dict[str, Any],
3430 cached_vnfds: Dict[str, Any],
3431 ) -> DeployedComponent:
3432 nsr_id = db_nsr["_id"]
3433 deployed_component = None
3434 ee_level = EELevel.get_level(ee_relation)
3435 if ee_level == EELevel.NS:
3436 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3437 if vca:
3438 deployed_component = DeployedVCA(nsr_id, vca)
3439 elif ee_level == EELevel.VNF:
3440 vca = get_deployed_vca(
3441 db_nsr,
3442 {
3443 "vdu_id": None,
3444 "member-vnf-index": ee_relation.vnf_profile_id,
3445 "ee_descriptor_id": ee_relation.execution_environment_ref,
3446 },
3447 )
3448 if vca:
3449 deployed_component = DeployedVCA(nsr_id, vca)
3450 elif ee_level == EELevel.VDU:
3451 vca = get_deployed_vca(
3452 db_nsr,
3453 {
3454 "vdu_id": ee_relation.vdu_profile_id,
3455 "member-vnf-index": ee_relation.vnf_profile_id,
3456 "ee_descriptor_id": ee_relation.execution_environment_ref,
3457 },
3458 )
3459 if vca:
3460 deployed_component = DeployedVCA(nsr_id, vca)
3461 elif ee_level == EELevel.KDU:
3462 kdu_resource_data = self._get_kdu_resource_data(
3463 ee_relation, db_nsr, cached_vnfds
3464 )
3465 if kdu_resource_data:
3466 deployed_component = DeployedK8sResource(kdu_resource_data)
3467 return deployed_component
3468
3469 async def _add_relation(
3470 self,
3471 relation: Relation,
3472 vca_type: str,
3473 db_nsr: Dict[str, Any],
3474 cached_vnfds: Dict[str, Any],
3475 cached_vnfrs: Dict[str, Any],
3476 ) -> bool:
3477 deployed_provider = self._get_deployed_component(
3478 relation.provider, db_nsr, cached_vnfds
3479 )
3480 deployed_requirer = self._get_deployed_component(
3481 relation.requirer, db_nsr, cached_vnfds
3482 )
3483 if (
3484 deployed_provider
3485 and deployed_requirer
3486 and deployed_provider.config_sw_installed
3487 and deployed_requirer.config_sw_installed
3488 ):
3489 provider_db_vnfr = (
3490 self._get_vnfr(
3491 relation.provider.nsr_id,
3492 relation.provider.vnf_profile_id,
3493 cached_vnfrs,
3494 )
3495 if relation.provider.vnf_profile_id
3496 else None
3497 )
3498 requirer_db_vnfr = (
3499 self._get_vnfr(
3500 relation.requirer.nsr_id,
3501 relation.requirer.vnf_profile_id,
3502 cached_vnfrs,
3503 )
3504 if relation.requirer.vnf_profile_id
3505 else None
3506 )
3507 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3508 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3509 provider_relation_endpoint = RelationEndpoint(
3510 deployed_provider.ee_id,
3511 provider_vca_id,
3512 relation.provider.endpoint,
3513 )
3514 requirer_relation_endpoint = RelationEndpoint(
3515 deployed_requirer.ee_id,
3516 requirer_vca_id,
3517 relation.requirer.endpoint,
3518 )
3519 try:
3520 await self.vca_map[vca_type].add_relation(
3521 provider=provider_relation_endpoint,
3522 requirer=requirer_relation_endpoint,
3523 )
3524 except N2VCException as exception:
3525 self.logger.error(exception)
3526 raise LcmException(exception)
3527 return True
3528 return False
3529
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_type: str,
        vca_index: int,
        timeout: int = 3600,
    ) -> bool:
        """Add all juju relations that involve the VCA at vca_index of this NS.

        Collects NS-level and VNF-level relations for the VCA and then polls
        (every 5 seconds, up to `timeout` seconds) until every relation's peer
        is deployed and configured, adding each relation as soon as both ends
        are ready.

        :returns: True on success (or when there is nothing to relate);
            False on timeout or on any error (errors are logged, not raised).
        """
        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = get_nsd(db_nsr)

            # this VCA data
            deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
            my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

            # caches shared across all relation lookups in this call
            cached_vnfds = {}
            cached_vnfrs = {}
            relations = []
            relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
            relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

            # if no relations, terminate
            if not relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(logging_text + " adding relations {}".format(relations))

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deployed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each relation, find the VCA's related
                # iterate over a copy: successfully added relations are removed
                # from the list in place
                for relation in relations.copy():
                    added = await self._add_relation(
                        relation,
                        vca_type,
                        db_nsr,
                        cached_vnfds,
                        cached_vnfrs,
                    )
                    if added:
                        relations.remove(relation)

                if not relations:
                    self.logger.debug("Relations added")
                    break
                await asyncio.sleep(5.0)

            return True

        except Exception as e:
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
3601
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU (helm chart or juju bundle) on its k8s cluster.

        Instantiates the KDU through the connector selected by
        k8s_instance_info["k8scluster-type"], records progress in the nsr at
        nsr_db_path, fills in the kdur services/ip-address/status fields of the
        vnfr, and runs any initial-config-primitives declared for the KDU.

        :returns: the kdu_instance name used for the deployment.
        :raises: re-raises any installation error after marking the nsr
            detailed-status and the kdur status as ERROR.
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # An explicit kdu-deployment-name takes precedence over a
            # connector-generated instance name.
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                # Services flagged mgmt-service in the KDUD expose the
                # management IP of the KDU.
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run the KDU's initial-config-primitives, ordered by "seq", but
            # only when the KDU is not managed through a juju EE.
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3798
3799 async def deploy_kdus(
3800 self,
3801 logging_text,
3802 nsr_id,
3803 nslcmop_id,
3804 db_vnfrs,
3805 db_vnfds,
3806 task_instantiation_info,
3807 ):
3808 # Launch kdus if present in the descriptor
3809
3810 k8scluster_id_2_uuic = {
3811 "helm-chart-v3": {},
3812 "helm-chart": {},
3813 "juju-bundle": {},
3814 }
3815
3816 async def _get_cluster_id(cluster_id, cluster_type):
3817 nonlocal k8scluster_id_2_uuic
3818 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
3819 return k8scluster_id_2_uuic[cluster_type][cluster_id]
3820
3821 # check if K8scluster is creating and wait look if previous tasks in process
3822 task_name, task_dependency = self.lcm_tasks.lookfor_related(
3823 "k8scluster", cluster_id
3824 )
3825 if task_dependency:
3826 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3827 task_name, cluster_id
3828 )
3829 self.logger.debug(logging_text + text)
3830 await asyncio.wait(task_dependency, timeout=3600)
3831
3832 db_k8scluster = self.db.get_one(
3833 "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
3834 )
3835 if not db_k8scluster:
3836 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
3837
3838 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
3839 if not k8s_id:
3840 if cluster_type == "helm-chart-v3":
3841 try:
3842 # backward compatibility for existing clusters that have not been initialized for helm v3
3843 k8s_credentials = yaml.safe_dump(
3844 db_k8scluster.get("credentials")
3845 )
3846 k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
3847 k8s_credentials, reuse_cluster_uuid=cluster_id
3848 )
3849 db_k8scluster_update = {}
3850 db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
3851 db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
3852 db_k8scluster_update[
3853 "_admin.helm-chart-v3.created"
3854 ] = uninstall_sw
3855 db_k8scluster_update[
3856 "_admin.helm-chart-v3.operationalState"
3857 ] = "ENABLED"
3858 self.update_db_2(
3859 "k8sclusters", cluster_id, db_k8scluster_update
3860 )
3861 except Exception as e:
3862 self.logger.error(
3863 logging_text
3864 + "error initializing helm-v3 cluster: {}".format(str(e))
3865 )
3866 raise LcmException(
3867 "K8s cluster '{}' has not been initialized for '{}'".format(
3868 cluster_id, cluster_type
3869 )
3870 )
3871 else:
3872 raise LcmException(
3873 "K8s cluster '{}' has not been initialized for '{}'".format(
3874 cluster_id, cluster_type
3875 )
3876 )
3877 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
3878 return k8s_id
3879
3880 logging_text += "Deploy kdus: "
3881 step = ""
3882 try:
3883 db_nsr_update = {"_admin.deployed.K8s": []}
3884 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3885
3886 index = 0
3887 updated_cluster_list = []
3888 updated_v3_cluster_list = []
3889
3890 for vnfr_data in db_vnfrs.values():
3891 vca_id = self.get_vca_id(vnfr_data, {})
3892 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
3893 # Step 0: Prepare and set parameters
3894 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
3895 vnfd_id = vnfr_data.get("vnfd-id")
3896 vnfd_with_id = find_in_list(
3897 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3898 )
3899 kdud = next(
3900 kdud
3901 for kdud in vnfd_with_id["kdu"]
3902 if kdud["name"] == kdur["kdu-name"]
3903 )
3904 namespace = kdur.get("k8s-namespace")
3905 kdu_deployment_name = kdur.get("kdu-deployment-name")
3906 if kdur.get("helm-chart"):
3907 kdumodel = kdur["helm-chart"]
3908 # Default version: helm3, if helm-version is v2 assign v2
3909 k8sclustertype = "helm-chart-v3"
3910 self.logger.debug("kdur: {}".format(kdur))
3911 if (
3912 kdur.get("helm-version")
3913 and kdur.get("helm-version") == "v2"
3914 ):
3915 k8sclustertype = "helm-chart"
3916 elif kdur.get("juju-bundle"):
3917 kdumodel = kdur["juju-bundle"]
3918 k8sclustertype = "juju-bundle"
3919 else:
3920 raise LcmException(
3921 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3922 "juju-bundle. Maybe an old NBI version is running".format(
3923 vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
3924 )
3925 )
3926 # check if kdumodel is a file and exists
3927 try:
3928 vnfd_with_id = find_in_list(
3929 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3930 )
3931 storage = deep_get(vnfd_with_id, ("_admin", "storage"))
3932 if storage: # may be not present if vnfd has not artifacts
3933 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
3934 if storage["pkg-dir"]:
3935 filename = "{}/{}/{}s/{}".format(
3936 storage["folder"],
3937 storage["pkg-dir"],
3938 k8sclustertype,
3939 kdumodel,
3940 )
3941 else:
3942 filename = "{}/Scripts/{}s/{}".format(
3943 storage["folder"],
3944 k8sclustertype,
3945 kdumodel,
3946 )
3947 if self.fs.file_exists(
3948 filename, mode="file"
3949 ) or self.fs.file_exists(filename, mode="dir"):
3950 kdumodel = self.fs.path + filename
3951 except (asyncio.TimeoutError, asyncio.CancelledError):
3952 raise
3953 except Exception: # it is not a file
3954 pass
3955
3956 k8s_cluster_id = kdur["k8s-cluster"]["id"]
3957 step = "Synchronize repos for k8s cluster '{}'".format(
3958 k8s_cluster_id
3959 )
3960 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
3961
3962 # Synchronize repos
3963 if (
3964 k8sclustertype == "helm-chart"
3965 and cluster_uuid not in updated_cluster_list
3966 ) or (
3967 k8sclustertype == "helm-chart-v3"
3968 and cluster_uuid not in updated_v3_cluster_list
3969 ):
3970 del_repo_list, added_repo_dict = await asyncio.ensure_future(
3971 self.k8scluster_map[k8sclustertype].synchronize_repos(
3972 cluster_uuid=cluster_uuid
3973 )
3974 )
3975 if del_repo_list or added_repo_dict:
3976 if k8sclustertype == "helm-chart":
3977 unset = {
3978 "_admin.helm_charts_added." + item: None
3979 for item in del_repo_list
3980 }
3981 updated = {
3982 "_admin.helm_charts_added." + item: name
3983 for item, name in added_repo_dict.items()
3984 }
3985 updated_cluster_list.append(cluster_uuid)
3986 elif k8sclustertype == "helm-chart-v3":
3987 unset = {
3988 "_admin.helm_charts_v3_added." + item: None
3989 for item in del_repo_list
3990 }
3991 updated = {
3992 "_admin.helm_charts_v3_added." + item: name
3993 for item, name in added_repo_dict.items()
3994 }
3995 updated_v3_cluster_list.append(cluster_uuid)
3996 self.logger.debug(
3997 logging_text + "repos synchronized on k8s cluster "
3998 "'{}' to_delete: {}, to_add: {}".format(
3999 k8s_cluster_id, del_repo_list, added_repo_dict
4000 )
4001 )
4002 self.db.set_one(
4003 "k8sclusters",
4004 {"_id": k8s_cluster_id},
4005 updated,
4006 unset=unset,
4007 )
4008
4009 # Instantiate kdu
4010 step = "Instantiating KDU {}.{} in k8s cluster {}".format(
4011 vnfr_data["member-vnf-index-ref"],
4012 kdur["kdu-name"],
4013 k8s_cluster_id,
4014 )
4015 k8s_instance_info = {
4016 "kdu-instance": None,
4017 "k8scluster-uuid": cluster_uuid,
4018 "k8scluster-type": k8sclustertype,
4019 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
4020 "kdu-name": kdur["kdu-name"],
4021 "kdu-model": kdumodel,
4022 "namespace": namespace,
4023 "kdu-deployment-name": kdu_deployment_name,
4024 }
4025 db_path = "_admin.deployed.K8s.{}".format(index)
4026 db_nsr_update[db_path] = k8s_instance_info
4027 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4028 vnfd_with_id = find_in_list(
4029 db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
4030 )
4031 task = asyncio.ensure_future(
4032 self._install_kdu(
4033 nsr_id,
4034 db_path,
4035 vnfr_data,
4036 kdu_index,
4037 kdud,
4038 vnfd_with_id,
4039 k8s_instance_info,
4040 k8params=desc_params,
4041 timeout=1800,
4042 vca_id=vca_id,
4043 )
4044 )
4045 self.lcm_tasks.register(
4046 "ns",
4047 nsr_id,
4048 nslcmop_id,
4049 "instantiate_KDU-{}".format(index),
4050 task,
4051 )
4052 task_instantiation_info[task] = "Deploying KDU {}".format(
4053 kdur["kdu-name"]
4054 )
4055
4056 index += 1
4057
4058 except (LcmException, asyncio.CancelledError):
4059 raise
4060 except Exception as e:
4061 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
4062 if isinstance(e, (N2VCException, DbException)):
4063 self.logger.error(logging_text + msg)
4064 else:
4065 self.logger.critical(logging_text + msg, exc_info=True)
4066 raise LcmException(msg)
4067 finally:
4068 if db_nsr_update:
4069 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4070
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC asyncio task per execution environment of descriptor_config.

        For each juju/helm execution-environment item found in descriptor_config, the
        corresponding entry at db_nsr._admin.deployed.VCA.<index> is looked up (matching
        member-vnf-index, vdu_id, kdu_name, vdu_count_index and ee_descriptor_id); if not
        found, a new entry is created and persisted. Each launched task is registered in
        self.lcm_tasks and annotated in task_instantiation_info so the caller can wait for it.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        # Build the list of execution environments to deploy from the descriptor
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Determine the vca_type and vca_name for this execution environment
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an existing _admin.deployed.VCA entry; the for/else creates one
            # when the loop finishes without a matching break
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # vca_index holds the last enumerated index (-1 when list was empty),
                # so +1 is the position where the new entry is appended
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
4234
4235 @staticmethod
4236 def _create_nslcmop(nsr_id, operation, params):
4237 """
4238 Creates a ns-lcm-opp content to be stored at database.
4239 :param nsr_id: internal id of the instance
4240 :param operation: instantiate, terminate, scale, action, ...
4241 :param params: user parameters for the operation
4242 :return: dictionary following SOL005 format
4243 """
4244 # Raise exception if invalid arguments
4245 if not (nsr_id and operation and params):
4246 raise LcmException(
4247 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
4248 )
4249 now = time()
4250 _id = str(uuid4())
4251 nslcmop = {
4252 "id": _id,
4253 "_id": _id,
4254 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
4255 "operationState": "PROCESSING",
4256 "statusEnteredTime": now,
4257 "nsInstanceId": nsr_id,
4258 "lcmOperationType": operation,
4259 "startTime": now,
4260 "isAutomaticInvocation": False,
4261 "operationParams": params,
4262 "isCancelPending": False,
4263 "links": {
4264 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
4265 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
4266 },
4267 }
4268 return nslcmop
4269
4270 def _format_additional_params(self, params):
4271 params = params or {}
4272 for key, value in params.items():
4273 if str(value).startswith("!!yaml "):
4274 params[key] = yaml.safe_load(value[7:])
4275 return params
4276
4277 def _get_terminate_primitive_params(self, seq, vnf_index):
4278 primitive = seq.get("name")
4279 primitive_params = {}
4280 params = {
4281 "member_vnf_index": vnf_index,
4282 "primitive": primitive,
4283 "primitive_params": primitive_params,
4284 }
4285 desc_params = {}
4286 return self._map_primitive_params(seq, params, desc_params)
4287
4288 # sub-operations
4289
4290 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
4291 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
4292 if op.get("operationState") == "COMPLETED":
4293 # b. Skip sub-operation
4294 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4295 return self.SUBOPERATION_STATUS_SKIP
4296 else:
4297 # c. retry executing sub-operation
4298 # The sub-operation exists, and operationState != 'COMPLETED'
4299 # Update operationState = 'PROCESSING' to indicate a retry.
4300 operationState = "PROCESSING"
4301 detailed_status = "In progress"
4302 self._update_suboperation_status(
4303 db_nslcmop, op_index, operationState, detailed_status
4304 )
4305 # Return the sub-operation index
4306 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4307 # with arguments extracted from the sub-operation
4308 return op_index
4309
4310 # Find a sub-operation where all keys in a matching dictionary must match
4311 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4312 def _find_suboperation(self, db_nslcmop, match):
4313 if db_nslcmop and match:
4314 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4315 for i, op in enumerate(op_list):
4316 if all(op.get(k) == match[k] for k in match):
4317 return i
4318 return self.SUBOPERATION_STATUS_NOT_FOUND
4319
4320 # Update status for a sub-operation given its index
4321 def _update_suboperation_status(
4322 self, db_nslcmop, op_index, operationState, detailed_status
4323 ):
4324 # Update DB for HA tasks
4325 q_filter = {"_id": db_nslcmop["_id"]}
4326 update_dict = {
4327 "_admin.operations.{}.operationState".format(op_index): operationState,
4328 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4329 }
4330 self.db.set_one(
4331 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4332 )
4333
4334 # Add sub-operation, return the index of the added sub-operation
4335 # Optionally, set operationState, detailed-status, and operationType
4336 # Status and type are currently set for 'scale' sub-operations:
4337 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4338 # 'detailed-status' : status message
4339 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4340 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4341 def _add_suboperation(
4342 self,
4343 db_nslcmop,
4344 vnf_index,
4345 vdu_id,
4346 vdu_count_index,
4347 vdu_name,
4348 primitive,
4349 mapped_primitive_params,
4350 operationState=None,
4351 detailed_status=None,
4352 operationType=None,
4353 RO_nsr_id=None,
4354 RO_scaling_info=None,
4355 ):
4356 if not db_nslcmop:
4357 return self.SUBOPERATION_STATUS_NOT_FOUND
4358 # Get the "_admin.operations" list, if it exists
4359 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4360 op_list = db_nslcmop_admin.get("operations")
4361 # Create or append to the "_admin.operations" list
4362 new_op = {
4363 "member_vnf_index": vnf_index,
4364 "vdu_id": vdu_id,
4365 "vdu_count_index": vdu_count_index,
4366 "primitive": primitive,
4367 "primitive_params": mapped_primitive_params,
4368 }
4369 if operationState:
4370 new_op["operationState"] = operationState
4371 if detailed_status:
4372 new_op["detailed-status"] = detailed_status
4373 if operationType:
4374 new_op["lcmOperationType"] = operationType
4375 if RO_nsr_id:
4376 new_op["RO_nsr_id"] = RO_nsr_id
4377 if RO_scaling_info:
4378 new_op["RO_scaling_info"] = RO_scaling_info
4379 if not op_list:
4380 # No existing operations, create key 'operations' with current operation as first list element
4381 db_nslcmop_admin.update({"operations": [new_op]})
4382 op_list = db_nslcmop_admin.get("operations")
4383 else:
4384 # Existing operations, append operation to list
4385 op_list.append(new_op)
4386
4387 db_nslcmop_update = {"_admin.operations": op_list}
4388 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4389 op_index = len(op_list) - 1
4390 return op_index
4391
4392 # Helper methods for scale() sub-operations
4393
4394 # pre-scale/post-scale:
4395 # Check for 3 different cases:
4396 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4397 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4398 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4399 def _check_or_add_scale_suboperation(
4400 self,
4401 db_nslcmop,
4402 vnf_index,
4403 vnf_config_primitive,
4404 primitive_params,
4405 operationType,
4406 RO_nsr_id=None,
4407 RO_scaling_info=None,
4408 ):
4409 # Find this sub-operation
4410 if RO_nsr_id and RO_scaling_info:
4411 operationType = "SCALE-RO"
4412 match = {
4413 "member_vnf_index": vnf_index,
4414 "RO_nsr_id": RO_nsr_id,
4415 "RO_scaling_info": RO_scaling_info,
4416 }
4417 else:
4418 match = {
4419 "member_vnf_index": vnf_index,
4420 "primitive": vnf_config_primitive,
4421 "primitive_params": primitive_params,
4422 "lcmOperationType": operationType,
4423 }
4424 op_index = self._find_suboperation(db_nslcmop, match)
4425 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4426 # a. New sub-operation
4427 # The sub-operation does not exist, add it.
4428 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4429 # The following parameters are set to None for all kind of scaling:
4430 vdu_id = None
4431 vdu_count_index = None
4432 vdu_name = None
4433 if RO_nsr_id and RO_scaling_info:
4434 vnf_config_primitive = None
4435 primitive_params = None
4436 else:
4437 RO_nsr_id = None
4438 RO_scaling_info = None
4439 # Initial status for sub-operation
4440 operationState = "PROCESSING"
4441 detailed_status = "In progress"
4442 # Add sub-operation for pre/post-scaling (zero or more operations)
4443 self._add_suboperation(
4444 db_nslcmop,
4445 vnf_index,
4446 vdu_id,
4447 vdu_count_index,
4448 vdu_name,
4449 vnf_config_primitive,
4450 primitive_params,
4451 operationState,
4452 detailed_status,
4453 operationType,
4454 RO_nsr_id,
4455 RO_scaling_info,
4456 )
4457 return self.SUBOPERATION_STATUS_NEW
4458 else:
4459 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4460 # or op_index (operationState != 'COMPLETED')
4461 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4462
4463 # Function to return execution_environment id
4464
4465 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4466 # TODO vdu_index_count
4467 for vca in vca_deployed_list:
4468 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4469 return vca.get("ee_id")
4470
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix used in every log message of this operation
        :param db_nslcmop: nslcmop database record of the current operation
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (cloud) identifier, forwarded to the VCA connector calls
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default to proxy charm when the deployment record does not state a type
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {}  for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4576
4577 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4578 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4579 namespace = "." + db_nsr["_id"]
4580 try:
4581 await self.n2vc.delete_namespace(
4582 namespace=namespace,
4583 total_timeout=self.timeout.charm_delete,
4584 vca_id=vca_id,
4585 )
4586 except N2VCNotFound: # already deleted. Skip
4587 pass
4588 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4589
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate NS instance *nsr_id* as requested by operation *nslcmop_id*.

        Runs in three stages: (1) prepare, (2) execute terminate primitives and
        destroy the execution environments that need them, (3) delete all remaining
        VCAs, KDUs and the RO/VIM deployment. Final status is always written to the
        database and a "terminated" kafka message is emitted in the finally block.
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            # user-supplied timeout overrides the configured default
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy so partial cleanup does not corrupt the db record
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, fetching each distinct VNFD only once
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor matching the VCA scope
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    namespace=db_nslcmop["nsInstanceId"],
                    certificate_name=self.EE_TLS_NAME,
                )
                await self.vca_map["helm-v3"].delete_namespace(
                    namespace=db_nslcmop["nsInstanceId"],
                )

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
            self.db.del_list("alerts", {"tags.ns_id": nsr_id})

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4923
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for all tasks in *created_tasks_info* and collect their errors.

        Progress ("done/total") is written to stage[1] and to the nslcmop record
        after every completed task. On overall timeout, every still-pending task is
        reported as a Timeout error. When nsr_id is given, error summaries are also
        written to the nsr record.

        :param created_tasks_info: dict mapping each asyncio task to its description
        :param timeout: overall wait budget in seconds for the whole set of tasks
        :return: list of detailed error messages (empty when everything succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining budget shrinks as time passes
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/expected exception types are logged briefly; anything
                    # else gets a full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
5000
5001 @staticmethod
5002 def _map_primitive_params(primitive_desc, params, instantiation_params):
5003 """
5004 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
5005 The default-value is used. If it is between < > it look for a value at instantiation_params
5006 :param primitive_desc: portion of VNFD/NSD that describes primitive
5007 :param params: Params provided by user
5008 :param instantiation_params: Instantiation params provided by user
5009 :return: a dictionary with the calculated params
5010 """
5011 calculated_params = {}
5012 for parameter in primitive_desc.get("parameter", ()):
5013 param_name = parameter["name"]
5014 if param_name in params:
5015 calculated_params[param_name] = params[param_name]
5016 elif "default-value" in parameter or "value" in parameter:
5017 if "value" in parameter:
5018 calculated_params[param_name] = parameter["value"]
5019 else:
5020 calculated_params[param_name] = parameter["default-value"]
5021 if (
5022 isinstance(calculated_params[param_name], str)
5023 and calculated_params[param_name].startswith("<")
5024 and calculated_params[param_name].endswith(">")
5025 ):
5026 if calculated_params[param_name][1:-1] in instantiation_params:
5027 calculated_params[param_name] = instantiation_params[
5028 calculated_params[param_name][1:-1]
5029 ]
5030 else:
5031 raise LcmException(
5032 "Parameter {} needed to execute primitive {} not provided".format(
5033 calculated_params[param_name], primitive_desc["name"]
5034 )
5035 )
5036 else:
5037 raise LcmException(
5038 "Parameter {} needed to execute primitive {} not provided".format(
5039 param_name, primitive_desc["name"]
5040 )
5041 )
5042
5043 if isinstance(calculated_params[param_name], (dict, list, tuple)):
5044 calculated_params[param_name] = yaml.safe_dump(
5045 calculated_params[param_name], default_flow_style=True, width=256
5046 )
5047 elif isinstance(calculated_params[param_name], str) and calculated_params[
5048 param_name
5049 ].startswith("!!yaml "):
5050 calculated_params[param_name] = calculated_params[param_name][7:]
5051 if parameter.get("data-type") == "INTEGER":
5052 try:
5053 calculated_params[param_name] = int(calculated_params[param_name])
5054 except ValueError: # error converting string to int
5055 raise LcmException(
5056 "Parameter {} of primitive {} must be integer".format(
5057 param_name, primitive_desc["name"]
5058 )
5059 )
5060 elif parameter.get("data-type") == "BOOLEAN":
5061 calculated_params[param_name] = not (
5062 (str(calculated_params[param_name])).lower() == "false"
5063 )
5064
5065 # add always ns_config_info if primitive name is config
5066 if primitive_desc["name"] == "config":
5067 if "ns_config_info" in instantiation_params:
5068 calculated_params["ns_config_info"] = instantiation_params[
5069 "ns_config_info"
5070 ]
5071 return calculated_params
5072
5073 def _look_for_deployed_vca(
5074 self,
5075 deployed_vca,
5076 member_vnf_index,
5077 vdu_id,
5078 vdu_count_index,
5079 kdu_name=None,
5080 ee_descriptor_id=None,
5081 ):
5082 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
5083 for vca in deployed_vca:
5084 if not vca:
5085 continue
5086 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
5087 continue
5088 if (
5089 vdu_count_index is not None
5090 and vdu_count_index != vca["vdu_count_index"]
5091 ):
5092 continue
5093 if kdu_name and kdu_name != vca["kdu_name"]:
5094 continue
5095 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
5096 continue
5097 break
5098 else:
5099 # vca_deployed not found
5100 raise LcmException(
5101 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
5102 " is not deployed".format(
5103 member_vnf_index,
5104 vdu_id,
5105 vdu_count_index,
5106 kdu_name,
5107 ee_descriptor_id,
5108 )
5109 )
5110 # get ee_id
5111 ee_id = vca.get("ee_id")
5112 vca_type = vca.get(
5113 "type", "lxc_proxy_charm"
5114 ) # default value for backward compatibility - proxy charm
5115 if not ee_id:
5116 raise LcmException(
5117 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
5118 "execution environment".format(
5119 member_vnf_index, vdu_id, kdu_name, vdu_count_index
5120 )
5121 )
5122 return ee_id, vca_type
5123
5124 async def _ns_execute_primitive(
5125 self,
5126 ee_id,
5127 primitive,
5128 primitive_params,
5129 retries=0,
5130 retries_interval=30,
5131 timeout=None,
5132 vca_type=None,
5133 db_dict=None,
5134 vca_id: str = None,
5135 ) -> (str, str):
5136 try:
5137 if primitive == "config":
5138 primitive_params = {"params": primitive_params}
5139
5140 vca_type = vca_type or "lxc_proxy_charm"
5141
5142 while retries >= 0:
5143 try:
5144 output = await asyncio.wait_for(
5145 self.vca_map[vca_type].exec_primitive(
5146 ee_id=ee_id,
5147 primitive_name=primitive,
5148 params_dict=primitive_params,
5149 progress_timeout=self.timeout.progress_primitive,
5150 total_timeout=self.timeout.primitive,
5151 db_dict=db_dict,
5152 vca_id=vca_id,
5153 vca_type=vca_type,
5154 ),
5155 timeout=timeout or self.timeout.primitive,
5156 )
5157 # execution was OK
5158 break
5159 except asyncio.CancelledError:
5160 raise
5161 except Exception as e:
5162 retries -= 1
5163 if retries >= 0:
5164 self.logger.debug(
5165 "Error executing action {} on {} -> {}".format(
5166 primitive, ee_id, e
5167 )
5168 )
5169 # wait and retry
5170 await asyncio.sleep(retries_interval)
5171 else:
5172 if isinstance(e, asyncio.TimeoutError):
5173 e = N2VCException(
5174 message="Timed out waiting for action to complete"
5175 )
5176 return "FAILED", getattr(e, "message", repr(e))
5177
5178 return "COMPLETED", output
5179
5180 except (LcmException, asyncio.CancelledError):
5181 raise
5182 except Exception as e:
5183 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5184
5185 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5186 """
5187 Updating the vca_status with latest juju information in nsrs record
5188 :param: nsr_id: Id of the nsr
5189 :param: nslcmop_id: Id of the nslcmop
5190 :return: None
5191 """
5192
5193 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5194 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5195 vca_id = self.get_vca_id({}, db_nsr)
5196 if db_nsr["_admin"]["deployed"]["K8s"]:
5197 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5198 cluster_uuid, kdu_instance, cluster_type = (
5199 k8s["k8scluster-uuid"],
5200 k8s["kdu-instance"],
5201 k8s["k8scluster-type"],
5202 )
5203 await self._on_update_k8s_db(
5204 cluster_uuid=cluster_uuid,
5205 kdu_instance=kdu_instance,
5206 filter={"_id": nsr_id},
5207 vca_id=vca_id,
5208 cluster_type=cluster_type,
5209 )
5210 else:
5211 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5212 table, filter = "nsrs", {"_id": nsr_id}
5213 path = "_admin.deployed.VCA.{}.".format(vca_index)
5214 await self._on_update_n2vc_db(table, filter, path, {})
5215
5216 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5217 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5218
    async def action(self, nsr_id, nslcmop_id):
        """Execute a primitive (action) on an NS, VNF, VDU or KDU.

        Reads the operation parameters from the nslcmop record, locates the
        primitive in the corresponding descriptor configuration, and runs it
        either through the K8s cluster connector (KDU "upgrade"/"rollback"/
        "status" or declared KDU actions) or through the deployed VCA
        execution environment. Progress and the final result are written back
        to the nslcmops/nsrs records and a kafka "actioned" message is sent.

        :param nsr_id: NS instance id
        :param nslcmop_id: operation id holding the action parameters
        :return: (nslcmop_operation_state, detailed_status), returned from the
            finally block
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        step = ""
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params is stored as a JSON string in the record
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout.primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                # kdur additionalParams are stored JSON-encoded; decode in place
                if db_vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only assigned in the vnf_index branch
            # above; when the action targets the NS level (no vnf_index) this
            # call references an unbound local — confirm intended
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # KDU-level built-ins do not need a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # gather the additional params of the addressed entity
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): the loop variable shadows the outer `primitive`
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            # NOTE(review): kdu_action is only bound when the kdu has a
            # configuration; with kdu_name set but no configuration and a
            # non-built-in primitive this condition would hit an unbound name
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        if kdu_model.count("/") < 2:  # helm chart is not embedded
                            parts = kdu_model.split(sep=":")
                            if len(parts) == 2:
                                kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # non-KDU path: run the primitive through the deployed VCA
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # status records are always updated here, success or failure
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            # returning from finally supersedes the bare return in the try body
            return nslcmop_operation_state, detailed_status
5590
5591 async def terminate_vdus(
5592 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5593 ):
5594 """This method terminates VDUs
5595
5596 Args:
5597 db_vnfr: VNF instance record
5598 member_vnf_index: VNF index to identify the VDUs to be removed
5599 db_nsr: NS instance record
5600 update_db_nslcmops: Nslcmop update record
5601 """
5602 vca_scaling_info = []
5603 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5604 scaling_info["scaling_direction"] = "IN"
5605 scaling_info["vdu-delete"] = {}
5606 scaling_info["kdu-delete"] = {}
5607 db_vdur = db_vnfr.get("vdur")
5608 vdur_list = copy(db_vdur)
5609 count_index = 0
5610 for index, vdu in enumerate(vdur_list):
5611 vca_scaling_info.append(
5612 {
5613 "osm_vdu_id": vdu["vdu-id-ref"],
5614 "member-vnf-index": member_vnf_index,
5615 "type": "delete",
5616 "vdu_index": count_index,
5617 }
5618 )
5619 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5620 scaling_info["vdu"].append(
5621 {
5622 "name": vdu.get("name") or vdu.get("vdu-name"),
5623 "vdu_id": vdu["vdu-id-ref"],
5624 "interface": [],
5625 }
5626 )
5627 for interface in vdu["interfaces"]:
5628 scaling_info["vdu"][index]["interface"].append(
5629 {
5630 "name": interface["name"],
5631 "ip_address": interface["ip-address"],
5632 "mac_address": interface.get("mac-address"),
5633 }
5634 )
5635 self.logger.info("NS update scaling info{}".format(scaling_info))
5636 stage[2] = "Terminating VDUs"
5637 if scaling_info.get("vdu-delete"):
5638 # scale_process = "RO"
5639 if self.ro_config.ng:
5640 await self._scale_ng_ro(
5641 logging_text,
5642 db_nsr,
5643 update_db_nslcmops,
5644 db_vnfr,
5645 scaling_info,
5646 stage,
5647 )
5648
5649 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5650 """This method is to Remove VNF instances from NS.
5651
5652 Args:
5653 nsr_id: NS instance id
5654 nslcmop_id: nslcmop id of update
5655 vnf_instance_id: id of the VNF instance to be removed
5656
5657 Returns:
5658 result: (str, str) COMPLETED/FAILED, details
5659 """
5660 try:
5661 db_nsr_update = {}
5662 logging_text = "Task ns={} update ".format(nsr_id)
5663 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5664 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5665 if check_vnfr_count > 1:
5666 stage = ["", "", ""]
5667 step = "Getting nslcmop from database"
5668 self.logger.debug(
5669 step + " after having waited for previous tasks to be completed"
5670 )
5671 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5672 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5673 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5674 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5675 """ db_vnfr = self.db.get_one(
5676 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5677
5678 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5679 await self.terminate_vdus(
5680 db_vnfr,
5681 member_vnf_index,
5682 db_nsr,
5683 update_db_nslcmops,
5684 stage,
5685 logging_text,
5686 )
5687
5688 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5689 constituent_vnfr.remove(db_vnfr.get("_id"))
5690 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5691 "constituent-vnfr-ref"
5692 )
5693 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5694 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5695 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5696 return "COMPLETED", "Done"
5697 else:
5698 step = "Terminate VNF Failed with"
5699 raise LcmException(
5700 "{} Cannot terminate the last VNF in this NS.".format(
5701 vnf_instance_id
5702 )
5703 )
5704 except (LcmException, asyncio.CancelledError):
5705 raise
5706 except Exception as e:
5707 self.logger.debug("Error removing VNF {}".format(e))
5708 return "FAILED", "Error removing VNF {}".format(e)
5709
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the VNF's current VDUs through RO, refreshes the vnfr
        (connection points, vdur and revision) from the latest VNFD revision,
        and instantiates the new resources through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the vnfr connection points from the descriptor ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur list is provided by the caller in the operation params
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # NOTE(review): vca_scaling_info and cloud_init_list are built but
            # never consumed afterwards in this method — confirm intended
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                # NOTE(review): count_index stays 0 for every vdu — confirm
                # this is the intended create-count for _scale_ng_ro
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5835
5836 async def _ns_charm_upgrade(
5837 self,
5838 ee_id,
5839 charm_id,
5840 charm_type,
5841 path,
5842 timeout: float = None,
5843 ) -> (str, str):
5844 """This method upgrade charms in VNF instances
5845
5846 Args:
5847 ee_id: Execution environment id
5848 path: Local path to the charm
5849 charm_id: charm-id
5850 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5851 timeout: (Float) Timeout for the ns update operation
5852
5853 Returns:
5854 result: (str, str) COMPLETED/FAILED, details
5855 """
5856 try:
5857 charm_type = charm_type or "lxc_proxy_charm"
5858 output = await self.vca_map[charm_type].upgrade_charm(
5859 ee_id=ee_id,
5860 path=path,
5861 charm_id=charm_id,
5862 charm_type=charm_type,
5863 timeout=timeout or self.timeout.ns_update,
5864 )
5865
5866 if output:
5867 return "COMPLETED", output
5868
5869 except (LcmException, asyncio.CancelledError):
5870 raise
5871
5872 except Exception as e:
5873 self.logger.debug("Error upgrading charm {}".format(path))
5874
5875 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5876
5877 async def update(self, nsr_id, nslcmop_id):
5878 """Update NS according to different update types
5879
5880 This method performs upgrade of VNF instances then updates the revision
5881 number in VNF record
5882
5883 Args:
5884 nsr_id: Network service will be updated
5885 nslcmop_id: ns lcm operation id
5886
5887 Returns:
5888 It may raise DbException, LcmException, N2VCException, K8sException
5889
5890 """
5891 # Try to lock HA task here
5892 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5893 if not task_is_locked_by_me:
5894 return
5895
5896 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5897 self.logger.debug(logging_text + "Enter")
5898
5899 # Set the required variables to be filled up later
5900 db_nsr = None
5901 db_nslcmop_update = {}
5902 vnfr_update = {}
5903 nslcmop_operation_state = None
5904 db_nsr_update = {}
5905 error_description_nslcmop = ""
5906 exc = None
5907 change_type = "updated"
5908 detailed_status = ""
5909 member_vnf_index = None
5910
5911 try:
5912 # wait for any previous tasks in process
5913 step = "Waiting for previous operations to terminate"
5914 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5915 self._write_ns_status(
5916 nsr_id=nsr_id,
5917 ns_state=None,
5918 current_operation="UPDATING",
5919 current_operation_id=nslcmop_id,
5920 )
5921
5922 step = "Getting nslcmop from database"
5923 db_nslcmop = self.db.get_one(
5924 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5925 )
5926 update_type = db_nslcmop["operationParams"]["updateType"]
5927
5928 step = "Getting nsr from database"
5929 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5930 old_operational_status = db_nsr["operational-status"]
5931 db_nsr_update["operational-status"] = "updating"
5932 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5933 nsr_deployed = db_nsr["_admin"].get("deployed")
5934
5935 if update_type == "CHANGE_VNFPKG":
5936 # Get the input parameters given through update request
5937 vnf_instance_id = db_nslcmop["operationParams"][
5938 "changeVnfPackageData"
5939 ].get("vnfInstanceId")
5940
5941 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5942 "vnfdId"
5943 )
5944 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5945
5946 step = "Getting vnfr from database"
5947 db_vnfr = self.db.get_one(
5948 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5949 )
5950
5951 step = "Getting vnfds from database"
5952 # Latest VNFD
5953 latest_vnfd = self.db.get_one(
5954 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5955 )
5956 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5957
5958 # Current VNFD
5959 current_vnf_revision = db_vnfr.get("revision", 1)
5960 current_vnfd = self.db.get_one(
5961 "vnfds_revisions",
5962 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5963 fail_on_empty=False,
5964 )
5965 # Charm artifact paths will be filled up later
5966 (
5967 current_charm_artifact_path,
5968 target_charm_artifact_path,
5969 charm_artifact_paths,
5970 helm_artifacts,
5971 ) = ([], [], [], [])
5972
5973 step = "Checking if revision has changed in VNFD"
5974 if current_vnf_revision != latest_vnfd_revision:
5975 change_type = "policy_updated"
5976
5977 # There is new revision of VNFD, update operation is required
5978 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5979 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5980
5981 step = "Removing the VNFD packages if they exist in the local path"
5982 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5983 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5984
5985 step = "Get the VNFD packages from FSMongo"
5986 self.fs.sync(from_path=latest_vnfd_path)
5987 self.fs.sync(from_path=current_vnfd_path)
5988
5989 step = (
5990 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5991 )
5992 current_base_folder = current_vnfd["_admin"]["storage"]
5993 latest_base_folder = latest_vnfd["_admin"]["storage"]
5994
5995 for vca_index, vca_deployed in enumerate(
5996 get_iterable(nsr_deployed, "VCA")
5997 ):
5998 vnf_index = db_vnfr.get("member-vnf-index-ref")
5999
6000 # Getting charm-id and charm-type
6001 if vca_deployed.get("member-vnf-index") == vnf_index:
6002 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6003 vca_type = vca_deployed.get("type")
6004 vdu_count_index = vca_deployed.get("vdu_count_index")
6005
6006 # Getting ee-id
6007 ee_id = vca_deployed.get("ee_id")
6008
6009 step = "Getting descriptor config"
6010 if current_vnfd.get("kdu"):
6011 search_key = "kdu_name"
6012 else:
6013 search_key = "vnfd_id"
6014
6015 entity_id = vca_deployed.get(search_key)
6016
6017 descriptor_config = get_configuration(
6018 current_vnfd, entity_id
6019 )
6020
6021 if "execution-environment-list" in descriptor_config:
6022 ee_list = descriptor_config.get(
6023 "execution-environment-list", []
6024 )
6025 else:
6026 ee_list = []
6027
6028 # There could be several charm used in the same VNF
6029 for ee_item in ee_list:
6030 if ee_item.get("juju"):
6031 step = "Getting charm name"
6032 charm_name = ee_item["juju"].get("charm")
6033
6034 step = "Setting Charm artifact paths"
6035 current_charm_artifact_path.append(
6036 get_charm_artifact_path(
6037 current_base_folder,
6038 charm_name,
6039 vca_type,
6040 current_vnf_revision,
6041 )
6042 )
6043 target_charm_artifact_path.append(
6044 get_charm_artifact_path(
6045 latest_base_folder,
6046 charm_name,
6047 vca_type,
6048 latest_vnfd_revision,
6049 )
6050 )
6051 elif ee_item.get("helm-chart"):
6052 # add chart to list and all parameters
6053 step = "Getting helm chart name"
6054 chart_name = ee_item.get("helm-chart")
6055 if (
6056 ee_item.get("helm-version")
6057 and ee_item.get("helm-version") == "v2"
6058 ):
6059 vca_type = "helm"
6060 else:
6061 vca_type = "helm-v3"
6062 step = "Setting Helm chart artifact paths"
6063
6064 helm_artifacts.append(
6065 {
6066 "current_artifact_path": get_charm_artifact_path(
6067 current_base_folder,
6068 chart_name,
6069 vca_type,
6070 current_vnf_revision,
6071 ),
6072 "target_artifact_path": get_charm_artifact_path(
6073 latest_base_folder,
6074 chart_name,
6075 vca_type,
6076 latest_vnfd_revision,
6077 ),
6078 "ee_id": ee_id,
6079 "vca_index": vca_index,
6080 "vdu_index": vdu_count_index,
6081 }
6082 )
6083
6084 charm_artifact_paths = zip(
6085 current_charm_artifact_path, target_charm_artifact_path
6086 )
6087
6088 step = "Checking if software version has changed in VNFD"
6089 if find_software_version(current_vnfd) != find_software_version(
6090 latest_vnfd
6091 ):
6092 step = "Checking if existing VNF has charm"
6093 for current_charm_path, target_charm_path in list(
6094 charm_artifact_paths
6095 ):
6096 if current_charm_path:
6097 raise LcmException(
6098 "Software version change is not supported as VNF instance {} has charm.".format(
6099 vnf_instance_id
6100 )
6101 )
6102
6103 # There is no change in the charm package, then redeploy the VNF
6104 # based on new descriptor
6105 step = "Redeploying VNF"
6106 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6107 (result, detailed_status) = await self._ns_redeploy_vnf(
6108 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
6109 )
6110 if result == "FAILED":
6111 nslcmop_operation_state = result
6112 error_description_nslcmop = detailed_status
6113 db_nslcmop_update["detailed-status"] = detailed_status
6114 self.logger.debug(
6115 logging_text
6116 + " step {} Done with result {} {}".format(
6117 step, nslcmop_operation_state, detailed_status
6118 )
6119 )
6120
6121 else:
6122 step = "Checking if any charm package has changed or not"
6123 for current_charm_path, target_charm_path in list(
6124 charm_artifact_paths
6125 ):
6126 if (
6127 current_charm_path
6128 and target_charm_path
6129 and self.check_charm_hash_changed(
6130 current_charm_path, target_charm_path
6131 )
6132 ):
6133 step = "Checking whether VNF uses juju bundle"
6134 if check_juju_bundle_existence(current_vnfd):
6135 raise LcmException(
6136 "Charm upgrade is not supported for the instance which"
6137 " uses juju-bundle: {}".format(
6138 check_juju_bundle_existence(current_vnfd)
6139 )
6140 )
6141
6142 step = "Upgrading Charm"
6143 (
6144 result,
6145 detailed_status,
6146 ) = await self._ns_charm_upgrade(
6147 ee_id=ee_id,
6148 charm_id=vca_id,
6149 charm_type=vca_type,
6150 path=self.fs.path + target_charm_path,
6151 timeout=timeout_seconds,
6152 )
6153
6154 if result == "FAILED":
6155 nslcmop_operation_state = result
6156 error_description_nslcmop = detailed_status
6157
6158 db_nslcmop_update["detailed-status"] = detailed_status
6159 self.logger.debug(
6160 logging_text
6161 + " step {} Done with result {} {}".format(
6162 step, nslcmop_operation_state, detailed_status
6163 )
6164 )
6165
6166 step = "Updating policies"
6167 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6168 result = "COMPLETED"
6169 detailed_status = "Done"
6170 db_nslcmop_update["detailed-status"] = "Done"
6171
6172 # helm base EE
6173 for item in helm_artifacts:
6174 if not (
6175 item["current_artifact_path"]
6176 and item["target_artifact_path"]
6177 and self.check_charm_hash_changed(
6178 item["current_artifact_path"],
6179 item["target_artifact_path"],
6180 )
6181 ):
6182 continue
6183 db_update_entry = "_admin.deployed.VCA.{}.".format(
6184 item["vca_index"]
6185 )
6186 vnfr_id = db_vnfr["_id"]
6187 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6188 db_dict = {
6189 "collection": "nsrs",
6190 "filter": {"_id": nsr_id},
6191 "path": db_update_entry,
6192 }
6193 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
6194 await self.vca_map[vca_type].upgrade_execution_environment(
6195 namespace=namespace,
6196 helm_id=helm_id,
6197 db_dict=db_dict,
6198 config=osm_config,
6199 artifact_path=item["target_artifact_path"],
6200 vca_type=vca_type,
6201 )
6202 vnf_id = db_vnfr.get("vnfd-ref")
6203 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6204 self.logger.debug("get ssh key block")
6205 rw_mgmt_ip = None
6206 if deep_get(
6207 config_descriptor,
6208 ("config-access", "ssh-access", "required"),
6209 ):
6210 # Needed to inject a ssh key
6211 user = deep_get(
6212 config_descriptor,
6213 ("config-access", "ssh-access", "default-user"),
6214 )
6215 step = (
6216 "Install configuration Software, getting public ssh key"
6217 )
6218 pub_key = await self.vca_map[
6219 vca_type
6220 ].get_ee_ssh_public__key(
6221 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6222 )
6223
6224 step = (
6225 "Insert public key into VM user={} ssh_key={}".format(
6226 user, pub_key
6227 )
6228 )
6229 self.logger.debug(logging_text + step)
6230
6231 # wait for RO (ip-address) Insert pub_key into VM
6232 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6233 logging_text,
6234 nsr_id,
6235 vnfr_id,
6236 None,
6237 item["vdu_index"],
6238 user=user,
6239 pub_key=pub_key,
6240 )
6241
6242 initial_config_primitive_list = config_descriptor.get(
6243 "initial-config-primitive"
6244 )
6245 config_primitive = next(
6246 (
6247 p
6248 for p in initial_config_primitive_list
6249 if p["name"] == "config"
6250 ),
6251 None,
6252 )
6253 if not config_primitive:
6254 continue
6255
6256 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6257 if rw_mgmt_ip:
6258 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6259 if db_vnfr.get("additionalParamsForVnf"):
6260 deploy_params.update(
6261 parse_yaml_strings(
6262 db_vnfr["additionalParamsForVnf"].copy()
6263 )
6264 )
6265 primitive_params_ = self._map_primitive_params(
6266 config_primitive, {}, deploy_params
6267 )
6268
6269 step = "execute primitive '{}' params '{}'".format(
6270 config_primitive["name"], primitive_params_
6271 )
6272 self.logger.debug(logging_text + step)
6273 await self.vca_map[vca_type].exec_primitive(
6274 ee_id=ee_id,
6275 primitive_name=config_primitive["name"],
6276 params_dict=primitive_params_,
6277 db_dict=db_dict,
6278 vca_id=vca_id,
6279 vca_type=vca_type,
6280 )
6281
6282 step = "Updating policies"
6283 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6284 detailed_status = "Done"
6285 db_nslcmop_update["detailed-status"] = "Done"
6286
6287 # If nslcmop_operation_state is None, so any operation is not failed.
6288 if not nslcmop_operation_state:
6289 nslcmop_operation_state = "COMPLETED"
6290
6291 # If update CHANGE_VNFPKG nslcmop_operation is successful
6292 # vnf revision need to be updated
6293 vnfr_update["revision"] = latest_vnfd_revision
6294 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6295
6296 self.logger.debug(
6297 logging_text
6298 + " task Done with result {} {}".format(
6299 nslcmop_operation_state, detailed_status
6300 )
6301 )
6302 elif update_type == "REMOVE_VNF":
6303 # This part is included in https://osm.etsi.org/gerrit/11876
6304 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6305 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6306 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6307 step = "Removing VNF"
6308 (result, detailed_status) = await self.remove_vnf(
6309 nsr_id, nslcmop_id, vnf_instance_id
6310 )
6311 if result == "FAILED":
6312 nslcmop_operation_state = result
6313 error_description_nslcmop = detailed_status
6314 db_nslcmop_update["detailed-status"] = detailed_status
6315 change_type = "vnf_terminated"
6316 if not nslcmop_operation_state:
6317 nslcmop_operation_state = "COMPLETED"
6318 self.logger.debug(
6319 logging_text
6320 + " task Done with result {} {}".format(
6321 nslcmop_operation_state, detailed_status
6322 )
6323 )
6324
6325 elif update_type == "OPERATE_VNF":
6326 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6327 "vnfInstanceId"
6328 ]
6329 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6330 "changeStateTo"
6331 ]
6332 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6333 "additionalParam"
6334 ]
6335 (result, detailed_status) = await self.rebuild_start_stop(
6336 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6337 )
6338 if result == "FAILED":
6339 nslcmop_operation_state = result
6340 error_description_nslcmop = detailed_status
6341 db_nslcmop_update["detailed-status"] = detailed_status
6342 if not nslcmop_operation_state:
6343 nslcmop_operation_state = "COMPLETED"
6344 self.logger.debug(
6345 logging_text
6346 + " task Done with result {} {}".format(
6347 nslcmop_operation_state, detailed_status
6348 )
6349 )
6350
6351 # If nslcmop_operation_state is None, so any operation is not failed.
6352 # All operations are executed in overall.
6353 if not nslcmop_operation_state:
6354 nslcmop_operation_state = "COMPLETED"
6355 db_nsr_update["operational-status"] = old_operational_status
6356
6357 except (DbException, LcmException, N2VCException, K8sException) as e:
6358 self.logger.error(logging_text + "Exit Exception {}".format(e))
6359 exc = e
6360 except asyncio.CancelledError:
6361 self.logger.error(
6362 logging_text + "Cancelled Exception while '{}'".format(step)
6363 )
6364 exc = "Operation was cancelled"
6365 except asyncio.TimeoutError:
6366 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6367 exc = "Timeout"
6368 except Exception as e:
6369 exc = traceback.format_exc()
6370 self.logger.critical(
6371 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6372 exc_info=True,
6373 )
6374 finally:
6375 if exc:
6376 db_nslcmop_update[
6377 "detailed-status"
6378 ] = (
6379 detailed_status
6380 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6381 nslcmop_operation_state = "FAILED"
6382 db_nsr_update["operational-status"] = old_operational_status
6383 if db_nsr:
6384 self._write_ns_status(
6385 nsr_id=nsr_id,
6386 ns_state=db_nsr["nsState"],
6387 current_operation="IDLE",
6388 current_operation_id=None,
6389 other_update=db_nsr_update,
6390 )
6391
6392 self._write_op_status(
6393 op_id=nslcmop_id,
6394 stage="",
6395 error_message=error_description_nslcmop,
6396 operation_state=nslcmop_operation_state,
6397 other_update=db_nslcmop_update,
6398 )
6399
6400 if nslcmop_operation_state:
6401 try:
6402 msg = {
6403 "nsr_id": nsr_id,
6404 "nslcmop_id": nslcmop_id,
6405 "operationState": nslcmop_operation_state,
6406 }
6407 if (
6408 change_type in ("vnf_terminated", "policy_updated")
6409 and member_vnf_index
6410 ):
6411 msg.update({"vnf_member_index": member_vnf_index})
6412 await self.msg.aiowrite("ns", change_type, msg)
6413 except Exception as e:
6414 self.logger.error(
6415 logging_text + "kafka_write notification Exception {}".format(e)
6416 )
6417 self.logger.debug(logging_text + "Exit")
6418 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6419 return nslcmop_operation_state, detailed_status
6420
6421 async def scale(self, nsr_id, nslcmop_id):
6422 # Try to lock HA task here
6423 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6424 if not task_is_locked_by_me:
6425 return
6426
6427 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6428 stage = ["", "", ""]
6429 tasks_dict_info = {}
6430 # ^ stage, step, VIM progress
6431 self.logger.debug(logging_text + "Enter")
6432 # get all needed from database
6433 db_nsr = None
6434 db_nslcmop_update = {}
6435 db_nsr_update = {}
6436 exc = None
6437 # in case of error, indicates what part of scale was failed to put nsr at error status
6438 scale_process = None
6439 old_operational_status = ""
6440 old_config_status = ""
6441 nsi_id = None
6442 try:
6443 # wait for any previous tasks in process
6444 step = "Waiting for previous operations to terminate"
6445 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6446 self._write_ns_status(
6447 nsr_id=nsr_id,
6448 ns_state=None,
6449 current_operation="SCALING",
6450 current_operation_id=nslcmop_id,
6451 )
6452
6453 step = "Getting nslcmop from database"
6454 self.logger.debug(
6455 step + " after having waited for previous tasks to be completed"
6456 )
6457 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6458
6459 step = "Getting nsr from database"
6460 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6461 old_operational_status = db_nsr["operational-status"]
6462 old_config_status = db_nsr["config-status"]
6463
6464 step = "Parsing scaling parameters"
6465 db_nsr_update["operational-status"] = "scaling"
6466 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6467 nsr_deployed = db_nsr["_admin"].get("deployed")
6468
6469 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6470 "scaleByStepData"
6471 ]["member-vnf-index"]
6472 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6473 "scaleByStepData"
6474 ]["scaling-group-descriptor"]
6475 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6476 # for backward compatibility
6477 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6478 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6479 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6480 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6481
6482 step = "Getting vnfr from database"
6483 db_vnfr = self.db.get_one(
6484 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6485 )
6486
6487 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6488
6489 step = "Getting vnfd from database"
6490 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6491
6492 base_folder = db_vnfd["_admin"]["storage"]
6493
6494 step = "Getting scaling-group-descriptor"
6495 scaling_descriptor = find_in_list(
6496 get_scaling_aspect(db_vnfd),
6497 lambda scale_desc: scale_desc["name"] == scaling_group,
6498 )
6499 if not scaling_descriptor:
6500 raise LcmException(
6501 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6502 "at vnfd:scaling-group-descriptor".format(scaling_group)
6503 )
6504
6505 step = "Sending scale order to VIM"
6506 # TODO check if ns is in a proper status
6507 nb_scale_op = 0
6508 if not db_nsr["_admin"].get("scaling-group"):
6509 self.update_db_2(
6510 "nsrs",
6511 nsr_id,
6512 {
6513 "_admin.scaling-group": [
6514 {"name": scaling_group, "nb-scale-op": 0}
6515 ]
6516 },
6517 )
6518 admin_scale_index = 0
6519 else:
6520 for admin_scale_index, admin_scale_info in enumerate(
6521 db_nsr["_admin"]["scaling-group"]
6522 ):
6523 if admin_scale_info["name"] == scaling_group:
6524 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6525 break
6526 else: # not found, set index one plus last element and add new entry with the name
6527 admin_scale_index += 1
6528 db_nsr_update[
6529 "_admin.scaling-group.{}.name".format(admin_scale_index)
6530 ] = scaling_group
6531
6532 vca_scaling_info = []
6533 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6534 if scaling_type == "SCALE_OUT":
6535 if "aspect-delta-details" not in scaling_descriptor:
6536 raise LcmException(
6537 "Aspect delta details not fount in scaling descriptor {}".format(
6538 scaling_descriptor["name"]
6539 )
6540 )
6541 # count if max-instance-count is reached
6542 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6543
6544 scaling_info["scaling_direction"] = "OUT"
6545 scaling_info["vdu-create"] = {}
6546 scaling_info["kdu-create"] = {}
6547 for delta in deltas:
6548 for vdu_delta in delta.get("vdu-delta", {}):
6549 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6550 # vdu_index also provides the number of instance of the targeted vdu
6551 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6552 cloud_init_text = self._get_vdu_cloud_init_content(
6553 vdud, db_vnfd
6554 )
6555 if cloud_init_text:
6556 additional_params = (
6557 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6558 or {}
6559 )
6560 cloud_init_list = []
6561
6562 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6563 max_instance_count = 10
6564 if vdu_profile and "max-number-of-instances" in vdu_profile:
6565 max_instance_count = vdu_profile.get(
6566 "max-number-of-instances", 10
6567 )
6568
6569 default_instance_num = get_number_of_instances(
6570 db_vnfd, vdud["id"]
6571 )
6572 instances_number = vdu_delta.get("number-of-instances", 1)
6573 nb_scale_op += instances_number
6574
6575 new_instance_count = nb_scale_op + default_instance_num
6576 # Control if new count is over max and vdu count is less than max.
6577 # Then assign new instance count
6578 if new_instance_count > max_instance_count > vdu_count:
6579 instances_number = new_instance_count - max_instance_count
6580 else:
6581 instances_number = instances_number
6582
6583 if new_instance_count > max_instance_count:
6584 raise LcmException(
6585 "reached the limit of {} (max-instance-count) "
6586 "scaling-out operations for the "
6587 "scaling-group-descriptor '{}'".format(
6588 nb_scale_op, scaling_group
6589 )
6590 )
6591 for x in range(vdu_delta.get("number-of-instances", 1)):
6592 if cloud_init_text:
6593 # TODO Information of its own ip is not available because db_vnfr is not updated.
6594 additional_params["OSM"] = get_osm_params(
6595 db_vnfr, vdu_delta["id"], vdu_index + x
6596 )
6597 cloud_init_list.append(
6598 self._parse_cloud_init(
6599 cloud_init_text,
6600 additional_params,
6601 db_vnfd["id"],
6602 vdud["id"],
6603 )
6604 )
6605 vca_scaling_info.append(
6606 {
6607 "osm_vdu_id": vdu_delta["id"],
6608 "member-vnf-index": vnf_index,
6609 "type": "create",
6610 "vdu_index": vdu_index + x,
6611 }
6612 )
6613 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6614 for kdu_delta in delta.get("kdu-resource-delta", {}):
6615 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6616 kdu_name = kdu_profile["kdu-name"]
6617 resource_name = kdu_profile.get("resource-name", "")
6618
6619 # Might have different kdus in the same delta
6620 # Should have list for each kdu
6621 if not scaling_info["kdu-create"].get(kdu_name, None):
6622 scaling_info["kdu-create"][kdu_name] = []
6623
6624 kdur = get_kdur(db_vnfr, kdu_name)
6625 if kdur.get("helm-chart"):
6626 k8s_cluster_type = "helm-chart-v3"
6627 self.logger.debug("kdur: {}".format(kdur))
6628 if (
6629 kdur.get("helm-version")
6630 and kdur.get("helm-version") == "v2"
6631 ):
6632 k8s_cluster_type = "helm-chart"
6633 elif kdur.get("juju-bundle"):
6634 k8s_cluster_type = "juju-bundle"
6635 else:
6636 raise LcmException(
6637 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6638 "juju-bundle. Maybe an old NBI version is running".format(
6639 db_vnfr["member-vnf-index-ref"], kdu_name
6640 )
6641 )
6642
6643 max_instance_count = 10
6644 if kdu_profile and "max-number-of-instances" in kdu_profile:
6645 max_instance_count = kdu_profile.get(
6646 "max-number-of-instances", 10
6647 )
6648
6649 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6650 deployed_kdu, _ = get_deployed_kdu(
6651 nsr_deployed, kdu_name, vnf_index
6652 )
6653 if deployed_kdu is None:
6654 raise LcmException(
6655 "KDU '{}' for vnf '{}' not deployed".format(
6656 kdu_name, vnf_index
6657 )
6658 )
6659 kdu_instance = deployed_kdu.get("kdu-instance")
6660 instance_num = await self.k8scluster_map[
6661 k8s_cluster_type
6662 ].get_scale_count(
6663 resource_name,
6664 kdu_instance,
6665 vca_id=vca_id,
6666 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6667 kdu_model=deployed_kdu.get("kdu-model"),
6668 )
6669 kdu_replica_count = instance_num + kdu_delta.get(
6670 "number-of-instances", 1
6671 )
6672
6673 # Control if new count is over max and instance_num is less than max.
6674 # Then assign max instance number to kdu replica count
6675 if kdu_replica_count > max_instance_count > instance_num:
6676 kdu_replica_count = max_instance_count
6677 if kdu_replica_count > max_instance_count:
6678 raise LcmException(
6679 "reached the limit of {} (max-instance-count) "
6680 "scaling-out operations for the "
6681 "scaling-group-descriptor '{}'".format(
6682 instance_num, scaling_group
6683 )
6684 )
6685
6686 for x in range(kdu_delta.get("number-of-instances", 1)):
6687 vca_scaling_info.append(
6688 {
6689 "osm_kdu_id": kdu_name,
6690 "member-vnf-index": vnf_index,
6691 "type": "create",
6692 "kdu_index": instance_num + x - 1,
6693 }
6694 )
6695 scaling_info["kdu-create"][kdu_name].append(
6696 {
6697 "member-vnf-index": vnf_index,
6698 "type": "create",
6699 "k8s-cluster-type": k8s_cluster_type,
6700 "resource-name": resource_name,
6701 "scale": kdu_replica_count,
6702 }
6703 )
6704 elif scaling_type == "SCALE_IN":
6705 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6706
6707 scaling_info["scaling_direction"] = "IN"
6708 scaling_info["vdu-delete"] = {}
6709 scaling_info["kdu-delete"] = {}
6710
6711 for delta in deltas:
6712 for vdu_delta in delta.get("vdu-delta", {}):
6713 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6714 min_instance_count = 0
6715 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6716 if vdu_profile and "min-number-of-instances" in vdu_profile:
6717 min_instance_count = vdu_profile["min-number-of-instances"]
6718
6719 default_instance_num = get_number_of_instances(
6720 db_vnfd, vdu_delta["id"]
6721 )
6722 instance_num = vdu_delta.get("number-of-instances", 1)
6723 nb_scale_op -= instance_num
6724
6725 new_instance_count = nb_scale_op + default_instance_num
6726
6727 if new_instance_count < min_instance_count < vdu_count:
6728 instances_number = min_instance_count - new_instance_count
6729 else:
6730 instances_number = instance_num
6731
6732 if new_instance_count < min_instance_count:
6733 raise LcmException(
6734 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6735 "scaling-group-descriptor '{}'".format(
6736 nb_scale_op, scaling_group
6737 )
6738 )
6739 for x in range(vdu_delta.get("number-of-instances", 1)):
6740 vca_scaling_info.append(
6741 {
6742 "osm_vdu_id": vdu_delta["id"],
6743 "member-vnf-index": vnf_index,
6744 "type": "delete",
6745 "vdu_index": vdu_index - 1 - x,
6746 }
6747 )
6748 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6749 for kdu_delta in delta.get("kdu-resource-delta", {}):
6750 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6751 kdu_name = kdu_profile["kdu-name"]
6752 resource_name = kdu_profile.get("resource-name", "")
6753
6754 if not scaling_info["kdu-delete"].get(kdu_name, None):
6755 scaling_info["kdu-delete"][kdu_name] = []
6756
6757 kdur = get_kdur(db_vnfr, kdu_name)
6758 if kdur.get("helm-chart"):
6759 k8s_cluster_type = "helm-chart-v3"
6760 self.logger.debug("kdur: {}".format(kdur))
6761 if (
6762 kdur.get("helm-version")
6763 and kdur.get("helm-version") == "v2"
6764 ):
6765 k8s_cluster_type = "helm-chart"
6766 elif kdur.get("juju-bundle"):
6767 k8s_cluster_type = "juju-bundle"
6768 else:
6769 raise LcmException(
6770 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6771 "juju-bundle. Maybe an old NBI version is running".format(
6772 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6773 )
6774 )
6775
6776 min_instance_count = 0
6777 if kdu_profile and "min-number-of-instances" in kdu_profile:
6778 min_instance_count = kdu_profile["min-number-of-instances"]
6779
6780 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6781 deployed_kdu, _ = get_deployed_kdu(
6782 nsr_deployed, kdu_name, vnf_index
6783 )
6784 if deployed_kdu is None:
6785 raise LcmException(
6786 "KDU '{}' for vnf '{}' not deployed".format(
6787 kdu_name, vnf_index
6788 )
6789 )
6790 kdu_instance = deployed_kdu.get("kdu-instance")
6791 instance_num = await self.k8scluster_map[
6792 k8s_cluster_type
6793 ].get_scale_count(
6794 resource_name,
6795 kdu_instance,
6796 vca_id=vca_id,
6797 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6798 kdu_model=deployed_kdu.get("kdu-model"),
6799 )
6800 kdu_replica_count = instance_num - kdu_delta.get(
6801 "number-of-instances", 1
6802 )
6803
6804 if kdu_replica_count < min_instance_count < instance_num:
6805 kdu_replica_count = min_instance_count
6806 if kdu_replica_count < min_instance_count:
6807 raise LcmException(
6808 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6809 "scaling-group-descriptor '{}'".format(
6810 instance_num, scaling_group
6811 )
6812 )
6813
6814 for x in range(kdu_delta.get("number-of-instances", 1)):
6815 vca_scaling_info.append(
6816 {
6817 "osm_kdu_id": kdu_name,
6818 "member-vnf-index": vnf_index,
6819 "type": "delete",
6820 "kdu_index": instance_num - x - 1,
6821 }
6822 )
6823 scaling_info["kdu-delete"][kdu_name].append(
6824 {
6825 "member-vnf-index": vnf_index,
6826 "type": "delete",
6827 "k8s-cluster-type": k8s_cluster_type,
6828 "resource-name": resource_name,
6829 "scale": kdu_replica_count,
6830 }
6831 )
6832
6833 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6834 vdu_delete = copy(scaling_info.get("vdu-delete"))
6835 if scaling_info["scaling_direction"] == "IN":
6836 for vdur in reversed(db_vnfr["vdur"]):
6837 if vdu_delete.get(vdur["vdu-id-ref"]):
6838 vdu_delete[vdur["vdu-id-ref"]] -= 1
6839 scaling_info["vdu"].append(
6840 {
6841 "name": vdur.get("name") or vdur.get("vdu-name"),
6842 "vdu_id": vdur["vdu-id-ref"],
6843 "interface": [],
6844 }
6845 )
6846 for interface in vdur["interfaces"]:
6847 scaling_info["vdu"][-1]["interface"].append(
6848 {
6849 "name": interface["name"],
6850 "ip_address": interface["ip-address"],
6851 "mac_address": interface.get("mac-address"),
6852 }
6853 )
6854 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6855
6856 # PRE-SCALE BEGIN
6857 step = "Executing pre-scale vnf-config-primitive"
6858 if scaling_descriptor.get("scaling-config-action"):
6859 for scaling_config_action in scaling_descriptor[
6860 "scaling-config-action"
6861 ]:
6862 if (
6863 scaling_config_action.get("trigger") == "pre-scale-in"
6864 and scaling_type == "SCALE_IN"
6865 ) or (
6866 scaling_config_action.get("trigger") == "pre-scale-out"
6867 and scaling_type == "SCALE_OUT"
6868 ):
6869 vnf_config_primitive = scaling_config_action[
6870 "vnf-config-primitive-name-ref"
6871 ]
6872 step = db_nslcmop_update[
6873 "detailed-status"
6874 ] = "executing pre-scale scaling-config-action '{}'".format(
6875 vnf_config_primitive
6876 )
6877
6878 # look for primitive
6879 for config_primitive in (
6880 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6881 ).get("config-primitive", ()):
6882 if config_primitive["name"] == vnf_config_primitive:
6883 break
6884 else:
6885 raise LcmException(
6886 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6887 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6888 "primitive".format(scaling_group, vnf_config_primitive)
6889 )
6890
6891 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6892 if db_vnfr.get("additionalParamsForVnf"):
6893 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6894
6895 scale_process = "VCA"
6896 db_nsr_update["config-status"] = "configuring pre-scaling"
6897 primitive_params = self._map_primitive_params(
6898 config_primitive, {}, vnfr_params
6899 )
6900
6901 # Pre-scale retry check: Check if this sub-operation has been executed before
6902 op_index = self._check_or_add_scale_suboperation(
6903 db_nslcmop,
6904 vnf_index,
6905 vnf_config_primitive,
6906 primitive_params,
6907 "PRE-SCALE",
6908 )
6909 if op_index == self.SUBOPERATION_STATUS_SKIP:
6910 # Skip sub-operation
6911 result = "COMPLETED"
6912 result_detail = "Done"
6913 self.logger.debug(
6914 logging_text
6915 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6916 vnf_config_primitive, result, result_detail
6917 )
6918 )
6919 else:
6920 if op_index == self.SUBOPERATION_STATUS_NEW:
6921 # New sub-operation: Get index of this sub-operation
6922 op_index = (
6923 len(db_nslcmop.get("_admin", {}).get("operations"))
6924 - 1
6925 )
6926 self.logger.debug(
6927 logging_text
6928 + "vnf_config_primitive={} New sub-operation".format(
6929 vnf_config_primitive
6930 )
6931 )
6932 else:
6933 # retry: Get registered params for this existing sub-operation
6934 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6935 op_index
6936 ]
6937 vnf_index = op.get("member_vnf_index")
6938 vnf_config_primitive = op.get("primitive")
6939 primitive_params = op.get("primitive_params")
6940 self.logger.debug(
6941 logging_text
6942 + "vnf_config_primitive={} Sub-operation retry".format(
6943 vnf_config_primitive
6944 )
6945 )
6946 # Execute the primitive, either with new (first-time) or registered (reintent) args
6947 ee_descriptor_id = config_primitive.get(
6948 "execution-environment-ref"
6949 )
6950 primitive_name = config_primitive.get(
6951 "execution-environment-primitive", vnf_config_primitive
6952 )
6953 ee_id, vca_type = self._look_for_deployed_vca(
6954 nsr_deployed["VCA"],
6955 member_vnf_index=vnf_index,
6956 vdu_id=None,
6957 vdu_count_index=None,
6958 ee_descriptor_id=ee_descriptor_id,
6959 )
6960 result, result_detail = await self._ns_execute_primitive(
6961 ee_id,
6962 primitive_name,
6963 primitive_params,
6964 vca_type=vca_type,
6965 vca_id=vca_id,
6966 )
6967 self.logger.debug(
6968 logging_text
6969 + "vnf_config_primitive={} Done with result {} {}".format(
6970 vnf_config_primitive, result, result_detail
6971 )
6972 )
6973 # Update operationState = COMPLETED | FAILED
6974 self._update_suboperation_status(
6975 db_nslcmop, op_index, result, result_detail
6976 )
6977
6978 if result == "FAILED":
6979 raise LcmException(result_detail)
6980 db_nsr_update["config-status"] = old_config_status
6981 scale_process = None
6982 # PRE-SCALE END
6983
6984 db_nsr_update[
6985 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6986 ] = nb_scale_op
6987 db_nsr_update[
6988 "_admin.scaling-group.{}.time".format(admin_scale_index)
6989 ] = time()
6990
6991 # SCALE-IN VCA - BEGIN
6992 if vca_scaling_info:
6993 step = db_nslcmop_update[
6994 "detailed-status"
6995 ] = "Deleting the execution environments"
6996 scale_process = "VCA"
6997 for vca_info in vca_scaling_info:
6998 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6999 member_vnf_index = str(vca_info["member-vnf-index"])
7000 self.logger.debug(
7001 logging_text + "vdu info: {}".format(vca_info)
7002 )
7003 if vca_info.get("osm_vdu_id"):
7004 vdu_id = vca_info["osm_vdu_id"]
7005 vdu_index = int(vca_info["vdu_index"])
7006 stage[
7007 1
7008 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7009 member_vnf_index, vdu_id, vdu_index
7010 )
7011 stage[2] = step = "Scaling in VCA"
7012 self._write_op_status(op_id=nslcmop_id, stage=stage)
7013 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
7014 config_update = db_nsr["configurationStatus"]
7015 for vca_index, vca in enumerate(vca_update):
7016 if (
7017 (vca or vca.get("ee_id"))
7018 and vca["member-vnf-index"] == member_vnf_index
7019 and vca["vdu_count_index"] == vdu_index
7020 ):
7021 if vca.get("vdu_id"):
7022 config_descriptor = get_configuration(
7023 db_vnfd, vca.get("vdu_id")
7024 )
7025 elif vca.get("kdu_name"):
7026 config_descriptor = get_configuration(
7027 db_vnfd, vca.get("kdu_name")
7028 )
7029 else:
7030 config_descriptor = get_configuration(
7031 db_vnfd, db_vnfd["id"]
7032 )
7033 operation_params = (
7034 db_nslcmop.get("operationParams") or {}
7035 )
7036 exec_terminate_primitives = not operation_params.get(
7037 "skip_terminate_primitives"
7038 ) and vca.get("needed_terminate")
7039 task = asyncio.ensure_future(
7040 asyncio.wait_for(
7041 self.destroy_N2VC(
7042 logging_text,
7043 db_nslcmop,
7044 vca,
7045 config_descriptor,
7046 vca_index,
7047 destroy_ee=True,
7048 exec_primitives=exec_terminate_primitives,
7049 scaling_in=True,
7050 vca_id=vca_id,
7051 ),
7052 timeout=self.timeout.charm_delete,
7053 )
7054 )
7055 tasks_dict_info[task] = "Terminating VCA {}".format(
7056 vca.get("ee_id")
7057 )
7058 del vca_update[vca_index]
7059 del config_update[vca_index]
7060 # wait for pending tasks of terminate primitives
7061 if tasks_dict_info:
7062 self.logger.debug(
7063 logging_text
7064 + "Waiting for tasks {}".format(
7065 list(tasks_dict_info.keys())
7066 )
7067 )
7068 error_list = await self._wait_for_tasks(
7069 logging_text,
7070 tasks_dict_info,
7071 min(
7072 self.timeout.charm_delete, self.timeout.ns_terminate
7073 ),
7074 stage,
7075 nslcmop_id,
7076 )
7077 tasks_dict_info.clear()
7078 if error_list:
7079 raise LcmException("; ".join(error_list))
7080
7081 db_vca_and_config_update = {
7082 "_admin.deployed.VCA": vca_update,
7083 "configurationStatus": config_update,
7084 }
7085 self.update_db_2(
7086 "nsrs", db_nsr["_id"], db_vca_and_config_update
7087 )
7088 scale_process = None
7089 # SCALE-IN VCA - END
7090
7091 # SCALE RO - BEGIN
7092 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
7093 scale_process = "RO"
7094 if self.ro_config.ng:
7095 await self._scale_ng_ro(
7096 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
7097 )
7098 scaling_info.pop("vdu-create", None)
7099 scaling_info.pop("vdu-delete", None)
7100
7101 scale_process = None
7102 # SCALE RO - END
7103
7104 # SCALE KDU - BEGIN
7105 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
7106 scale_process = "KDU"
7107 await self._scale_kdu(
7108 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7109 )
7110 scaling_info.pop("kdu-create", None)
7111 scaling_info.pop("kdu-delete", None)
7112
7113 scale_process = None
7114 # SCALE KDU - END
7115
7116 if db_nsr_update:
7117 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7118
7119 # SCALE-UP VCA - BEGIN
7120 if vca_scaling_info:
7121 step = db_nslcmop_update[
7122 "detailed-status"
7123 ] = "Creating new execution environments"
7124 scale_process = "VCA"
7125 for vca_info in vca_scaling_info:
7126 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
7127 member_vnf_index = str(vca_info["member-vnf-index"])
7128 self.logger.debug(
7129 logging_text + "vdu info: {}".format(vca_info)
7130 )
7131 vnfd_id = db_vnfr["vnfd-ref"]
7132 if vca_info.get("osm_vdu_id"):
7133 vdu_index = int(vca_info["vdu_index"])
7134 deploy_params = {"OSM": get_osm_params(db_vnfr)}
7135 if db_vnfr.get("additionalParamsForVnf"):
7136 deploy_params.update(
7137 parse_yaml_strings(
7138 db_vnfr["additionalParamsForVnf"].copy()
7139 )
7140 )
7141 descriptor_config = get_configuration(
7142 db_vnfd, db_vnfd["id"]
7143 )
7144 if descriptor_config:
7145 vdu_id = None
7146 vdu_name = None
7147 kdu_name = None
7148 kdu_index = None
7149 self._deploy_n2vc(
7150 logging_text=logging_text
7151 + "member_vnf_index={} ".format(member_vnf_index),
7152 db_nsr=db_nsr,
7153 db_vnfr=db_vnfr,
7154 nslcmop_id=nslcmop_id,
7155 nsr_id=nsr_id,
7156 nsi_id=nsi_id,
7157 vnfd_id=vnfd_id,
7158 vdu_id=vdu_id,
7159 kdu_name=kdu_name,
7160 kdu_index=kdu_index,
7161 member_vnf_index=member_vnf_index,
7162 vdu_index=vdu_index,
7163 vdu_name=vdu_name,
7164 deploy_params=deploy_params,
7165 descriptor_config=descriptor_config,
7166 base_folder=base_folder,
7167 task_instantiation_info=tasks_dict_info,
7168 stage=stage,
7169 )
7170 vdu_id = vca_info["osm_vdu_id"]
7171 vdur = find_in_list(
7172 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7173 )
7174 descriptor_config = get_configuration(db_vnfd, vdu_id)
7175 if vdur.get("additionalParams"):
7176 deploy_params_vdu = parse_yaml_strings(
7177 vdur["additionalParams"]
7178 )
7179 else:
7180 deploy_params_vdu = deploy_params
7181 deploy_params_vdu["OSM"] = get_osm_params(
7182 db_vnfr, vdu_id, vdu_count_index=vdu_index
7183 )
7184 if descriptor_config:
7185 vdu_name = None
7186 kdu_name = None
7187 kdu_index = None
7188 stage[
7189 1
7190 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7191 member_vnf_index, vdu_id, vdu_index
7192 )
7193 stage[2] = step = "Scaling out VCA"
7194 self._write_op_status(op_id=nslcmop_id, stage=stage)
7195 self._deploy_n2vc(
7196 logging_text=logging_text
7197 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7198 member_vnf_index, vdu_id, vdu_index
7199 ),
7200 db_nsr=db_nsr,
7201 db_vnfr=db_vnfr,
7202 nslcmop_id=nslcmop_id,
7203 nsr_id=nsr_id,
7204 nsi_id=nsi_id,
7205 vnfd_id=vnfd_id,
7206 vdu_id=vdu_id,
7207 kdu_name=kdu_name,
7208 member_vnf_index=member_vnf_index,
7209 vdu_index=vdu_index,
7210 kdu_index=kdu_index,
7211 vdu_name=vdu_name,
7212 deploy_params=deploy_params_vdu,
7213 descriptor_config=descriptor_config,
7214 base_folder=base_folder,
7215 task_instantiation_info=tasks_dict_info,
7216 stage=stage,
7217 )
7218 # SCALE-UP VCA - END
7219 scale_process = None
7220
7221 # POST-SCALE BEGIN
7222 # execute primitive service POST-SCALING
7223 step = "Executing post-scale vnf-config-primitive"
7224 if scaling_descriptor.get("scaling-config-action"):
7225 for scaling_config_action in scaling_descriptor[
7226 "scaling-config-action"
7227 ]:
7228 if (
7229 scaling_config_action.get("trigger") == "post-scale-in"
7230 and scaling_type == "SCALE_IN"
7231 ) or (
7232 scaling_config_action.get("trigger") == "post-scale-out"
7233 and scaling_type == "SCALE_OUT"
7234 ):
7235 vnf_config_primitive = scaling_config_action[
7236 "vnf-config-primitive-name-ref"
7237 ]
7238 step = db_nslcmop_update[
7239 "detailed-status"
7240 ] = "executing post-scale scaling-config-action '{}'".format(
7241 vnf_config_primitive
7242 )
7243
7244 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7245 if db_vnfr.get("additionalParamsForVnf"):
7246 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7247
7248 # look for primitive
7249 for config_primitive in (
7250 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7251 ).get("config-primitive", ()):
7252 if config_primitive["name"] == vnf_config_primitive:
7253 break
7254 else:
7255 raise LcmException(
7256 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7257 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7258 "config-primitive".format(
7259 scaling_group, vnf_config_primitive
7260 )
7261 )
7262 scale_process = "VCA"
7263 db_nsr_update["config-status"] = "configuring post-scaling"
7264 primitive_params = self._map_primitive_params(
7265 config_primitive, {}, vnfr_params
7266 )
7267
7268 # Post-scale retry check: Check if this sub-operation has been executed before
7269 op_index = self._check_or_add_scale_suboperation(
7270 db_nslcmop,
7271 vnf_index,
7272 vnf_config_primitive,
7273 primitive_params,
7274 "POST-SCALE",
7275 )
7276 if op_index == self.SUBOPERATION_STATUS_SKIP:
7277 # Skip sub-operation
7278 result = "COMPLETED"
7279 result_detail = "Done"
7280 self.logger.debug(
7281 logging_text
7282 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7283 vnf_config_primitive, result, result_detail
7284 )
7285 )
7286 else:
7287 if op_index == self.SUBOPERATION_STATUS_NEW:
7288 # New sub-operation: Get index of this sub-operation
7289 op_index = (
7290 len(db_nslcmop.get("_admin", {}).get("operations"))
7291 - 1
7292 )
7293 self.logger.debug(
7294 logging_text
7295 + "vnf_config_primitive={} New sub-operation".format(
7296 vnf_config_primitive
7297 )
7298 )
7299 else:
7300 # retry: Get registered params for this existing sub-operation
7301 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7302 op_index
7303 ]
7304 vnf_index = op.get("member_vnf_index")
7305 vnf_config_primitive = op.get("primitive")
7306 primitive_params = op.get("primitive_params")
7307 self.logger.debug(
7308 logging_text
7309 + "vnf_config_primitive={} Sub-operation retry".format(
7310 vnf_config_primitive
7311 )
7312 )
7313 # Execute the primitive, either with new (first-time) or registered (reintent) args
7314 ee_descriptor_id = config_primitive.get(
7315 "execution-environment-ref"
7316 )
7317 primitive_name = config_primitive.get(
7318 "execution-environment-primitive", vnf_config_primitive
7319 )
7320 ee_id, vca_type = self._look_for_deployed_vca(
7321 nsr_deployed["VCA"],
7322 member_vnf_index=vnf_index,
7323 vdu_id=None,
7324 vdu_count_index=None,
7325 ee_descriptor_id=ee_descriptor_id,
7326 )
7327 result, result_detail = await self._ns_execute_primitive(
7328 ee_id,
7329 primitive_name,
7330 primitive_params,
7331 vca_type=vca_type,
7332 vca_id=vca_id,
7333 )
7334 self.logger.debug(
7335 logging_text
7336 + "vnf_config_primitive={} Done with result {} {}".format(
7337 vnf_config_primitive, result, result_detail
7338 )
7339 )
7340 # Update operationState = COMPLETED | FAILED
7341 self._update_suboperation_status(
7342 db_nslcmop, op_index, result, result_detail
7343 )
7344
7345 if result == "FAILED":
7346 raise LcmException(result_detail)
7347 db_nsr_update["config-status"] = old_config_status
7348 scale_process = None
7349 # POST-SCALE END
7350
7351 db_nsr_update[
7352 "detailed-status"
7353 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7354 db_nsr_update["operational-status"] = (
7355 "running"
7356 if old_operational_status == "failed"
7357 else old_operational_status
7358 )
7359 db_nsr_update["config-status"] = old_config_status
7360 return
7361 except (
7362 ROclient.ROClientException,
7363 DbException,
7364 LcmException,
7365 NgRoException,
7366 ) as e:
7367 self.logger.error(logging_text + "Exit Exception {}".format(e))
7368 exc = e
7369 except asyncio.CancelledError:
7370 self.logger.error(
7371 logging_text + "Cancelled Exception while '{}'".format(step)
7372 )
7373 exc = "Operation was cancelled"
7374 except Exception as e:
7375 exc = traceback.format_exc()
7376 self.logger.critical(
7377 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7378 exc_info=True,
7379 )
7380 finally:
7381 self._write_ns_status(
7382 nsr_id=nsr_id,
7383 ns_state=None,
7384 current_operation="IDLE",
7385 current_operation_id=None,
7386 )
7387 if tasks_dict_info:
7388 stage[1] = "Waiting for instantiate pending tasks."
7389 self.logger.debug(logging_text + stage[1])
7390 exc = await self._wait_for_tasks(
7391 logging_text,
7392 tasks_dict_info,
7393 self.timeout.ns_deploy,
7394 stage,
7395 nslcmop_id,
7396 nsr_id=nsr_id,
7397 )
7398 if exc:
7399 db_nslcmop_update[
7400 "detailed-status"
7401 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7402 nslcmop_operation_state = "FAILED"
7403 if db_nsr:
7404 db_nsr_update["operational-status"] = old_operational_status
7405 db_nsr_update["config-status"] = old_config_status
7406 db_nsr_update["detailed-status"] = ""
7407 if scale_process:
7408 if "VCA" in scale_process:
7409 db_nsr_update["config-status"] = "failed"
7410 if "RO" in scale_process:
7411 db_nsr_update["operational-status"] = "failed"
7412 db_nsr_update[
7413 "detailed-status"
7414 ] = "FAILED scaling nslcmop={} {}: {}".format(
7415 nslcmop_id, step, exc
7416 )
7417 else:
7418 error_description_nslcmop = None
7419 nslcmop_operation_state = "COMPLETED"
7420 db_nslcmop_update["detailed-status"] = "Done"
7421
7422 self._write_op_status(
7423 op_id=nslcmop_id,
7424 stage="",
7425 error_message=error_description_nslcmop,
7426 operation_state=nslcmop_operation_state,
7427 other_update=db_nslcmop_update,
7428 )
7429 if db_nsr:
7430 self._write_ns_status(
7431 nsr_id=nsr_id,
7432 ns_state=None,
7433 current_operation="IDLE",
7434 current_operation_id=None,
7435 other_update=db_nsr_update,
7436 )
7437
7438 if nslcmop_operation_state:
7439 try:
7440 msg = {
7441 "nsr_id": nsr_id,
7442 "nslcmop_id": nslcmop_id,
7443 "operationState": nslcmop_operation_state,
7444 }
7445 await self.msg.aiowrite("ns", "scaled", msg)
7446 except Exception as e:
7447 self.logger.error(
7448 logging_text + "kafka_write notification Exception {}".format(e)
7449 )
7450 self.logger.debug(logging_text + "Exit")
7451 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7452
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale the KDUs listed in scaling_info through the K8s cluster connectors.

        For every KDU entry this optionally runs the descriptor's
        terminate-config-primitives (on a "delete" scaling entry) or
        initial-config-primitives (on a "create" scaling entry), and always
        performs the scale operation itself on the deployed KDU instance.

        :param logging_text: prefix for every log message of this task
        :param nsr_id: NS record id (used to address the "nsrs" document)
        :param nsr_deployed: "_admin.deployed" section of the NS record
        :param db_vnfd: vnfd descriptor of the VNF owning the KDUs
        :param vca_id: VCA id passed through to the K8s connector calls
        :param scaling_info: dict with a "kdu-create" or "kdu-delete" entry,
            each mapping kdu_name -> list of per-KDU scaling-info dicts
        """
        # Only one of kdu-create / kdu-delete is expected per operation; both
        # carry the same structure, so a single walk handles either case.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # db location where the connector reports progress/status
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # Run terminate-config-primitives directly only when no juju
                    # execution environment is referenced for this KDU.
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must run in their declared "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # outer wait_for guards against a hung connector call
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    total_timeout=self.timeout.primitive,
                                    vca_id=vca_id,
                                ),
                                timeout=self.timeout.primitive
                                * self.timeout.primitive_outer_factor,
                            )

                # The scale itself runs for both "create" and "delete" entries.
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance=kdu_instance,
                        scale=scale,
                        resource_name=kdu_scaling_info["resource-name"],
                        total_timeout=self.timeout.scale_on_error,
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout.scale_on_error
                    * self.timeout.scale_on_error_outer_factor,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # Run initial-config-primitives directly only when no juju
                    # execution environment is referenced for this KDU.
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must run in their declared "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                # NOTE(review): hardcoded 600 s outer timeout,
                                # unlike the configurable timeouts above — confirm
                                timeout=600,
                            )
7562
7563 async def _scale_ng_ro(
7564 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7565 ):
7566 nsr_id = db_nslcmop["nsInstanceId"]
7567 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7568 db_vnfrs = {}
7569
7570 # read from db: vnfd's for every vnf
7571 db_vnfds = []
7572
7573 # for each vnf in ns, read vnfd
7574 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7575 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7576 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7577 # if we haven't this vnfd, read it from db
7578 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7579 # read from db
7580 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7581 db_vnfds.append(vnfd)
7582 n2vc_key = self.n2vc.get_public_key()
7583 n2vc_key_list = [n2vc_key]
7584 self.scale_vnfr(
7585 db_vnfr,
7586 vdu_scaling_info.get("vdu-create"),
7587 vdu_scaling_info.get("vdu-delete"),
7588 mark_delete=True,
7589 )
7590 # db_vnfr has been updated, update db_vnfrs to use it
7591 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7592 await self._instantiate_ng_ro(
7593 logging_text,
7594 nsr_id,
7595 db_nsd,
7596 db_nsr,
7597 db_nslcmop,
7598 db_vnfrs,
7599 db_vnfds,
7600 n2vc_key_list,
7601 stage=stage,
7602 start_deploy=time(),
7603 timeout_ns_deploy=self.timeout.ns_deploy,
7604 )
7605 if vdu_scaling_info.get("vdu-delete"):
7606 self.scale_vnfr(
7607 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7608 )
7609
7610 async def extract_prometheus_scrape_jobs(
7611 self,
7612 ee_id: str,
7613 artifact_path: str,
7614 ee_config_descriptor: dict,
7615 vnfr_id: str,
7616 nsr_id: str,
7617 target_ip: str,
7618 element_type: str,
7619 vnf_member_index: str = "",
7620 vdu_id: str = "",
7621 vdu_index: int = None,
7622 kdu_name: str = "",
7623 kdu_index: int = None,
7624 ) -> dict:
7625 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7626 This method will wait until the corresponding VDU or KDU is fully instantiated
7627
7628 Args:
7629 ee_id (str): Execution Environment ID
7630 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7631 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7632 vnfr_id (str): VNFR ID where this EE applies
7633 nsr_id (str): NSR ID where this EE applies
7634 target_ip (str): VDU/KDU instance IP address
7635 element_type (str): NS or VNF or VDU or KDU
7636 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7637 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7638 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7639 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7640 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7641
7642 Raises:
7643 LcmException: When the VDU or KDU instance was not found in an hour
7644
7645 Returns:
7646 _type_: Prometheus jobs
7647 """
7648 # default the vdur and kdur names to an empty string, to avoid any later
7649 # problem with Prometheus when the element type is not VDU or KDU
7650 vdur_name = ""
7651 kdur_name = ""
7652
7653 # look if exist a file called 'prometheus*.j2' and
7654 artifact_content = self.fs.dir_ls(artifact_path)
7655 job_file = next(
7656 (
7657 f
7658 for f in artifact_content
7659 if f.startswith("prometheus") and f.endswith(".j2")
7660 ),
7661 None,
7662 )
7663 if not job_file:
7664 return
7665 self.logger.debug("Artifact path{}".format(artifact_path))
7666 self.logger.debug("job file{}".format(job_file))
7667 with self.fs.file_open((artifact_path, job_file), "r") as f:
7668 job_data = f.read()
7669
7670 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7671 if element_type in ("VDU", "KDU"):
7672 for _ in range(360):
7673 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7674 if vdu_id and vdu_index is not None:
7675 vdur = next(
7676 (
7677 x
7678 for x in get_iterable(db_vnfr, "vdur")
7679 if (
7680 x.get("vdu-id-ref") == vdu_id
7681 and x.get("count-index") == vdu_index
7682 )
7683 ),
7684 {},
7685 )
7686 if vdur.get("name"):
7687 vdur_name = vdur.get("name")
7688 break
7689 if kdu_name and kdu_index is not None:
7690 kdur = next(
7691 (
7692 x
7693 for x in get_iterable(db_vnfr, "kdur")
7694 if (
7695 x.get("kdu-name") == kdu_name
7696 and x.get("count-index") == kdu_index
7697 )
7698 ),
7699 {},
7700 )
7701 if kdur.get("name"):
7702 kdur_name = kdur.get("name")
7703 break
7704
7705 await asyncio.sleep(10)
7706 else:
7707 if vdu_id and vdu_index is not None:
7708 raise LcmException(
7709 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7710 )
7711 if kdu_name and kdu_index is not None:
7712 raise LcmException(
7713 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7714 )
7715
7716 if ee_id is not None:
7717 _, namespace, helm_id = get_ee_id_parts(
7718 ee_id
7719 ) # get namespace and EE gRPC service name
7720 host_name = f'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
7721 host_port = "80"
7722 vnfr_id = vnfr_id.replace("-", "")
7723 variables = {
7724 "JOB_NAME": vnfr_id,
7725 "TARGET_IP": target_ip,
7726 "EXPORTER_POD_IP": host_name,
7727 "EXPORTER_POD_PORT": host_port,
7728 "NSR_ID": nsr_id,
7729 "VNF_MEMBER_INDEX": vnf_member_index,
7730 "VDUR_NAME": vdur_name,
7731 "KDUR_NAME": kdur_name,
7732 "ELEMENT_TYPE": element_type,
7733 }
7734 else:
7735 metric_path = ee_config_descriptor["metric-path"]
7736 target_port = ee_config_descriptor["metric-port"]
7737 vnfr_id = vnfr_id.replace("-", "")
7738 variables = {
7739 "JOB_NAME": vnfr_id,
7740 "TARGET_IP": target_ip,
7741 "TARGET_PORT": target_port,
7742 "METRIC_PATH": metric_path,
7743 }
7744
7745 job_list = parse_job(job_data, variables)
7746 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7747 for job in job_list:
7748 if (
7749 not isinstance(job.get("job_name"), str)
7750 or vnfr_id not in job["job_name"]
7751 ):
7752 job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
7753 job["nsr_id"] = nsr_id
7754 job["vnfr_id"] = vnfr_id
7755 return job_list
7756
7757 async def rebuild_start_stop(
7758 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7759 ):
7760 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7761 self.logger.info(logging_text + "Enter")
7762 stage = ["Preparing the environment", ""]
7763 # database nsrs record
7764 db_nsr_update = {}
7765 vdu_vim_name = None
7766 vim_vm_id = None
7767 # in case of error, indicates what part of scale was failed to put nsr at error status
7768 start_deploy = time()
7769 try:
7770 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7771 vim_account_id = db_vnfr.get("vim-account-id")
7772 vim_info_key = "vim:" + vim_account_id
7773 vdu_id = additional_param["vdu_id"]
7774 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7775 vdur = find_in_list(
7776 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7777 )
7778 if vdur:
7779 vdu_vim_name = vdur["name"]
7780 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7781 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7782 else:
7783 raise LcmException("Target vdu is not found")
7784 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7785 # wait for any previous tasks in process
7786 stage[1] = "Waiting for previous operations to terminate"
7787 self.logger.info(stage[1])
7788 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7789
7790 stage[1] = "Reading from database."
7791 self.logger.info(stage[1])
7792 self._write_ns_status(
7793 nsr_id=nsr_id,
7794 ns_state=None,
7795 current_operation=operation_type.upper(),
7796 current_operation_id=nslcmop_id,
7797 )
7798 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7799
7800 # read from db: ns
7801 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7802 db_nsr_update["operational-status"] = operation_type
7803 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7804 # Payload for RO
7805 desc = {
7806 operation_type: {
7807 "vim_vm_id": vim_vm_id,
7808 "vnf_id": vnf_id,
7809 "vdu_index": additional_param["count-index"],
7810 "vdu_id": vdur["id"],
7811 "target_vim": target_vim,
7812 "vim_account_id": vim_account_id,
7813 }
7814 }
7815 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7816 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7817 self.logger.info("ro nsr id: {}".format(nsr_id))
7818 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7819 self.logger.info("response from RO: {}".format(result_dict))
7820 action_id = result_dict["action_id"]
7821 await self._wait_ng_ro(
7822 nsr_id,
7823 action_id,
7824 nslcmop_id,
7825 start_deploy,
7826 self.timeout.operate,
7827 None,
7828 "start_stop_rebuild",
7829 )
7830 return "COMPLETED", "Done"
7831 except (ROclient.ROClientException, DbException, LcmException) as e:
7832 self.logger.error("Exit Exception {}".format(e))
7833 exc = e
7834 except asyncio.CancelledError:
7835 self.logger.error("Cancelled Exception while '{}'".format(stage))
7836 exc = "Operation was cancelled"
7837 except Exception as e:
7838 exc = traceback.format_exc()
7839 self.logger.critical(
7840 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7841 )
7842 return "FAILED", "Error in operate VNF {}".format(exc)
7843
7844 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7845 """
7846 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7847
7848 :param: vim_account_id: VIM Account ID
7849
7850 :return: (cloud_name, cloud_credential)
7851 """
7852 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7853 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7854
7855 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7856 """
7857 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7858
7859 :param: vim_account_id: VIM Account ID
7860
7861 :return: (cloud_name, cloud_credential)
7862 """
7863 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7864 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7865
7866 async def migrate(self, nsr_id, nslcmop_id):
7867 """
7868 Migrate VNFs and VDUs instances in a NS
7869
7870 :param: nsr_id: NS Instance ID
7871 :param: nslcmop_id: nslcmop ID of migrate
7872
7873 """
7874 # Try to lock HA task here
7875 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7876 if not task_is_locked_by_me:
7877 return
7878 logging_text = "Task ns={} migrate ".format(nsr_id)
7879 self.logger.debug(logging_text + "Enter")
7880 # get all needed from database
7881 db_nslcmop = None
7882 db_nslcmop_update = {}
7883 nslcmop_operation_state = None
7884 db_nsr_update = {}
7885 target = {}
7886 exc = None
7887 # in case of error, indicates what part of scale was failed to put nsr at error status
7888 start_deploy = time()
7889
7890 try:
7891 # wait for any previous tasks in process
7892 step = "Waiting for previous operations to terminate"
7893 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7894
7895 self._write_ns_status(
7896 nsr_id=nsr_id,
7897 ns_state=None,
7898 current_operation="MIGRATING",
7899 current_operation_id=nslcmop_id,
7900 )
7901 step = "Getting nslcmop from database"
7902 self.logger.debug(
7903 step + " after having waited for previous tasks to be completed"
7904 )
7905 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7906 migrate_params = db_nslcmop.get("operationParams")
7907
7908 target = {}
7909 target.update(migrate_params)
7910 desc = await self.RO.migrate(nsr_id, target)
7911 self.logger.debug("RO return > {}".format(desc))
7912 action_id = desc["action_id"]
7913 await self._wait_ng_ro(
7914 nsr_id,
7915 action_id,
7916 nslcmop_id,
7917 start_deploy,
7918 self.timeout.migrate,
7919 operation="migrate",
7920 )
7921 except (ROclient.ROClientException, DbException, LcmException) as e:
7922 self.logger.error("Exit Exception {}".format(e))
7923 exc = e
7924 except asyncio.CancelledError:
7925 self.logger.error("Cancelled Exception while '{}'".format(step))
7926 exc = "Operation was cancelled"
7927 except Exception as e:
7928 exc = traceback.format_exc()
7929 self.logger.critical(
7930 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7931 )
7932 finally:
7933 self._write_ns_status(
7934 nsr_id=nsr_id,
7935 ns_state=None,
7936 current_operation="IDLE",
7937 current_operation_id=None,
7938 )
7939 if exc:
7940 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7941 nslcmop_operation_state = "FAILED"
7942 else:
7943 nslcmop_operation_state = "COMPLETED"
7944 db_nslcmop_update["detailed-status"] = "Done"
7945 db_nsr_update["detailed-status"] = "Done"
7946
7947 self._write_op_status(
7948 op_id=nslcmop_id,
7949 stage="",
7950 error_message="",
7951 operation_state=nslcmop_operation_state,
7952 other_update=db_nslcmop_update,
7953 )
7954 if nslcmop_operation_state:
7955 try:
7956 msg = {
7957 "nsr_id": nsr_id,
7958 "nslcmop_id": nslcmop_id,
7959 "operationState": nslcmop_operation_state,
7960 }
7961 await self.msg.aiowrite("ns", "migrated", msg)
7962 except Exception as e:
7963 self.logger.error(
7964 logging_text + "kafka_write notification Exception {}".format(e)
7965 )
7966 self.logger.debug(logging_text + "Exit")
7967 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7968
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS: ask RO to re-create the target VMs and then re-deploy the
        execution environments (N2VC) of the affected VNFs/VDUs.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return: None. Outcome is persisted in 'nslcmops'/'nsrs' and notified
            through a kafka "healed" message.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance already owns this operation
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        # previous statuses are kept to restore them if the heal fails
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            # flag RO as healing so _wait_heal_ro can poll for completion
            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Sending heal order to VIM"
            await self.heal_RO(
                logging_text=logging_text,
                nsr_id=nsr_id,
                db_nslcmop=db_nslcmop,
                stage=stage,
            )
            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
                        "vdu", []
                    )
                    if not target_vdu_list:
                        # No explicit VDU list given: heal every existing VDU of
                        # this VNF, propagating the VNF-level run-day1 flag.
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
                                "run-day1", False
                            )
                            vdu_to_be_healed = {
                                "vdu-id": vdu_name,
                                "count-index": vdu_index,
                                "run-day1": vdu_run_day1,
                            }
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf.get(
                            "additionalParams", {}
                        ).get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf[
                                "additionalParams"
                            ].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index", 0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if (
                                    instance["vdu-name"] == vdu_name
                                    and instance["count-index"] == vdu_index
                                ):
                                    target_instance = instance
                                    break
                            # NOTE(review): target_instance may still be None if no
                            # vdur entry matches (vdu-name/count-index mismatch);
                            # the .get() below would then raise AttributeError — confirm
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the N2VC tasks launched by _heal_n2vc before reporting status
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout.ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore the statuses read at the beginning of the operation
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # notify the result through kafka; a notification failure is logged but
            # does not change the operation outcome
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8254
8255 async def heal_RO(
8256 self,
8257 logging_text,
8258 nsr_id,
8259 db_nslcmop,
8260 stage,
8261 ):
8262 """
8263 Heal at RO
8264 :param logging_text: preffix text to use at logging
8265 :param nsr_id: nsr identity
8266 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8267 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8268 :return: None or exception
8269 """
8270
8271 def get_vim_account(vim_account_id):
8272 nonlocal db_vims
8273 if vim_account_id in db_vims:
8274 return db_vims[vim_account_id]
8275 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8276 db_vims[vim_account_id] = db_vim
8277 return db_vim
8278
8279 try:
8280 start_heal = time()
8281 ns_params = db_nslcmop.get("operationParams")
8282 if ns_params and ns_params.get("timeout_ns_heal"):
8283 timeout_ns_heal = ns_params["timeout_ns_heal"]
8284 else:
8285 timeout_ns_heal = self.timeout.ns_heal
8286
8287 db_vims = {}
8288
8289 nslcmop_id = db_nslcmop["_id"]
8290 target = {
8291 "action_id": nslcmop_id,
8292 }
8293 self.logger.warning(
8294 "db_nslcmop={} and timeout_ns_heal={}".format(
8295 db_nslcmop, timeout_ns_heal
8296 )
8297 )
8298 target.update(db_nslcmop.get("operationParams", {}))
8299
8300 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8301 desc = await self.RO.recreate(nsr_id, target)
8302 self.logger.debug("RO return > {}".format(desc))
8303 action_id = desc["action_id"]
8304 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8305 await self._wait_ng_ro(
8306 nsr_id,
8307 action_id,
8308 nslcmop_id,
8309 start_heal,
8310 timeout_ns_heal,
8311 stage,
8312 operation="healing",
8313 )
8314
8315 # Updating NSR
8316 db_nsr_update = {
8317 "_admin.deployed.RO.operational-status": "running",
8318 "detailed-status": " ".join(stage),
8319 }
8320 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8321 self._write_op_status(nslcmop_id, stage)
8322 self.logger.debug(
8323 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8324 )
8325
8326 except Exception as e:
8327 stage[2] = "ERROR healing at VIM"
8328 # self.set_vnfr_at_error(db_vnfrs, str(e))
8329 self.logger.error(
8330 "Error healing at VIM {}".format(e),
8331 exc_info=not isinstance(
8332 e,
8333 (
8334 ROclient.ROClientException,
8335 LcmException,
8336 DbException,
8337 NgRoException,
8338 ),
8339 ),
8340 )
8341 raise
8342
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """
        For each execution environment of the descriptor, locate (or create) its
        VCA slot in <nsrs>._admin.deployed.VCA and launch a heal_N2VC task,
        registering it in task_instantiation_info so the caller can await it.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        # Normalize the descriptor into a list of execution environments
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive the VCA type from the execution-environment descriptor
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an existing VCA slot matching this element; the for/else
            # creates a new one only when no match is found (loop not broken)
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            # key the task by its future so _wait_for_tasks can report per-task names
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8504
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Re-attach the execution environment of one healed element: for native
        charms, re-register the EE and reinstall its software; for proxy/helm
        charms, re-inject the ssh key into the re-created VM; optionally re-run
        Day-1 primitives when the 'run-day1' deploy parameter is set.

        Raises LcmException (chained to the original error) on any failure.
        """
        nsr_id = db_nsr["_id"]
        # dotted prefix of this VCA entry inside the nsrs document
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace: NS -> VNF -> VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id in db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

                # for compatibility with MON/POL modules, the need model and application name at database
                # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
                # Not sure if this need to be done when healing
                """
                ee_id_parts = ee_id.split(".")
                db_nsr_update = {db_update_entry + "ee_id": ee_id}
                if len(ee_id_parts) >= 2:
                    model_name = ee_id_parts[0]
                    application_name = ee_id_parts[1]
                    db_nsr_update[db_update_entry + "model"] = model_name
                    db_nsr_update[db_update_entry + "application"] = application_name
                """

                # n2vc_redesign STEP 3.3
                # Install configuration software. Only for native charms.
                step = "Install configuration Software"

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="INSTALLING SW",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                    # other_update=db_nsr_update,
                    other_update=None,
                )

                # TODO check if already done
                self.logger.debug(logging_text + step)
                config = None
                if vca_type == "native_charm":
                    config_primitive = next(
                        (p for p in initial_config_primitive_list if p["name"] == "config"),
                        None,
                    )
                    if config_primitive:
                        config = self._map_primitive_params(
                            config_primitive, {}, deploy_params
                        )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

                # store rw_mgmt_ip in deploy params for later replacement
                deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8913
8914 async def _wait_heal_ro(
8915 self,
8916 nsr_id,
8917 timeout=600,
8918 ):
8919 start_time = time()
8920 while time() <= start_time + timeout:
8921 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8922 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8923 "operational-status"
8924 ]
8925 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8926 if operational_status_ro != "healing":
8927 break
8928 await asyncio.sleep(15)
8929 else: # timeout_ns_deploy
8930 raise NgRoException("Timeout waiting ns to deploy")
8931
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        :param nsr_id: NS Instance ID
        :param nslcmop_id: nslcmop ID of the vertical scale operation
        :return: None. Outcome is persisted in 'nslcmops' and notified through
            a kafka "verticalscaled" message.
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance already owns this operation
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is filled below but never written to the
        # 'nsrs' collection within this method — confirm whether that is intended
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # forward the operation parameters to RO as the scale target
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # block until RO reports the action finished (or times out)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.verticalscale,
                operation="verticalscale",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            # notify the result through kafka; a notification failure is logged
            # but does not change the operation outcome
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "verticalscaled", msg)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")