Revert "Remove unused methods"
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmExceptionNoMgmtIP,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 vld_to_ro_ip_profile,
65 )
66 from osm_lcm.data_utils.nsd import (
67 get_ns_configuration_relation_list,
68 get_vnf_profile,
69 get_vnf_profiles,
70 )
71 from osm_lcm.data_utils.vnfd import (
72 get_kdu,
73 get_kdu_services,
74 get_relation_list,
75 get_vdu_list,
76 get_vdu_profile,
77 get_ee_sorted_initial_config_primitive_list,
78 get_ee_sorted_terminate_config_primitive_list,
79 get_kdu_list,
80 get_virtual_link_profiles,
81 get_vdu,
82 get_configuration,
83 get_vdu_index,
84 get_scaling_aspect,
85 get_number_of_instances,
86 get_juju_ee_ref,
87 get_kdu_resource_profile,
88 find_software_version,
89 check_helm_ee_in_ns,
90 )
91 from osm_lcm.data_utils.list_utils import find_in_list
92 from osm_lcm.data_utils.vnfr import (
93 get_osm_params,
94 get_vdur_index,
95 get_kdur,
96 get_volumes_from_instantiation_params,
97 )
98 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
99 from osm_lcm.data_utils.database.vim_account import VimAccountDB
100 from n2vc.definitions import RelationEndpoint
101 from n2vc.k8s_helm3_conn import K8sHelm3Connector
102 from n2vc.k8s_juju_conn import K8sJujuConnector
103
104 from osm_common.dbbase import DbException
105 from osm_common.fsbase import FsException
106
107 from osm_lcm.data_utils.database.database import Database
108 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
109 from osm_lcm.data_utils.wim import (
110 get_sdn_ports,
111 get_target_wim_attrs,
112 select_feasible_wim_account,
113 )
114
115 from n2vc.n2vc_juju_conn import N2VCJujuConnector
116 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
117
118 from osm_lcm.lcm_helm_conn import LCMHelmConn
119 from osm_lcm.osm_config import OsmConfigBuilder
120 from osm_lcm.prometheus import parse_job
121
122 from copy import copy, deepcopy
123 from time import time
124 from uuid import uuid4
125
126 from random import SystemRandom
127
128 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
129
130
131 class NsLcm(LcmBase):
132 SUBOPERATION_STATUS_NOT_FOUND = -1
133 SUBOPERATION_STATUS_NEW = -2
134 SUBOPERATION_STATUS_SKIP = -3
135 EE_TLS_NAME = "ee-tls"
136 task_name_deploy_vca = "Deploying VCA"
137 rel_operation_types = {
138 "GE": ">=",
139 "LE": "<=",
140 "GT": ">",
141 "LT": "<",
142 "EQ": "==",
143 "NE": "!=",
144 }
145
    def __init__(self, msg, lcm_tasks, config: LcmCfg):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message-bus handler, forwarded to the LcmBase constructor
        :param lcm_tasks: registry of running LCM tasks
        :param config: LcmCfg object; its timeout, RO and VCA sections are used here
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector
        # db updates from N2VC are funneled back through _on_update_n2vc_db
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm-based execution environments share the same db-update callback
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # helm3 k8s connector; no db-update callback is registered for it
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # juju-bundle k8s connector; updates go through _on_update_k8s_db
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # kdu deployment type -> k8s connector instance
        self.k8scluster_map = {
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # charm/ee type -> connector used to deploy and manage it
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(**self.ro_config.to_dict())

        # RO operation type -> coroutine used to poll that operation's status
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
219
220 @staticmethod
221 def increment_ip_mac(ip_mac, vm_index=1):
222 if not isinstance(ip_mac, str):
223 return ip_mac
224 try:
225 # try with ipv4 look for last dot
226 i = ip_mac.rfind(".")
227 if i > 0:
228 i += 1
229 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
230 # try with ipv6 or mac look for last colon. Operate in hex
231 i = ip_mac.rfind(":")
232 if i > 0:
233 i += 1
234 # format in hex, len can be 2 for mac or 4 for ipv6
235 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
236 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
237 )
238 except Exception:
239 pass
240 return None
241
242 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
243 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
244
245 try:
246 # TODO filter RO descriptor fields...
247
248 # write to database
249 db_dict = dict()
250 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
251 db_dict["deploymentStatus"] = ro_descriptor
252 self.update_db_2("nsrs", nsrs_id, db_dict)
253
254 except Exception as e:
255 self.logger.warn(
256 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
257 )
258
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Refresh vcaStatus / configurationStatus / nsState in the nsrs record
        after an N2VC (juju) model change notification.

        :param table: db table the notification refers to (not used for the read;
            the nsr is always read from "nsrs")
        :param filter: db filter; only its "_id" (the nsr id) is used
        :param path: dotted path of the changed data; its last numeric token is
            taken as the VCA index inside _admin.deployed.VCA
        :param updated_data: changed data (unused here; status is re-read live)
        :param vca_id: optional VCA account id for the juju connection
        """
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        try:
            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS (live status from juju, not the stored one)
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # VCA index is the last dotted component of the notification path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" entry at this
                # point, so these assignments raise KeyError which is swallowed by
                # the except below — verify whether a dotted-path key was intended
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                # only transition between READY and DEGRADED; other states untouched
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
358
359 async def _on_update_k8s_db(
360 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
361 ):
362 """
363 Updating vca status in NSR record
364 :param cluster_uuid: UUID of a k8s cluster
365 :param kdu_instance: The unique name of the KDU instance
366 :param filter: To get nsr_id
367 :cluster_type: The cluster type (juju, k8s)
368 :return: none
369 """
370
371 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
372 # .format(cluster_uuid, kdu_instance, filter))
373
374 nsr_id = filter.get("_id")
375 try:
376 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
377 cluster_uuid=cluster_uuid,
378 kdu_instance=kdu_instance,
379 yaml_format=False,
380 complete_status=True,
381 vca_id=vca_id,
382 )
383
384 # vcaStatus
385 db_dict = dict()
386 db_dict["vcaStatus"] = {nsr_id: vca_status}
387
388 self.logger.debug(
389 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
390 )
391
392 # write to database
393 self.update_db_2("nsrs", nsr_id, db_dict)
394 except (asyncio.CancelledError, asyncio.TimeoutError):
395 raise
396 except Exception as e:
397 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
398
399 @staticmethod
400 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
401 try:
402 env = Environment(
403 undefined=StrictUndefined,
404 autoescape=select_autoescape(default_for_string=True, default=True),
405 )
406 template = env.from_string(cloud_init_text)
407 return template.render(additional_params or {})
408 except UndefinedError as e:
409 raise LcmException(
410 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
411 "file, must be provided in the instantiation parameters inside the "
412 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
413 )
414 except (TemplateError, TemplateNotFound) as e:
415 raise LcmException(
416 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
417 vnfd_id, vdu_id, e
418 )
419 )
420
421 def _get_vdu_cloud_init_content(self, vdu, vnfd):
422 cloud_init_content = cloud_init_file = None
423 try:
424 if vdu.get("cloud-init-file"):
425 base_folder = vnfd["_admin"]["storage"]
426 if base_folder["pkg-dir"]:
427 cloud_init_file = "{}/{}/cloud_init/{}".format(
428 base_folder["folder"],
429 base_folder["pkg-dir"],
430 vdu["cloud-init-file"],
431 )
432 else:
433 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
434 base_folder["folder"],
435 vdu["cloud-init-file"],
436 )
437 with self.fs.file_open(cloud_init_file, "r") as ci_file:
438 cloud_init_content = ci_file.read()
439 elif vdu.get("cloud-init"):
440 cloud_init_content = vdu["cloud-init"]
441
442 return cloud_init_content
443 except FsException as e:
444 raise LcmException(
445 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
446 vnfd["id"], vdu["id"], cloud_init_file, e
447 )
448 )
449
450 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
451 vdur = next(
452 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
453 )
454 additional_params = vdur.get("additionalParams")
455 return parse_yaml_strings(additional_params)
456
457 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
458 """
459 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
460 :param vnfd: input vnfd
461 :param new_id: overrides vnf id if provided
462 :param additionalParams: Instantiation params for VNFs provided
463 :param nsrId: Id of the NSR
464 :return: copy of vnfd
465 """
466 vnfd_RO = deepcopy(vnfd)
467 # remove unused by RO configuration, monitoring, scaling and internal keys
468 vnfd_RO.pop("_id", None)
469 vnfd_RO.pop("_admin", None)
470 vnfd_RO.pop("monitoring-param", None)
471 vnfd_RO.pop("scaling-group-descriptor", None)
472 vnfd_RO.pop("kdu", None)
473 vnfd_RO.pop("k8s-cluster", None)
474 if new_id:
475 vnfd_RO["id"] = new_id
476
477 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
478 for vdu in get_iterable(vnfd_RO, "vdu"):
479 vdu.pop("cloud-init-file", None)
480 vdu.pop("cloud-init", None)
481 return vnfd_RO
482
483 @staticmethod
484 def ip_profile_2_RO(ip_profile):
485 RO_ip_profile = deepcopy(ip_profile)
486 if "dns-server" in RO_ip_profile:
487 if isinstance(RO_ip_profile["dns-server"], list):
488 RO_ip_profile["dns-address"] = []
489 for ds in RO_ip_profile.pop("dns-server"):
490 RO_ip_profile["dns-address"].append(ds["address"])
491 else:
492 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
493 if RO_ip_profile.get("ip-version") == "ipv4":
494 RO_ip_profile["ip-version"] = "IPv4"
495 if RO_ip_profile.get("ip-version") == "ipv6":
496 RO_ip_profile["ip-version"] = "IPv6"
497 if "dhcp-params" in RO_ip_profile:
498 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
499 return RO_ip_profile
500
501 def _get_ro_vim_id_for_vim_account(self, vim_account):
502 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
503 if db_vim["_admin"]["operationalState"] != "ENABLED":
504 raise LcmException(
505 "VIM={} is not available. operationalState={}".format(
506 vim_account, db_vim["_admin"]["operationalState"]
507 )
508 )
509 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
510 return RO_vim_id
511
512 def get_ro_wim_id_for_wim_account(self, wim_account):
513 if isinstance(wim_account, str):
514 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
515 if db_wim["_admin"]["operationalState"] != "ENABLED":
516 raise LcmException(
517 "WIM={} is not available. operationalState={}".format(
518 wim_account, db_wim["_admin"]["operationalState"]
519 )
520 )
521 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
522 return RO_wim_id
523 else:
524 return wim_account
525
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Apply a scaling operation to a vnfr record in the database.

        :param db_vnfr: vnfr content; its "vdur" list is refreshed from db at the end
        :param vdu_create: dict vdu-id -> number of instances to add
        :param vdu_delete: dict vdu-id -> number of instances to remove
        :param mark_delete: when True, vdurs are only marked status=DELETING
            instead of being pulled from the record
        :raises LcmException: scaling out a vdu with no existing vdur and no
            saved vdur-template
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # clone from the newest existing vdur of this vdu-id
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    # new replicas get consecutive count-index values after the clone
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per replica; dynamic
                        # ones are cleared so the VIM assigns fresh values
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                # keep the last vdur as a template so a later scale-out can clone it
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    # mark the newest vdu_count matching vdurs as DELETING
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
636
637 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
638 """
639 Updates database nsr with the RO info for the created vld
640 :param ns_update_nsr: dictionary to be filled with the updated info
641 :param db_nsr: content of db_nsr. This is also modified
642 :param nsr_desc_RO: nsr descriptor from RO
643 :return: Nothing, LcmException is raised on errors
644 """
645
646 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
647 for net_RO in get_iterable(nsr_desc_RO, "nets"):
648 if vld["id"] != net_RO.get("ns_net_osm_id"):
649 continue
650 vld["vim-id"] = net_RO.get("vim_net_id")
651 vld["name"] = net_RO.get("vim_name")
652 vld["status"] = net_RO.get("status")
653 vld["status-detailed"] = net_RO.get("error_msg")
654 ns_update_nsr["vld.{}".format(vld_index)] = vld
655 break
656 else:
657 raise LcmException(
658 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
659 )
660
661 def set_vnfr_at_error(self, db_vnfrs, error_text):
662 try:
663 for db_vnfr in db_vnfrs.values():
664 vnfr_update = {"status": "ERROR"}
665 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
666 if "status" not in vdur:
667 vdur["status"] = "ERROR"
668 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
669 if error_text:
670 vdur["status-detailed"] = str(error_text)
671 vnfr_update[
672 "vdur.{}.status-detailed".format(vdu_index)
673 ] = "ERROR"
674 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
675 except DbException as e:
676 self.logger.error("Cannot update vnf. {}".format(e))
677
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # for/else: the else-branches below fire when no RO entry matched
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';' — keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by the VIM; nothing to sync
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        # NOTE(review): vdur_RO_count_index only advances on
                        # vdu_osm_id matches, so this selects the Nth RO vm with
                        # the same vdu id, where N is this vdur's count-index —
                        # presumably relies on RO listing vms in replica order
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
774
775 def _get_ns_config_info(self, nsr_id):
776 """
777 Generates a mapping between vnf,vdu elements and the N2VC id
778 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
779 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
780 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
781 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
782 """
783 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
784 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
785 mapping = {}
786 ns_config_info = {"osm-config-mapping": mapping}
787 for vca in vca_deployed_list:
788 if not vca["member-vnf-index"]:
789 continue
790 if not vca["vdu_id"]:
791 mapping[vca["member-vnf-index"]] = vca["application"]
792 else:
793 mapping[
794 "{}.{}.{}".format(
795 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
796 )
797 ] = vca["application"]
798 return ns_config_info
799
800 async def _instantiate_ng_ro(
801 self,
802 logging_text,
803 nsr_id,
804 nsd,
805 db_nsr,
806 db_nslcmop,
807 db_vnfrs,
808 db_vnfds,
809 n2vc_key_list,
810 stage,
811 start_deploy,
812 timeout_ns_deploy,
813 ):
814 db_vims = {}
815
816 def get_vim_account(vim_account_id):
817 nonlocal db_vims
818 if vim_account_id in db_vims:
819 return db_vims[vim_account_id]
820 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
821 db_vims[vim_account_id] = db_vim
822 return db_vim
823
824 # modify target_vld info with instantiation parameters
825 def parse_vld_instantiation_params(
826 target_vim, target_vld, vld_params, target_sdn
827 ):
828 if vld_params.get("ip-profile"):
829 target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
830 vld_params["ip-profile"]
831 )
832 if vld_params.get("provider-network"):
833 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
834 "provider-network"
835 ]
836 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
837 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
838 "provider-network"
839 ]["sdn-ports"]
840
841 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
842 # if wim_account_id is specified in vld_params, validate if it is feasible.
843 wim_account_id, db_wim = select_feasible_wim_account(
844 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
845 )
846
847 if wim_account_id:
848 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
849 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
850 # update vld_params with correct WIM account Id
851 vld_params["wimAccountId"] = wim_account_id
852
853 target_wim = "wim:{}".format(wim_account_id)
854 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
855 sdn_ports = get_sdn_ports(vld_params, db_wim)
856 if len(sdn_ports) > 0:
857 target_vld["vim_info"][target_wim] = target_wim_attrs
858 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
859
860 self.logger.debug(
861 "Target VLD with WIM data: {:s}".format(str(target_vld))
862 )
863
864 for param in ("vim-network-name", "vim-network-id"):
865 if vld_params.get(param):
866 if isinstance(vld_params[param], dict):
867 for vim, vim_net in vld_params[param].items():
868 other_target_vim = "vim:" + vim
869 populate_dict(
870 target_vld["vim_info"],
871 (other_target_vim, param.replace("-", "_")),
872 vim_net,
873 )
874 else: # isinstance str
875 target_vld["vim_info"][target_vim][
876 param.replace("-", "_")
877 ] = vld_params[param]
878 if vld_params.get("common_id"):
879 target_vld["common_id"] = vld_params.get("common_id")
880
881 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
882 def update_ns_vld_target(target, ns_params):
883 for vnf_params in ns_params.get("vnf", ()):
884 if vnf_params.get("vimAccountId"):
885 target_vnf = next(
886 (
887 vnfr
888 for vnfr in db_vnfrs.values()
889 if vnf_params["member-vnf-index"]
890 == vnfr["member-vnf-index-ref"]
891 ),
892 None,
893 )
894 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
895 if not vdur:
896 continue
897 for a_index, a_vld in enumerate(target["ns"]["vld"]):
898 target_vld = find_in_list(
899 get_iterable(vdur, "interfaces"),
900 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
901 )
902
903 vld_params = find_in_list(
904 get_iterable(ns_params, "vld"),
905 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
906 )
907 if target_vld:
908 if vnf_params.get("vimAccountId") not in a_vld.get(
909 "vim_info", {}
910 ):
911 target_vim_network_list = [
912 v for _, v in a_vld.get("vim_info").items()
913 ]
914 target_vim_network_name = next(
915 (
916 item.get("vim_network_name", "")
917 for item in target_vim_network_list
918 ),
919 "",
920 )
921
922 target["ns"]["vld"][a_index].get("vim_info").update(
923 {
924 "vim:{}".format(vnf_params["vimAccountId"]): {
925 "vim_network_name": target_vim_network_name,
926 }
927 }
928 )
929
930 if vld_params:
931 for param in ("vim-network-name", "vim-network-id"):
932 if vld_params.get(param) and isinstance(
933 vld_params[param], dict
934 ):
935 for vim, vim_net in vld_params[
936 param
937 ].items():
938 other_target_vim = "vim:" + vim
939 populate_dict(
940 target["ns"]["vld"][a_index].get(
941 "vim_info"
942 ),
943 (
944 other_target_vim,
945 param.replace("-", "_"),
946 ),
947 vim_net,
948 )
949
950 nslcmop_id = db_nslcmop["_id"]
951 target = {
952 "name": db_nsr["name"],
953 "ns": {"vld": []},
954 "vnf": [],
955 "image": deepcopy(db_nsr["image"]),
956 "flavor": deepcopy(db_nsr["flavor"]),
957 "action_id": nslcmop_id,
958 "cloud_init_content": {},
959 }
960 for image in target["image"]:
961 image["vim_info"] = {}
962 for flavor in target["flavor"]:
963 flavor["vim_info"] = {}
964 if db_nsr.get("shared-volumes"):
965 target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
966 for shared_volumes in target["shared-volumes"]:
967 shared_volumes["vim_info"] = {}
968 if db_nsr.get("affinity-or-anti-affinity-group"):
969 target["affinity-or-anti-affinity-group"] = deepcopy(
970 db_nsr["affinity-or-anti-affinity-group"]
971 )
972 for affinity_or_anti_affinity_group in target[
973 "affinity-or-anti-affinity-group"
974 ]:
975 affinity_or_anti_affinity_group["vim_info"] = {}
976
977 if db_nslcmop.get("lcmOperationType") != "instantiate":
978 # get parameters of instantiation:
979 db_nslcmop_instantiate = self.db.get_list(
980 "nslcmops",
981 {
982 "nsInstanceId": db_nslcmop["nsInstanceId"],
983 "lcmOperationType": "instantiate",
984 },
985 )[-1]
986 ns_params = db_nslcmop_instantiate.get("operationParams")
987 else:
988 ns_params = db_nslcmop.get("operationParams")
989 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
990 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
991
992 cp2target = {}
993 for vld_index, vld in enumerate(db_nsr.get("vld")):
994 target_vim = "vim:{}".format(ns_params["vimAccountId"])
995 target_vld = {
996 "id": vld["id"],
997 "name": vld["name"],
998 "mgmt-network": vld.get("mgmt-network", False),
999 "type": vld.get("type"),
1000 "vim_info": {
1001 target_vim: {
1002 "vim_network_name": vld.get("vim-network-name"),
1003 "vim_account_id": ns_params["vimAccountId"],
1004 }
1005 },
1006 }
1007 # check if this network needs SDN assist
1008 if vld.get("pci-interfaces"):
1009 db_vim = get_vim_account(ns_params["vimAccountId"])
1010 if vim_config := db_vim.get("config"):
1011 if sdnc_id := vim_config.get("sdn-controller"):
1012 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1013 target_sdn = "sdn:{}".format(sdnc_id)
1014 target_vld["vim_info"][target_sdn] = {
1015 "sdn": True,
1016 "target_vim": target_vim,
1017 "vlds": [sdn_vld],
1018 "type": vld.get("type"),
1019 }
1020
1021 nsd_vnf_profiles = get_vnf_profiles(nsd)
1022 for nsd_vnf_profile in nsd_vnf_profiles:
1023 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1024 if cp["virtual-link-profile-id"] == vld["id"]:
1025 cp2target[
1026 "member_vnf:{}.{}".format(
1027 cp["constituent-cpd-id"][0][
1028 "constituent-base-element-id"
1029 ],
1030 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1031 )
1032 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1033
1034 # check at nsd descriptor, if there is an ip-profile
1035 vld_params = {}
1036 nsd_vlp = find_in_list(
1037 get_virtual_link_profiles(nsd),
1038 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1039 == vld["id"],
1040 )
1041 if (
1042 nsd_vlp
1043 and nsd_vlp.get("virtual-link-protocol-data")
1044 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1045 ):
1046 vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
1047 "l3-protocol-data"
1048 ]
1049
1050 # update vld_params with instantiation params
1051 vld_instantiation_params = find_in_list(
1052 get_iterable(ns_params, "vld"),
1053 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1054 )
1055 if vld_instantiation_params:
1056 vld_params.update(vld_instantiation_params)
1057 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1058 target["ns"]["vld"].append(target_vld)
1059 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1060 update_ns_vld_target(target, ns_params)
1061
1062 for vnfr in db_vnfrs.values():
1063 vnfd = find_in_list(
1064 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1065 )
1066 vnf_params = find_in_list(
1067 get_iterable(ns_params, "vnf"),
1068 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1069 )
1070 target_vnf = deepcopy(vnfr)
1071 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1072 for vld in target_vnf.get("vld", ()):
1073 # check if connected to a ns.vld, to fill target'
1074 vnf_cp = find_in_list(
1075 vnfd.get("int-virtual-link-desc", ()),
1076 lambda cpd: cpd.get("id") == vld["id"],
1077 )
1078 if vnf_cp:
1079 ns_cp = "member_vnf:{}.{}".format(
1080 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1081 )
1082 if cp2target.get(ns_cp):
1083 vld["target"] = cp2target[ns_cp]
1084
1085 vld["vim_info"] = {
1086 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1087 }
1088 # check if this network needs SDN assist
1089 target_sdn = None
1090 if vld.get("pci-interfaces"):
1091 db_vim = get_vim_account(vnfr["vim-account-id"])
1092 sdnc_id = db_vim["config"].get("sdn-controller")
1093 if sdnc_id:
1094 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1095 target_sdn = "sdn:{}".format(sdnc_id)
1096 vld["vim_info"][target_sdn] = {
1097 "sdn": True,
1098 "target_vim": target_vim,
1099 "vlds": [sdn_vld],
1100 "type": vld.get("type"),
1101 }
1102
1103 # check at vnfd descriptor, if there is an ip-profile
1104 vld_params = {}
1105 vnfd_vlp = find_in_list(
1106 get_virtual_link_profiles(vnfd),
1107 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1108 )
1109 if (
1110 vnfd_vlp
1111 and vnfd_vlp.get("virtual-link-protocol-data")
1112 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1113 ):
1114 vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
1115 "l3-protocol-data"
1116 ]
1117 # update vld_params with instantiation params
1118 if vnf_params:
1119 vld_instantiation_params = find_in_list(
1120 get_iterable(vnf_params, "internal-vld"),
1121 lambda i_vld: i_vld["name"] == vld["id"],
1122 )
1123 if vld_instantiation_params:
1124 vld_params.update(vld_instantiation_params)
1125 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1126
1127 vdur_list = []
1128 for vdur in target_vnf.get("vdur", ()):
1129 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1130 continue # This vdu must not be created
1131 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1132
1133 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1134
1135 if ssh_keys_all:
1136 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1137 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1138 if (
1139 vdu_configuration
1140 and vdu_configuration.get("config-access")
1141 and vdu_configuration.get("config-access").get("ssh-access")
1142 ):
1143 vdur["ssh-keys"] = ssh_keys_all
1144 vdur["ssh-access-required"] = vdu_configuration[
1145 "config-access"
1146 ]["ssh-access"]["required"]
1147 elif (
1148 vnf_configuration
1149 and vnf_configuration.get("config-access")
1150 and vnf_configuration.get("config-access").get("ssh-access")
1151 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1152 ):
1153 vdur["ssh-keys"] = ssh_keys_all
1154 vdur["ssh-access-required"] = vnf_configuration[
1155 "config-access"
1156 ]["ssh-access"]["required"]
1157 elif ssh_keys_instantiation and find_in_list(
1158 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1159 ):
1160 vdur["ssh-keys"] = ssh_keys_instantiation
1161
1162 self.logger.debug("NS > vdur > {}".format(vdur))
1163
1164 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1165 # cloud-init
1166 if vdud.get("cloud-init-file"):
1167 vdur["cloud-init"] = "{}:file:{}".format(
1168 vnfd["_id"], vdud.get("cloud-init-file")
1169 )
1170 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1171 if vdur["cloud-init"] not in target["cloud_init_content"]:
1172 base_folder = vnfd["_admin"]["storage"]
1173 if base_folder["pkg-dir"]:
1174 cloud_init_file = "{}/{}/cloud_init/{}".format(
1175 base_folder["folder"],
1176 base_folder["pkg-dir"],
1177 vdud.get("cloud-init-file"),
1178 )
1179 else:
1180 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1181 base_folder["folder"],
1182 vdud.get("cloud-init-file"),
1183 )
1184 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1185 target["cloud_init_content"][
1186 vdur["cloud-init"]
1187 ] = ci_file.read()
1188 elif vdud.get("cloud-init"):
1189 vdur["cloud-init"] = "{}:vdu:{}".format(
1190 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1191 )
1192 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1193 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1194 "cloud-init"
1195 ]
1196 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1197 deploy_params_vdu = self._format_additional_params(
1198 vdur.get("additionalParams") or {}
1199 )
1200 deploy_params_vdu["OSM"] = get_osm_params(
1201 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1202 )
1203 vdur["additionalParams"] = deploy_params_vdu
1204
1205 # flavor
1206 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1207 if target_vim not in ns_flavor["vim_info"]:
1208 ns_flavor["vim_info"][target_vim] = {}
1209
1210 # deal with images
1211 # in case alternative images are provided we must check if they should be applied
1212 # for the vim_type, modify the vim_type taking into account
1213 ns_image_id = int(vdur["ns-image-id"])
1214 if vdur.get("alt-image-ids"):
1215 db_vim = get_vim_account(vnfr["vim-account-id"])
1216 vim_type = db_vim["vim_type"]
1217 for alt_image_id in vdur.get("alt-image-ids"):
1218 ns_alt_image = target["image"][int(alt_image_id)]
1219 if vim_type == ns_alt_image.get("vim-type"):
1220 # must use alternative image
1221 self.logger.debug(
1222 "use alternative image id: {}".format(alt_image_id)
1223 )
1224 ns_image_id = alt_image_id
1225 vdur["ns-image-id"] = ns_image_id
1226 break
1227 ns_image = target["image"][int(ns_image_id)]
1228 if target_vim not in ns_image["vim_info"]:
1229 ns_image["vim_info"][target_vim] = {}
1230
1231 # Affinity groups
1232 if vdur.get("affinity-or-anti-affinity-group-id"):
1233 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1234 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1235 if target_vim not in ns_ags["vim_info"]:
1236 ns_ags["vim_info"][target_vim] = {}
1237
1238 # shared-volumes
1239 if vdur.get("shared-volumes-id"):
1240 for sv_id in vdur["shared-volumes-id"]:
1241 ns_sv = find_in_list(
1242 target["shared-volumes"], lambda sv: sv_id in sv["id"]
1243 )
1244 if ns_sv:
1245 ns_sv["vim_info"][target_vim] = {}
1246
1247 vdur["vim_info"] = {target_vim: {}}
1248 # instantiation parameters
1249 if vnf_params:
1250 vdu_instantiation_params = find_in_list(
1251 get_iterable(vnf_params, "vdu"),
1252 lambda i_vdu: i_vdu["id"] == vdud["id"],
1253 )
1254 if vdu_instantiation_params:
1255 # Parse the vdu_volumes from the instantiation params
1256 vdu_volumes = get_volumes_from_instantiation_params(
1257 vdu_instantiation_params, vdud
1258 )
1259 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1260 vdur["additionalParams"]["OSM"][
1261 "vim_flavor_id"
1262 ] = vdu_instantiation_params.get("vim-flavor-id")
1263 vdur_list.append(vdur)
1264 target_vnf["vdur"] = vdur_list
1265 target["vnf"].append(target_vnf)
1266
1267 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1268 desc = await self.RO.deploy(nsr_id, target)
1269 self.logger.debug("RO return > {}".format(desc))
1270 action_id = desc["action_id"]
1271 await self._wait_ng_ro(
1272 nsr_id,
1273 action_id,
1274 nslcmop_id,
1275 start_deploy,
1276 timeout_ns_deploy,
1277 stage,
1278 operation="instantiation",
1279 )
1280
1281 # Updating NSR
1282 db_nsr_update = {
1283 "_admin.deployed.RO.operational-status": "running",
1284 "detailed-status": " ".join(stage),
1285 }
1286 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1287 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1288 self._write_op_status(nslcmop_id, stage)
1289 self.logger.debug(
1290 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1291 )
1292 return
1293
    async def _wait_ng_ro(
        self,
        nsr_id,
        action_id,
        nslcmop_id=None,
        start_time=None,
        timeout=600,
        stage=None,
        operation=None,
    ):
        """
        Poll NG-RO until the given action reaches a final state, persisting progress.

        :param nsr_id: NS record identifier
        :param action_id: RO action identifier (as returned by RO.deploy)
        :param nslcmop_id: NS LCM operation id; when provided together with stage,
            progress changes are written to the "nsrs" collection and op status
        :param start_time: epoch seconds when the wait started; defaults to now
        :param timeout: maximum seconds to keep polling before raising
        :param stage: 3-item status list; stage[2] is overwritten with VIM detail
        :param operation: key into self.op_status_map selecting which RO status
            query to run (e.g. "instantiation" or "termination")
        :raises NgRoException: if RO reports FAILED, or on timeout
        """
        detailed_status_old = None
        db_nsr_update = {}
        start_time = start_time or time()
        while time() <= start_time + timeout:
            # ask RO for the current status of this action
            desc_status = await self.op_status_map[operation](nsr_id, action_id)
            self.logger.debug("Wait NG RO > {}".format(desc_status))
            if desc_status["status"] == "FAILED":
                raise NgRoException(desc_status["details"])
            elif desc_status["status"] == "BUILD":
                if stage:
                    stage[2] = "VIM: ({})".format(desc_status["details"])
            elif desc_status["status"] == "DONE":
                if stage:
                    stage[2] = "Deployed at VIM"
                break
            else:
                assert False, "ROclient.check_ns_status returns unknown {}".format(
                    desc_status["status"]
                )
            # persist progress only when the detailed status actually changed
            if stage and nslcmop_id and stage[2] != detailed_status_old:
                detailed_status_old = stage[2]
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
            await asyncio.sleep(15)
        else:  # while exhausted without break -> timeout_ns_deploy
            raise NgRoException("Timeout waiting ns to deploy")
1331
1332 async def _terminate_ng_ro(
1333 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1334 ):
1335 db_nsr_update = {}
1336 failed_detail = []
1337 action_id = None
1338 start_deploy = time()
1339 try:
1340 target = {
1341 "ns": {"vld": []},
1342 "vnf": [],
1343 "image": [],
1344 "flavor": [],
1345 "action_id": nslcmop_id,
1346 }
1347 desc = await self.RO.deploy(nsr_id, target)
1348 action_id = desc["action_id"]
1349 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1350 self.logger.debug(
1351 logging_text
1352 + "ns terminate action at RO. action_id={}".format(action_id)
1353 )
1354
1355 # wait until done
1356 delete_timeout = 20 * 60 # 20 minutes
1357 await self._wait_ng_ro(
1358 nsr_id,
1359 action_id,
1360 nslcmop_id,
1361 start_deploy,
1362 delete_timeout,
1363 stage,
1364 operation="termination",
1365 )
1366 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1367 # delete all nsr
1368 await self.RO.delete(nsr_id)
1369 except NgRoException as e:
1370 if e.http_code == 404: # not found
1371 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1372 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1373 self.logger.debug(
1374 logging_text + "RO_action_id={} already deleted".format(action_id)
1375 )
1376 elif e.http_code == 409: # conflict
1377 failed_detail.append("delete conflict: {}".format(e))
1378 self.logger.debug(
1379 logging_text
1380 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1381 )
1382 else:
1383 failed_detail.append("delete error: {}".format(e))
1384 self.logger.error(
1385 logging_text
1386 + "RO_action_id={} delete error: {}".format(action_id, e)
1387 )
1388 except Exception as e:
1389 failed_detail.append("delete error: {}".format(e))
1390 self.logger.error(
1391 logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
1392 )
1393
1394 if failed_detail:
1395 stage[2] = "Error deleting from VIM"
1396 else:
1397 stage[2] = "Deleted from VIM"
1398 db_nsr_update["detailed-status"] = " ".join(stage)
1399 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1400 self._write_op_status(nslcmop_id, stage)
1401
1402 if failed_detail:
1403 raise LcmException("; ".join(failed_detail))
1404 return
1405
1406 async def instantiate_RO(
1407 self,
1408 logging_text,
1409 nsr_id,
1410 nsd,
1411 db_nsr,
1412 db_nslcmop,
1413 db_vnfrs,
1414 db_vnfds,
1415 n2vc_key_list,
1416 stage,
1417 ):
1418 """
1419 Instantiate at RO
1420 :param logging_text: preffix text to use at logging
1421 :param nsr_id: nsr identity
1422 :param nsd: database content of ns descriptor
1423 :param db_nsr: database content of ns record
1424 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1425 :param db_vnfrs:
1426 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1427 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1428 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1429 :return: None or exception
1430 """
1431 try:
1432 start_deploy = time()
1433 ns_params = db_nslcmop.get("operationParams")
1434 if ns_params and ns_params.get("timeout_ns_deploy"):
1435 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1436 else:
1437 timeout_ns_deploy = self.timeout.ns_deploy
1438
1439 # Check for and optionally request placement optimization. Database will be updated if placement activated
1440 stage[2] = "Waiting for Placement."
1441 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1442 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1443 for vnfr in db_vnfrs.values():
1444 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1445 break
1446 else:
1447 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1448
1449 return await self._instantiate_ng_ro(
1450 logging_text,
1451 nsr_id,
1452 nsd,
1453 db_nsr,
1454 db_nslcmop,
1455 db_vnfrs,
1456 db_vnfds,
1457 n2vc_key_list,
1458 stage,
1459 start_deploy,
1460 timeout_ns_deploy,
1461 )
1462 except Exception as e:
1463 stage[2] = "ERROR deploying at VIM"
1464 self.set_vnfr_at_error(db_vnfrs, str(e))
1465 self.logger.error(
1466 "Error deploying at VIM {}".format(e),
1467 exc_info=not isinstance(
1468 e,
1469 (
1470 ROclient.ROClientException,
1471 LcmException,
1472 DbException,
1473 NgRoException,
1474 ),
1475 ),
1476 )
1477 raise
1478
1479 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1480 """
1481 Wait for kdu to be up, get ip address
1482 :param logging_text: prefix use for logging
1483 :param nsr_id:
1484 :param vnfr_id:
1485 :param kdu_name:
1486 :return: IP address, K8s services
1487 """
1488
1489 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1490 nb_tries = 0
1491
1492 while nb_tries < 360:
1493 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1494 kdur = next(
1495 (
1496 x
1497 for x in get_iterable(db_vnfr, "kdur")
1498 if x.get("kdu-name") == kdu_name
1499 ),
1500 None,
1501 )
1502 if not kdur:
1503 raise LcmException(
1504 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1505 )
1506 if kdur.get("status"):
1507 if kdur["status"] in ("READY", "ENABLED"):
1508 return kdur.get("ip-address"), kdur.get("services")
1509 else:
1510 raise LcmException(
1511 "target KDU={} is in error state".format(kdu_name)
1512 )
1513
1514 await asyncio.sleep(10)
1515 nb_tries += 1
1516 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1517
1518 async def wait_vm_up_insert_key_ro(
1519 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1520 ):
1521 """
1522 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1523 :param logging_text: prefix use for logging
1524 :param nsr_id:
1525 :param vnfr_id:
1526 :param vdu_id:
1527 :param vdu_index:
1528 :param pub_key: public ssh key to inject, None to skip
1529 :param user: user to apply the public ssh key
1530 :return: IP address
1531 """
1532
1533 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1534 ip_address = None
1535 target_vdu_id = None
1536 ro_retries = 0
1537
1538 while True:
1539 ro_retries += 1
1540 if ro_retries >= 360: # 1 hour
1541 raise LcmException(
1542 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1543 )
1544
1545 await asyncio.sleep(10)
1546
1547 # get ip address
1548 if not target_vdu_id:
1549 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1550
1551 if not vdu_id: # for the VNF case
1552 if db_vnfr.get("status") == "ERROR":
1553 raise LcmException(
1554 "Cannot inject ssh-key because target VNF is in error state"
1555 )
1556 ip_address = db_vnfr.get("ip-address")
1557 if not ip_address:
1558 continue
1559 vdur = next(
1560 (
1561 x
1562 for x in get_iterable(db_vnfr, "vdur")
1563 if x.get("ip-address") == ip_address
1564 ),
1565 None,
1566 )
1567 else: # VDU case
1568 vdur = next(
1569 (
1570 x
1571 for x in get_iterable(db_vnfr, "vdur")
1572 if x.get("vdu-id-ref") == vdu_id
1573 and x.get("count-index") == vdu_index
1574 ),
1575 None,
1576 )
1577
1578 if (
1579 not vdur and len(db_vnfr.get("vdur", ())) == 1
1580 ): # If only one, this should be the target vdu
1581 vdur = db_vnfr["vdur"][0]
1582 if not vdur:
1583 raise LcmException(
1584 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1585 vnfr_id, vdu_id, vdu_index
1586 )
1587 )
1588 # New generation RO stores information at "vim_info"
1589 ng_ro_status = None
1590 target_vim = None
1591 if vdur.get("vim_info"):
1592 target_vim = next(
1593 t for t in vdur["vim_info"]
1594 ) # there should be only one key
1595 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1596 if (
1597 vdur.get("pdu-type")
1598 or vdur.get("status") == "ACTIVE"
1599 or ng_ro_status == "ACTIVE"
1600 ):
1601 ip_address = vdur.get("ip-address")
1602 if not ip_address:
1603 continue
1604 target_vdu_id = vdur["vdu-id-ref"]
1605 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1606 raise LcmException(
1607 "Cannot inject ssh-key because target VM is in error state"
1608 )
1609
1610 if not target_vdu_id:
1611 continue
1612
1613 # inject public key into machine
1614 if pub_key and user:
1615 self.logger.debug(logging_text + "Inserting RO key")
1616 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1617 if vdur.get("pdu-type"):
1618 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1619 return ip_address
1620 try:
1621 target = {
1622 "action": {
1623 "action": "inject_ssh_key",
1624 "key": pub_key,
1625 "user": user,
1626 },
1627 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1628 }
1629 desc = await self.RO.deploy(nsr_id, target)
1630 action_id = desc["action_id"]
1631 await self._wait_ng_ro(
1632 nsr_id, action_id, timeout=600, operation="instantiation"
1633 )
1634 break
1635 except NgRoException as e:
1636 raise LcmException(
1637 "Reaching max tries injecting key. Error: {}".format(e)
1638 )
1639 else:
1640 break
1641
1642 return ip_address
1643
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: NS record identifier
        :param vca_deployed_list: list of deployed VCAs; entry at vca_index is "us"
        :param vca_index: index of the VCA whose dependencies are awaited
        :raises LcmException: if a dependent charm is BROKEN or on timeout
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): timeout counts loop iterations (each with a 10s sleep),
        # so the effective wait is up to ~300 * 10s, not 300 seconds
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # dependency not ready yet: stop scanning, sleep and retry
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1681
1682 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1683 vca_id = None
1684 if db_vnfr:
1685 vca_id = deep_get(db_vnfr, ("vca-id",))
1686 elif db_nsr:
1687 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1688 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1689 return vca_id
1690
1691 async def instantiate_N2VC(
1692 self,
1693 logging_text,
1694 vca_index,
1695 nsi_id,
1696 db_nsr,
1697 db_vnfr,
1698 vdu_id,
1699 kdu_name,
1700 vdu_index,
1701 kdu_index,
1702 config_descriptor,
1703 deploy_params,
1704 base_folder,
1705 nslcmop_id,
1706 stage,
1707 vca_type,
1708 vca_name,
1709 ee_config_descriptor,
1710 ):
1711 nsr_id = db_nsr["_id"]
1712 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1713 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1714 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1715 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1716 db_dict = {
1717 "collection": "nsrs",
1718 "filter": {"_id": nsr_id},
1719 "path": db_update_entry,
1720 }
1721 step = ""
1722 try:
1723 element_type = "NS"
1724 element_under_configuration = nsr_id
1725
1726 vnfr_id = None
1727 if db_vnfr:
1728 vnfr_id = db_vnfr["_id"]
1729 osm_config["osm"]["vnf_id"] = vnfr_id
1730
1731 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1732
1733 if vca_type == "native_charm":
1734 index_number = 0
1735 else:
1736 index_number = vdu_index or 0
1737
1738 if vnfr_id:
1739 element_type = "VNF"
1740 element_under_configuration = vnfr_id
1741 namespace += ".{}-{}".format(vnfr_id, index_number)
1742 if vdu_id:
1743 namespace += ".{}-{}".format(vdu_id, index_number)
1744 element_type = "VDU"
1745 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1746 osm_config["osm"]["vdu_id"] = vdu_id
1747 elif kdu_name:
1748 namespace += ".{}".format(kdu_name)
1749 element_type = "KDU"
1750 element_under_configuration = kdu_name
1751 osm_config["osm"]["kdu_name"] = kdu_name
1752
1753 # Get artifact path
1754 if base_folder["pkg-dir"]:
1755 artifact_path = "{}/{}/{}/{}".format(
1756 base_folder["folder"],
1757 base_folder["pkg-dir"],
1758 "charms"
1759 if vca_type
1760 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1761 else "helm-charts",
1762 vca_name,
1763 )
1764 else:
1765 artifact_path = "{}/Scripts/{}/{}/".format(
1766 base_folder["folder"],
1767 "charms"
1768 if vca_type
1769 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1770 else "helm-charts",
1771 vca_name,
1772 )
1773
1774 self.logger.debug("Artifact path > {}".format(artifact_path))
1775
1776 # get initial_config_primitive_list that applies to this element
1777 initial_config_primitive_list = config_descriptor.get(
1778 "initial-config-primitive"
1779 )
1780
1781 self.logger.debug(
1782 "Initial config primitive list > {}".format(
1783 initial_config_primitive_list
1784 )
1785 )
1786
1787 # add config if not present for NS charm
1788 ee_descriptor_id = ee_config_descriptor.get("id")
1789 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1790 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1791 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1792 )
1793
1794 self.logger.debug(
1795 "Initial config primitive list #2 > {}".format(
1796 initial_config_primitive_list
1797 )
1798 )
1799 # n2vc_redesign STEP 3.1
1800 # find old ee_id if exists
1801 ee_id = vca_deployed.get("ee_id")
1802
1803 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1804 # create or register execution environment in VCA
1805 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
1806 self._write_configuration_status(
1807 nsr_id=nsr_id,
1808 vca_index=vca_index,
1809 status="CREATING",
1810 element_under_configuration=element_under_configuration,
1811 element_type=element_type,
1812 )
1813
1814 step = "create execution environment"
1815 self.logger.debug(logging_text + step)
1816
1817 ee_id = None
1818 credentials = None
1819 if vca_type == "k8s_proxy_charm":
1820 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1821 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1822 namespace=namespace,
1823 artifact_path=artifact_path,
1824 db_dict=db_dict,
1825 vca_id=vca_id,
1826 )
1827 elif vca_type == "helm-v3":
1828 ee_id, credentials = await self.vca_map[
1829 vca_type
1830 ].create_execution_environment(
1831 namespace=nsr_id,
1832 reuse_ee_id=ee_id,
1833 db_dict=db_dict,
1834 config=osm_config,
1835 artifact_path=artifact_path,
1836 chart_model=vca_name,
1837 vca_type=vca_type,
1838 )
1839 else:
1840 ee_id, credentials = await self.vca_map[
1841 vca_type
1842 ].create_execution_environment(
1843 namespace=namespace,
1844 reuse_ee_id=ee_id,
1845 db_dict=db_dict,
1846 vca_id=vca_id,
1847 )
1848
1849 elif vca_type == "native_charm":
1850 step = "Waiting to VM being up and getting IP address"
1851 self.logger.debug(logging_text + step)
1852 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1853 logging_text,
1854 nsr_id,
1855 vnfr_id,
1856 vdu_id,
1857 vdu_index,
1858 user=None,
1859 pub_key=None,
1860 )
1861 credentials = {"hostname": rw_mgmt_ip}
1862 # get username
1863 username = deep_get(
1864 config_descriptor, ("config-access", "ssh-access", "default-user")
1865 )
1866 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1867 # merged. Meanwhile let's get username from initial-config-primitive
1868 if not username and initial_config_primitive_list:
1869 for config_primitive in initial_config_primitive_list:
1870 for param in config_primitive.get("parameter", ()):
1871 if param["name"] == "ssh-username":
1872 username = param["value"]
1873 break
1874 if not username:
1875 raise LcmException(
1876 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1877 "'config-access.ssh-access.default-user'"
1878 )
1879 credentials["username"] = username
1880 # n2vc_redesign STEP 3.2
1881
1882 self._write_configuration_status(
1883 nsr_id=nsr_id,
1884 vca_index=vca_index,
1885 status="REGISTERING",
1886 element_under_configuration=element_under_configuration,
1887 element_type=element_type,
1888 )
1889
1890 step = "register execution environment {}".format(credentials)
1891 self.logger.debug(logging_text + step)
1892 ee_id = await self.vca_map[vca_type].register_execution_environment(
1893 credentials=credentials,
1894 namespace=namespace,
1895 db_dict=db_dict,
1896 vca_id=vca_id,
1897 )
1898
1899 # for compatibility with MON/POL modules, the need model and application name at database
1900 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1901 ee_id_parts = ee_id.split(".")
1902 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1903 if len(ee_id_parts) >= 2:
1904 model_name = ee_id_parts[0]
1905 application_name = ee_id_parts[1]
1906 db_nsr_update[db_update_entry + "model"] = model_name
1907 db_nsr_update[db_update_entry + "application"] = application_name
1908
1909 # n2vc_redesign STEP 3.3
1910 step = "Install configuration Software"
1911
1912 self._write_configuration_status(
1913 nsr_id=nsr_id,
1914 vca_index=vca_index,
1915 status="INSTALLING SW",
1916 element_under_configuration=element_under_configuration,
1917 element_type=element_type,
1918 other_update=db_nsr_update,
1919 )
1920
1921 # TODO check if already done
1922 self.logger.debug(logging_text + step)
1923 config = None
1924 if vca_type == "native_charm":
1925 config_primitive = next(
1926 (p for p in initial_config_primitive_list if p["name"] == "config"),
1927 None,
1928 )
1929 if config_primitive:
1930 config = self._map_primitive_params(
1931 config_primitive, {}, deploy_params
1932 )
1933 num_units = 1
1934 if vca_type == "lxc_proxy_charm":
1935 if element_type == "NS":
1936 num_units = db_nsr.get("config-units") or 1
1937 elif element_type == "VNF":
1938 num_units = db_vnfr.get("config-units") or 1
1939 elif element_type == "VDU":
1940 for v in db_vnfr["vdur"]:
1941 if vdu_id == v["vdu-id-ref"]:
1942 num_units = v.get("config-units") or 1
1943 break
1944 if vca_type != "k8s_proxy_charm":
1945 await self.vca_map[vca_type].install_configuration_sw(
1946 ee_id=ee_id,
1947 artifact_path=artifact_path,
1948 db_dict=db_dict,
1949 config=config,
1950 num_units=num_units,
1951 vca_id=vca_id,
1952 vca_type=vca_type,
1953 )
1954
1955 # write in db flag of configuration_sw already installed
1956 self.update_db_2(
1957 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1958 )
1959
1960 # add relations for this VCA (wait for other peers related with this VCA)
1961 is_relation_added = await self._add_vca_relations(
1962 logging_text=logging_text,
1963 nsr_id=nsr_id,
1964 vca_type=vca_type,
1965 vca_index=vca_index,
1966 )
1967
1968 if not is_relation_added:
1969 raise LcmException("Relations could not be added to VCA.")
1970
1971 # if SSH access is required, then get execution environment SSH public
1972 # if native charm we have waited already to VM be UP
1973 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
1974 pub_key = None
1975 user = None
1976 # self.logger.debug("get ssh key block")
1977 if deep_get(
1978 config_descriptor, ("config-access", "ssh-access", "required")
1979 ):
1980 # self.logger.debug("ssh key needed")
1981 # Needed to inject a ssh key
1982 user = deep_get(
1983 config_descriptor,
1984 ("config-access", "ssh-access", "default-user"),
1985 )
1986 step = "Install configuration Software, getting public ssh key"
1987 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1988 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1989 )
1990
1991 step = "Insert public key into VM user={} ssh_key={}".format(
1992 user, pub_key
1993 )
1994 else:
1995 # self.logger.debug("no need to get ssh key")
1996 step = "Waiting to VM being up and getting IP address"
1997 self.logger.debug(logging_text + step)
1998
1999 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2000 rw_mgmt_ip = None
2001
2002 # n2vc_redesign STEP 5.1
2003 # wait for RO (ip-address) Insert pub_key into VM
2004 if vnfr_id:
2005 if kdu_name:
2006 rw_mgmt_ip, services = await self.wait_kdu_up(
2007 logging_text, nsr_id, vnfr_id, kdu_name
2008 )
2009 vnfd = self.db.get_one(
2010 "vnfds_revisions",
2011 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2012 )
2013 kdu = get_kdu(vnfd, kdu_name)
2014 kdu_services = [
2015 service["name"] for service in get_kdu_services(kdu)
2016 ]
2017 exposed_services = []
2018 for service in services:
2019 if any(s in service["name"] for s in kdu_services):
2020 exposed_services.append(service)
2021 await self.vca_map[vca_type].exec_primitive(
2022 ee_id=ee_id,
2023 primitive_name="config",
2024 params_dict={
2025 "osm-config": json.dumps(
2026 OsmConfigBuilder(
2027 k8s={"services": exposed_services}
2028 ).build()
2029 )
2030 },
2031 vca_id=vca_id,
2032 )
2033
2034 # This verification is needed in order to avoid trying to add a public key
2035 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2036 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2037 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2038 # or it is a KNF)
2039 elif db_vnfr.get("vdur"):
2040 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2041 logging_text,
2042 nsr_id,
2043 vnfr_id,
2044 vdu_id,
2045 vdu_index,
2046 user=user,
2047 pub_key=pub_key,
2048 )
2049
2050 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2051
2052 # store rw_mgmt_ip in deploy params for later replacement
2053 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2054
2055 # n2vc_redesign STEP 6 Execute initial config primitive
2056 step = "execute initial config primitive"
2057
2058 # wait for dependent primitives execution (NS -> VNF -> VDU)
2059 if initial_config_primitive_list:
2060 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2061
2062 # stage, in function of element type: vdu, kdu, vnf or ns
2063 my_vca = vca_deployed_list[vca_index]
2064 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2065 # VDU or KDU
2066 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2067 elif my_vca.get("member-vnf-index"):
2068 # VNF
2069 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2070 else:
2071 # NS
2072 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2073
2074 self._write_configuration_status(
2075 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2076 )
2077
2078 self._write_op_status(op_id=nslcmop_id, stage=stage)
2079
2080 check_if_terminated_needed = True
2081 for initial_config_primitive in initial_config_primitive_list:
2082 # adding information on the vca_deployed if it is a NS execution environment
2083 if not vca_deployed["member-vnf-index"]:
2084 deploy_params["ns_config_info"] = json.dumps(
2085 self._get_ns_config_info(nsr_id)
2086 )
2087 # TODO check if already done
2088 primitive_params_ = self._map_primitive_params(
2089 initial_config_primitive, {}, deploy_params
2090 )
2091
2092 step = "execute primitive '{}' params '{}'".format(
2093 initial_config_primitive["name"], primitive_params_
2094 )
2095 self.logger.debug(logging_text + step)
2096 await self.vca_map[vca_type].exec_primitive(
2097 ee_id=ee_id,
2098 primitive_name=initial_config_primitive["name"],
2099 params_dict=primitive_params_,
2100 db_dict=db_dict,
2101 vca_id=vca_id,
2102 vca_type=vca_type,
2103 )
2104 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2105 if check_if_terminated_needed:
2106 if config_descriptor.get("terminate-config-primitive"):
2107 self.update_db_2(
2108 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2109 )
2110 check_if_terminated_needed = False
2111
2112 # TODO register in database that primitive is done
2113
2114 # STEP 7 Configure metrics
2115 if vca_type == "helm-v3":
2116 # TODO: review for those cases where the helm chart is a reference and
2117 # is not part of the NF package
2118 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2119 ee_id=ee_id,
2120 artifact_path=artifact_path,
2121 ee_config_descriptor=ee_config_descriptor,
2122 vnfr_id=vnfr_id,
2123 nsr_id=nsr_id,
2124 target_ip=rw_mgmt_ip,
2125 element_type=element_type,
2126 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
2127 vdu_id=vdu_id,
2128 vdu_index=vdu_index,
2129 kdu_name=kdu_name,
2130 kdu_index=kdu_index,
2131 )
2132 if prometheus_jobs:
2133 self.update_db_2(
2134 "nsrs",
2135 nsr_id,
2136 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2137 )
2138
2139 for job in prometheus_jobs:
2140 self.db.set_one(
2141 "prometheus_jobs",
2142 {"job_name": job["job_name"]},
2143 job,
2144 upsert=True,
2145 fail_on_empty=False,
2146 )
2147
2148 step = "instantiated at VCA"
2149 self.logger.debug(logging_text + step)
2150
2151 self._write_configuration_status(
2152 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2153 )
2154
2155 except Exception as e: # TODO not use Exception but N2VC exception
2156 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2157 if not isinstance(
2158 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2159 ):
2160 self.logger.error(
2161 "Exception while {} : {}".format(step, e), exc_info=True
2162 )
2163 self._write_configuration_status(
2164 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2165 )
2166 raise LcmException("{}. {}".format(step, e)) from e
2167
2168 def _write_ns_status(
2169 self,
2170 nsr_id: str,
2171 ns_state: str,
2172 current_operation: str,
2173 current_operation_id: str,
2174 error_description: str = None,
2175 error_detail: str = None,
2176 other_update: dict = None,
2177 ):
2178 """
2179 Update db_nsr fields.
2180 :param nsr_id:
2181 :param ns_state:
2182 :param current_operation:
2183 :param current_operation_id:
2184 :param error_description:
2185 :param error_detail:
2186 :param other_update: Other required changes at database if provided, will be cleared
2187 :return:
2188 """
2189 try:
2190 db_dict = other_update or {}
2191 db_dict[
2192 "_admin.nslcmop"
2193 ] = current_operation_id # for backward compatibility
2194 db_dict["_admin.current-operation"] = current_operation_id
2195 db_dict["_admin.operation-type"] = (
2196 current_operation if current_operation != "IDLE" else None
2197 )
2198 db_dict["currentOperation"] = current_operation
2199 db_dict["currentOperationID"] = current_operation_id
2200 db_dict["errorDescription"] = error_description
2201 db_dict["errorDetail"] = error_detail
2202
2203 if ns_state:
2204 db_dict["nsState"] = ns_state
2205 self.update_db_2("nsrs", nsr_id, db_dict)
2206 except DbException as e:
2207 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2208
2209 def _write_op_status(
2210 self,
2211 op_id: str,
2212 stage: list = None,
2213 error_message: str = None,
2214 queuePosition: int = 0,
2215 operation_state: str = None,
2216 other_update: dict = None,
2217 ):
2218 try:
2219 db_dict = other_update or {}
2220 db_dict["queuePosition"] = queuePosition
2221 if isinstance(stage, list):
2222 db_dict["stage"] = stage[0]
2223 db_dict["detailed-status"] = " ".join(stage)
2224 elif stage is not None:
2225 db_dict["stage"] = str(stage)
2226
2227 if error_message is not None:
2228 db_dict["errorMessage"] = error_message
2229 if operation_state is not None:
2230 db_dict["operationState"] = operation_state
2231 db_dict["statusEnteredTime"] = time()
2232 self.update_db_2("nslcmops", op_id, db_dict)
2233 except DbException as e:
2234 self.logger.warn(
2235 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2236 )
2237
2238 def _write_all_config_status(self, db_nsr: dict, status: str):
2239 try:
2240 nsr_id = db_nsr["_id"]
2241 # configurationStatus
2242 config_status = db_nsr.get("configurationStatus")
2243 if config_status:
2244 db_nsr_update = {
2245 "configurationStatus.{}.status".format(index): status
2246 for index, v in enumerate(config_status)
2247 if v
2248 }
2249 # update status
2250 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2251
2252 except DbException as e:
2253 self.logger.warn(
2254 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2255 )
2256
2257 def _write_configuration_status(
2258 self,
2259 nsr_id: str,
2260 vca_index: int,
2261 status: str = None,
2262 element_under_configuration: str = None,
2263 element_type: str = None,
2264 other_update: dict = None,
2265 ):
2266 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2267 # .format(vca_index, status))
2268
2269 try:
2270 db_path = "configurationStatus.{}.".format(vca_index)
2271 db_dict = other_update or {}
2272 if status:
2273 db_dict[db_path + "status"] = status
2274 if element_under_configuration:
2275 db_dict[
2276 db_path + "elementUnderConfiguration"
2277 ] = element_under_configuration
2278 if element_type:
2279 db_dict[db_path + "elementType"] = element_type
2280 self.update_db_2("nsrs", nsr_id, db_dict)
2281 except DbException as e:
2282 self.logger.warn(
2283 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2284 status, nsr_id, vca_index, e
2285 )
2286 )
2287
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and compute the placement (the VIM account where each VNF is deployed).

        If placement is delegated to the external PLA engine, the request is sent via
        kafka and this coroutine polls the database until the result is written at
        nslcmops "_admin.pla". The database is used (instead of an in-memory wait)
        because the result can be obtained from a different LCM worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
            computed 'vim-account-id'
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
            # poll the database for the PLA answer; total timeout is 5s * 10 = 50s
            db_poll_interval = 5
            wait = db_poll_interval * 10
            pla_result = None
            while not pla_result and wait >= 0:
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                # re-read the record: the answer may be written by another LCM worker (HA)
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                # skip entries without a placement decision or with an unknown member index
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs in place so later deployment steps see the decision
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
2334
2335 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2336 alerts = []
2337 nsr_id = vnfr["nsr-id-ref"]
2338 df = vnfd.get("df", [{}])[0]
2339 # Checking for auto-healing configuration
2340 if "healing-aspect" in df:
2341 healing_aspects = df["healing-aspect"]
2342 for healing in healing_aspects:
2343 for healing_policy in healing.get("healing-policy", ()):
2344 vdu_id = healing_policy["vdu-id"]
2345 vdur = next(
2346 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2347 {},
2348 )
2349 if not vdur:
2350 continue
2351 metric_name = "vm_status"
2352 vdu_name = vdur.get("name")
2353 vnf_member_index = vnfr["member-vnf-index-ref"]
2354 uuid = str(uuid4())
2355 name = f"healing_{uuid}"
2356 action = healing_policy
2357 # action_on_recovery = healing.get("action-on-recovery")
2358 # cooldown_time = healing.get("cooldown-time")
2359 # day1 = healing.get("day1")
2360 alert = {
2361 "uuid": uuid,
2362 "name": name,
2363 "metric": metric_name,
2364 "tags": {
2365 "ns_id": nsr_id,
2366 "vnf_member_index": vnf_member_index,
2367 "vdu_name": vdu_name,
2368 },
2369 "alarm_status": "ok",
2370 "action_type": "healing",
2371 "action": action,
2372 }
2373 alerts.append(alert)
2374 return alerts
2375
2376 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2377 alerts = []
2378 nsr_id = vnfr["nsr-id-ref"]
2379 df = vnfd.get("df", [{}])[0]
2380 # Checking for auto-scaling configuration
2381 if "scaling-aspect" in df:
2382 scaling_aspects = df["scaling-aspect"]
2383 all_vnfd_monitoring_params = {}
2384 for ivld in vnfd.get("int-virtual-link-desc", ()):
2385 for mp in ivld.get("monitoring-parameters", ()):
2386 all_vnfd_monitoring_params[mp.get("id")] = mp
2387 for vdu in vnfd.get("vdu", ()):
2388 for mp in vdu.get("monitoring-parameter", ()):
2389 all_vnfd_monitoring_params[mp.get("id")] = mp
2390 for df in vnfd.get("df", ()):
2391 for mp in df.get("monitoring-parameter", ()):
2392 all_vnfd_monitoring_params[mp.get("id")] = mp
2393 for scaling_aspect in scaling_aspects:
2394 scaling_group_name = scaling_aspect.get("name", "")
2395 # Get monitored VDUs
2396 all_monitored_vdus = set()
2397 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2398 "deltas", ()
2399 ):
2400 for vdu_delta in delta.get("vdu-delta", ()):
2401 all_monitored_vdus.add(vdu_delta.get("id"))
2402 monitored_vdurs = list(
2403 filter(
2404 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2405 vnfr["vdur"],
2406 )
2407 )
2408 if not monitored_vdurs:
2409 self.logger.error(
2410 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2411 )
2412 continue
2413 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2414 if scaling_policy["scaling-type"] != "automatic":
2415 continue
2416 threshold_time = scaling_policy.get("threshold-time", "1")
2417 cooldown_time = scaling_policy.get("cooldown-time", "0")
2418 for scaling_criteria in scaling_policy["scaling-criteria"]:
2419 monitoring_param_ref = scaling_criteria.get(
2420 "vnf-monitoring-param-ref"
2421 )
2422 vnf_monitoring_param = all_vnfd_monitoring_params[
2423 monitoring_param_ref
2424 ]
2425 for vdur in monitored_vdurs:
2426 vdu_id = vdur["vdu-id-ref"]
2427 metric_name = vnf_monitoring_param.get("performance-metric")
2428 metric_name = f"osm_{metric_name}"
2429 vnf_member_index = vnfr["member-vnf-index-ref"]
2430 scalein_threshold = scaling_criteria.get(
2431 "scale-in-threshold"
2432 )
2433 scaleout_threshold = scaling_criteria.get(
2434 "scale-out-threshold"
2435 )
2436 # Looking for min/max-number-of-instances
2437 instances_min_number = 1
2438 instances_max_number = 1
2439 vdu_profile = df["vdu-profile"]
2440 if vdu_profile:
2441 profile = next(
2442 item for item in vdu_profile if item["id"] == vdu_id
2443 )
2444 instances_min_number = profile.get(
2445 "min-number-of-instances", 1
2446 )
2447 instances_max_number = profile.get(
2448 "max-number-of-instances", 1
2449 )
2450
2451 if scalein_threshold:
2452 uuid = str(uuid4())
2453 name = f"scalein_{uuid}"
2454 operation = scaling_criteria[
2455 "scale-in-relational-operation"
2456 ]
2457 rel_operator = self.rel_operation_types.get(
2458 operation, "<="
2459 )
2460 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2461 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2462 labels = {
2463 "ns_id": nsr_id,
2464 "vnf_member_index": vnf_member_index,
2465 "vdu_id": vdu_id,
2466 }
2467 prom_cfg = {
2468 "alert": name,
2469 "expr": expression,
2470 "for": str(threshold_time) + "m",
2471 "labels": labels,
2472 }
2473 action = scaling_policy
2474 action = {
2475 "scaling-group": scaling_group_name,
2476 "cooldown-time": cooldown_time,
2477 }
2478 alert = {
2479 "uuid": uuid,
2480 "name": name,
2481 "metric": metric_name,
2482 "tags": {
2483 "ns_id": nsr_id,
2484 "vnf_member_index": vnf_member_index,
2485 "vdu_id": vdu_id,
2486 },
2487 "alarm_status": "ok",
2488 "action_type": "scale_in",
2489 "action": action,
2490 "prometheus_config": prom_cfg,
2491 }
2492 alerts.append(alert)
2493
2494 if scaleout_threshold:
2495 uuid = str(uuid4())
2496 name = f"scaleout_{uuid}"
2497 operation = scaling_criteria[
2498 "scale-out-relational-operation"
2499 ]
2500 rel_operator = self.rel_operation_types.get(
2501 operation, "<="
2502 )
2503 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2504 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2505 labels = {
2506 "ns_id": nsr_id,
2507 "vnf_member_index": vnf_member_index,
2508 "vdu_id": vdu_id,
2509 }
2510 prom_cfg = {
2511 "alert": name,
2512 "expr": expression,
2513 "for": str(threshold_time) + "m",
2514 "labels": labels,
2515 }
2516 action = scaling_policy
2517 action = {
2518 "scaling-group": scaling_group_name,
2519 "cooldown-time": cooldown_time,
2520 }
2521 alert = {
2522 "uuid": uuid,
2523 "name": name,
2524 "metric": metric_name,
2525 "tags": {
2526 "ns_id": nsr_id,
2527 "vnf_member_index": vnf_member_index,
2528 "vdu_id": vdu_id,
2529 },
2530 "alarm_status": "ok",
2531 "action_type": "scale_out",
2532 "action": action,
2533 "prometheus_config": prom_cfg,
2534 }
2535 alerts.append(alert)
2536 return alerts
2537
    def _gather_vnfr_alarm_alerts(self, vnfr, vnfd):
        """
        Build the list of VDU alarm alert definitions for one VNF.

        For every VDU of the VNF record whose descriptor declares "alarm"
        entries, one alert is generated per alarm descriptor, carrying the
        Prometheus rule under "prometheus_config". Alarms whose monitoring
        parameter reference cannot be resolved are logged and skipped.
        :param vnfr: VNF record (database content)
        :param vnfd: VNF descriptor (database content)
        :return: list of alert dictionaries (may be empty)
        """
        alerts = []
        nsr_id = vnfr["nsr-id-ref"]
        vnf_member_index = vnfr["member-vnf-index-ref"]

        # Checking for VNF alarm configuration
        for vdur in vnfr["vdur"]:
            vdu_id = vdur["vdu-id-ref"]
            # NOTE(review): assumes every vdur has a matching vdu entry in the
            # descriptor; otherwise next() raises StopIteration — confirm
            vdu = next(filter(lambda vdu: vdu["id"] == vdu_id, vnfd["vdu"]))
            if "alarm" in vdu:
                # Get VDU monitoring params, since alerts are based on them
                vdu_monitoring_params = {}
                for mp in vdu.get("monitoring-parameter", []):
                    vdu_monitoring_params[mp.get("id")] = mp
                if not vdu_monitoring_params:
                    self.logger.error(
                        "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
                    )
                    continue
                # Get alarms in the VDU
                alarm_descriptors = vdu["alarm"]
                # Create VDU alarms for each alarm in the VDU
                for alarm_descriptor in alarm_descriptors:
                    # Check that the VDU alarm refers to a proper monitoring param
                    alarm_monitoring_param = alarm_descriptor.get(
                        "vnf-monitoring-param-ref", ""
                    )
                    vdu_specific_monitoring_param = vdu_monitoring_params.get(
                        alarm_monitoring_param, {}
                    )
                    if not vdu_specific_monitoring_param:
                        self.logger.error(
                            "VDU alarm refers to a VDU monitoring param not present in the VDU"
                        )
                        continue
                    metric_name = vdu_specific_monitoring_param.get(
                        "performance-metric"
                    )
                    if not metric_name:
                        self.logger.error(
                            "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
                        )
                        continue
                    # Set params of the alarm to be created in Prometheus
                    # OSM-exported metrics carry the "osm_" prefix
                    metric_name = f"osm_{metric_name}"
                    metric_threshold = alarm_descriptor.get("value")
                    uuid = str(uuid4())
                    alert_name = f"vdu_alarm_{uuid}"
                    operation = alarm_descriptor["operation"]
                    # map IM relational operation to a Prometheus operator
                    rel_operator = self.rel_operation_types.get(operation, "<=")
                    metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
                    expression = f"{metric_selector} {rel_operator} {metric_threshold}"
                    labels = {
                        "ns_id": nsr_id,
                        "vnf_member_index": vnf_member_index,
                        "vdu_id": vdu_id,
                        # resolved by Prometheus at evaluation time
                        "vdu_name": "{{ $labels.vdu_name }}",
                    }
                    prom_cfg = {
                        "alert": alert_name,
                        "expr": expression,
                        "for": "1m",  # default value. Ideally, this should be related to an IM param, but there is not such param
                        "labels": labels,
                    }
                    alarm_action = dict()
                    # keep only the actions declared for the known trigger states
                    for action_type in ["ok", "insufficient-data", "alarm"]:
                        if (
                            "actions" in alarm_descriptor
                            and action_type in alarm_descriptor["actions"]
                        ):
                            alarm_action[action_type] = alarm_descriptor["actions"][
                                action_type
                            ]
                    alert = {
                        "uuid": uuid,
                        "name": alert_name,
                        "metric": metric_name,
                        "tags": {
                            "ns_id": nsr_id,
                            "vnf_member_index": vnf_member_index,
                            "vdu_id": vdu_id,
                        },
                        "alarm_status": "ok",
                        "action_type": "vdu_alarm",
                        "action": alarm_action,
                        "prometheus_config": prom_cfg,
                    }
                    alerts.append(alert)
        return alerts
2627
2628 def update_nsrs_with_pla_result(self, params):
2629 try:
2630 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2631 self.update_db_2(
2632 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2633 )
2634 except Exception as e:
2635 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2636
2637 async def instantiate(self, nsr_id, nslcmop_id):
2638 """
2639
2640 :param nsr_id: ns instance to deploy
2641 :param nslcmop_id: operation to run
2642 :return:
2643 """
2644
2645 # Try to lock HA task here
2646 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2647 if not task_is_locked_by_me:
2648 self.logger.debug(
2649 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2650 )
2651 return
2652
2653 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2654 self.logger.debug(logging_text + "Enter")
2655
2656 # get all needed from database
2657
2658 # database nsrs record
2659 db_nsr = None
2660
2661 # database nslcmops record
2662 db_nslcmop = None
2663
2664 # update operation on nsrs
2665 db_nsr_update = {}
2666 # update operation on nslcmops
2667 db_nslcmop_update = {}
2668
2669 timeout_ns_deploy = self.timeout.ns_deploy
2670
2671 nslcmop_operation_state = None
2672 db_vnfrs = {} # vnf's info indexed by member-index
2673 # n2vc_info = {}
2674 tasks_dict_info = {} # from task to info text
2675 exc = None
2676 error_list = []
2677 stage = [
2678 "Stage 1/5: preparation of the environment.",
2679 "Waiting for previous operations to terminate.",
2680 "",
2681 ]
2682 # ^ stage, step, VIM progress
2683 try:
2684 # wait for any previous tasks in process
2685 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2686
2687 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2688 stage[1] = "Reading from database."
2689 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2690 db_nsr_update["detailed-status"] = "creating"
2691 db_nsr_update["operational-status"] = "init"
2692 self._write_ns_status(
2693 nsr_id=nsr_id,
2694 ns_state="BUILDING",
2695 current_operation="INSTANTIATING",
2696 current_operation_id=nslcmop_id,
2697 other_update=db_nsr_update,
2698 )
2699 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2700
2701 # read from db: operation
2702 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2703 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2704 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2705 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2706 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2707 )
2708 ns_params = db_nslcmop.get("operationParams")
2709 if ns_params and ns_params.get("timeout_ns_deploy"):
2710 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2711
2712 # read from db: ns
2713 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2714 self.logger.debug(logging_text + stage[1])
2715 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2716 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2717 self.logger.debug(logging_text + stage[1])
2718 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2719 self.fs.sync(db_nsr["nsd-id"])
2720 db_nsr["nsd"] = nsd
2721 # nsr_name = db_nsr["name"] # TODO short-name??
2722
2723 # read from db: vnf's of this ns
2724 stage[1] = "Getting vnfrs from db."
2725 self.logger.debug(logging_text + stage[1])
2726 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2727
2728 # read from db: vnfd's for every vnf
2729 db_vnfds = [] # every vnfd data
2730
2731 # for each vnf in ns, read vnfd
2732 for vnfr in db_vnfrs_list:
2733 if vnfr.get("kdur"):
2734 kdur_list = []
2735 for kdur in vnfr["kdur"]:
2736 if kdur.get("additionalParams"):
2737 kdur["additionalParams"] = json.loads(
2738 kdur["additionalParams"]
2739 )
2740 kdur_list.append(kdur)
2741 vnfr["kdur"] = kdur_list
2742
2743 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2744 vnfd_id = vnfr["vnfd-id"]
2745 vnfd_ref = vnfr["vnfd-ref"]
2746 self.fs.sync(vnfd_id)
2747
2748 # if we haven't this vnfd, read it from db
2749 if vnfd_id not in db_vnfds:
2750 # read from db
2751 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2752 vnfd_id, vnfd_ref
2753 )
2754 self.logger.debug(logging_text + stage[1])
2755 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2756
2757 # store vnfd
2758 db_vnfds.append(vnfd)
2759
2760 # Get or generates the _admin.deployed.VCA list
2761 vca_deployed_list = None
2762 if db_nsr["_admin"].get("deployed"):
2763 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2764 if vca_deployed_list is None:
2765 vca_deployed_list = []
2766 configuration_status_list = []
2767 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2768 db_nsr_update["configurationStatus"] = configuration_status_list
2769 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2770 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2771 elif isinstance(vca_deployed_list, dict):
2772 # maintain backward compatibility. Change a dict to list at database
2773 vca_deployed_list = list(vca_deployed_list.values())
2774 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2775 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2776
2777 if not isinstance(
2778 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2779 ):
2780 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2781 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2782
2783 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2784 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2785 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2786 self.db.set_list(
2787 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2788 )
2789
2790 # n2vc_redesign STEP 2 Deploy Network Scenario
2791 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2792 self._write_op_status(op_id=nslcmop_id, stage=stage)
2793
2794 stage[1] = "Deploying KDUs."
2795 # self.logger.debug(logging_text + "Before deploy_kdus")
2796 # Call to deploy_kdus in case exists the "vdu:kdu" param
2797 await self.deploy_kdus(
2798 logging_text=logging_text,
2799 nsr_id=nsr_id,
2800 nslcmop_id=nslcmop_id,
2801 db_vnfrs=db_vnfrs,
2802 db_vnfds=db_vnfds,
2803 task_instantiation_info=tasks_dict_info,
2804 )
2805
2806 stage[1] = "Getting VCA public key."
2807 # n2vc_redesign STEP 1 Get VCA public ssh-key
2808 # feature 1429. Add n2vc public key to needed VMs
2809 n2vc_key = self.n2vc.get_public_key()
2810 n2vc_key_list = [n2vc_key]
2811 if self.vca_config.public_key:
2812 n2vc_key_list.append(self.vca_config.public_key)
2813
2814 stage[1] = "Deploying NS at VIM."
2815 task_ro = asyncio.ensure_future(
2816 self.instantiate_RO(
2817 logging_text=logging_text,
2818 nsr_id=nsr_id,
2819 nsd=nsd,
2820 db_nsr=db_nsr,
2821 db_nslcmop=db_nslcmop,
2822 db_vnfrs=db_vnfrs,
2823 db_vnfds=db_vnfds,
2824 n2vc_key_list=n2vc_key_list,
2825 stage=stage,
2826 )
2827 )
2828 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2829 tasks_dict_info[task_ro] = "Deploying at VIM"
2830
2831 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2832 stage[1] = "Deploying Execution Environments."
2833 self.logger.debug(logging_text + stage[1])
2834
2835 # create namespace and certificate if any helm based EE is present in the NS
2836 if check_helm_ee_in_ns(db_vnfds):
2837 await self.vca_map["helm-v3"].setup_ns_namespace(
2838 name=nsr_id,
2839 )
2840 # create TLS certificates
2841 await self.vca_map["helm-v3"].create_tls_certificate(
2842 secret_name=self.EE_TLS_NAME,
2843 dns_prefix="*",
2844 nsr_id=nsr_id,
2845 usage="server auth",
2846 namespace=nsr_id,
2847 )
2848
2849 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2850 for vnf_profile in get_vnf_profiles(nsd):
2851 vnfd_id = vnf_profile["vnfd-id"]
2852 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2853 member_vnf_index = str(vnf_profile["id"])
2854 db_vnfr = db_vnfrs[member_vnf_index]
2855 base_folder = vnfd["_admin"]["storage"]
2856 vdu_id = None
2857 vdu_index = 0
2858 vdu_name = None
2859 kdu_name = None
2860 kdu_index = None
2861
2862 # Get additional parameters
2863 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2864 if db_vnfr.get("additionalParamsForVnf"):
2865 deploy_params.update(
2866 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2867 )
2868
2869 descriptor_config = get_configuration(vnfd, vnfd["id"])
2870 if descriptor_config:
2871 self._deploy_n2vc(
2872 logging_text=logging_text
2873 + "member_vnf_index={} ".format(member_vnf_index),
2874 db_nsr=db_nsr,
2875 db_vnfr=db_vnfr,
2876 nslcmop_id=nslcmop_id,
2877 nsr_id=nsr_id,
2878 nsi_id=nsi_id,
2879 vnfd_id=vnfd_id,
2880 vdu_id=vdu_id,
2881 kdu_name=kdu_name,
2882 member_vnf_index=member_vnf_index,
2883 vdu_index=vdu_index,
2884 kdu_index=kdu_index,
2885 vdu_name=vdu_name,
2886 deploy_params=deploy_params,
2887 descriptor_config=descriptor_config,
2888 base_folder=base_folder,
2889 task_instantiation_info=tasks_dict_info,
2890 stage=stage,
2891 )
2892
2893 # Deploy charms for each VDU that supports one.
2894 for vdud in get_vdu_list(vnfd):
2895 vdu_id = vdud["id"]
2896 descriptor_config = get_configuration(vnfd, vdu_id)
2897 vdur = find_in_list(
2898 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2899 )
2900
2901 if vdur.get("additionalParams"):
2902 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2903 else:
2904 deploy_params_vdu = deploy_params
2905 deploy_params_vdu["OSM"] = get_osm_params(
2906 db_vnfr, vdu_id, vdu_count_index=0
2907 )
2908 vdud_count = get_number_of_instances(vnfd, vdu_id)
2909
2910 self.logger.debug("VDUD > {}".format(vdud))
2911 self.logger.debug(
2912 "Descriptor config > {}".format(descriptor_config)
2913 )
2914 if descriptor_config:
2915 vdu_name = None
2916 kdu_name = None
2917 kdu_index = None
2918 for vdu_index in range(vdud_count):
2919 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2920 self._deploy_n2vc(
2921 logging_text=logging_text
2922 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2923 member_vnf_index, vdu_id, vdu_index
2924 ),
2925 db_nsr=db_nsr,
2926 db_vnfr=db_vnfr,
2927 nslcmop_id=nslcmop_id,
2928 nsr_id=nsr_id,
2929 nsi_id=nsi_id,
2930 vnfd_id=vnfd_id,
2931 vdu_id=vdu_id,
2932 kdu_name=kdu_name,
2933 kdu_index=kdu_index,
2934 member_vnf_index=member_vnf_index,
2935 vdu_index=vdu_index,
2936 vdu_name=vdu_name,
2937 deploy_params=deploy_params_vdu,
2938 descriptor_config=descriptor_config,
2939 base_folder=base_folder,
2940 task_instantiation_info=tasks_dict_info,
2941 stage=stage,
2942 )
2943 for kdud in get_kdu_list(vnfd):
2944 kdu_name = kdud["name"]
2945 descriptor_config = get_configuration(vnfd, kdu_name)
2946 if descriptor_config:
2947 vdu_id = None
2948 vdu_index = 0
2949 vdu_name = None
2950 kdu_index, kdur = next(
2951 x
2952 for x in enumerate(db_vnfr["kdur"])
2953 if x[1]["kdu-name"] == kdu_name
2954 )
2955 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2956 if kdur.get("additionalParams"):
2957 deploy_params_kdu.update(
2958 parse_yaml_strings(kdur["additionalParams"].copy())
2959 )
2960
2961 self._deploy_n2vc(
2962 logging_text=logging_text,
2963 db_nsr=db_nsr,
2964 db_vnfr=db_vnfr,
2965 nslcmop_id=nslcmop_id,
2966 nsr_id=nsr_id,
2967 nsi_id=nsi_id,
2968 vnfd_id=vnfd_id,
2969 vdu_id=vdu_id,
2970 kdu_name=kdu_name,
2971 member_vnf_index=member_vnf_index,
2972 vdu_index=vdu_index,
2973 kdu_index=kdu_index,
2974 vdu_name=vdu_name,
2975 deploy_params=deploy_params_kdu,
2976 descriptor_config=descriptor_config,
2977 base_folder=base_folder,
2978 task_instantiation_info=tasks_dict_info,
2979 stage=stage,
2980 )
2981
2982 # Check if each vnf has exporter for metric collection if so update prometheus job records
2983 if "exporters-endpoints" in vnfd.get("df")[0]:
2984 exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
2985 self.logger.debug("exporter config :{}".format(exporter_config))
2986 artifact_path = "{}/{}/{}".format(
2987 base_folder["folder"],
2988 base_folder["pkg-dir"],
2989 "exporter-endpoint",
2990 )
2991 ee_id = None
2992 ee_config_descriptor = exporter_config
2993 vnfr_id = db_vnfr["id"]
2994 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2995 logging_text,
2996 nsr_id,
2997 vnfr_id,
2998 vdu_id=None,
2999 vdu_index=None,
3000 user=None,
3001 pub_key=None,
3002 )
3003 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
3004 self.logger.debug("Artifact_path:{}".format(artifact_path))
3005 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
3006 vdu_id_for_prom = None
3007 vdu_index_for_prom = None
3008 for x in get_iterable(db_vnfr, "vdur"):
3009 vdu_id_for_prom = x.get("vdu-id-ref")
3010 vdu_index_for_prom = x.get("count-index")
3011 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
3012 ee_id=ee_id,
3013 artifact_path=artifact_path,
3014 ee_config_descriptor=ee_config_descriptor,
3015 vnfr_id=vnfr_id,
3016 nsr_id=nsr_id,
3017 target_ip=rw_mgmt_ip,
3018 element_type="VDU",
3019 vdu_id=vdu_id_for_prom,
3020 vdu_index=vdu_index_for_prom,
3021 )
3022
3023 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
3024 if prometheus_jobs:
3025 db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
3026 self.update_db_2(
3027 "nsrs",
3028 nsr_id,
3029 db_nsr_update,
3030 )
3031
3032 for job in prometheus_jobs:
3033 self.db.set_one(
3034 "prometheus_jobs",
3035 {"job_name": job["job_name"]},
3036 job,
3037 upsert=True,
3038 fail_on_empty=False,
3039 )
3040
3041 # Check if this NS has a charm configuration
3042 descriptor_config = nsd.get("ns-configuration")
3043 if descriptor_config and descriptor_config.get("juju"):
3044 vnfd_id = None
3045 db_vnfr = None
3046 member_vnf_index = None
3047 vdu_id = None
3048 kdu_name = None
3049 kdu_index = None
3050 vdu_index = 0
3051 vdu_name = None
3052
3053 # Get additional parameters
3054 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
3055 if db_nsr.get("additionalParamsForNs"):
3056 deploy_params.update(
3057 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
3058 )
3059 base_folder = nsd["_admin"]["storage"]
3060 self._deploy_n2vc(
3061 logging_text=logging_text,
3062 db_nsr=db_nsr,
3063 db_vnfr=db_vnfr,
3064 nslcmop_id=nslcmop_id,
3065 nsr_id=nsr_id,
3066 nsi_id=nsi_id,
3067 vnfd_id=vnfd_id,
3068 vdu_id=vdu_id,
3069 kdu_name=kdu_name,
3070 member_vnf_index=member_vnf_index,
3071 vdu_index=vdu_index,
3072 kdu_index=kdu_index,
3073 vdu_name=vdu_name,
3074 deploy_params=deploy_params,
3075 descriptor_config=descriptor_config,
3076 base_folder=base_folder,
3077 task_instantiation_info=tasks_dict_info,
3078 stage=stage,
3079 )
3080
3081 # rest of staff will be done at finally
3082
3083 except (
3084 ROclient.ROClientException,
3085 DbException,
3086 LcmException,
3087 N2VCException,
3088 ) as e:
3089 self.logger.error(
3090 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
3091 )
3092 exc = e
3093 except asyncio.CancelledError:
3094 self.logger.error(
3095 logging_text + "Cancelled Exception while '{}'".format(stage[1])
3096 )
3097 exc = "Operation was cancelled"
3098 except Exception as e:
3099 exc = traceback.format_exc()
3100 self.logger.critical(
3101 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
3102 exc_info=True,
3103 )
3104 finally:
3105 if exc:
3106 error_list.append(str(exc))
3107 try:
3108 # wait for pending tasks
3109 if tasks_dict_info:
3110 stage[1] = "Waiting for instantiate pending tasks."
3111 self.logger.debug(logging_text + stage[1])
3112 error_list += await self._wait_for_tasks(
3113 logging_text,
3114 tasks_dict_info,
3115 timeout_ns_deploy,
3116 stage,
3117 nslcmop_id,
3118 nsr_id=nsr_id,
3119 )
3120 stage[1] = stage[2] = ""
3121 except asyncio.CancelledError:
3122 error_list.append("Cancelled")
3123 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
3124 await self._wait_for_tasks(
3125 logging_text,
3126 tasks_dict_info,
3127 timeout_ns_deploy,
3128 stage,
3129 nslcmop_id,
3130 nsr_id=nsr_id,
3131 )
3132 except Exception as exc:
3133 error_list.append(str(exc))
3134
3135 # update operation-status
3136 db_nsr_update["operational-status"] = "running"
3137 # let's begin with VCA 'configured' status (later we can change it)
3138 db_nsr_update["config-status"] = "configured"
3139 for task, task_name in tasks_dict_info.items():
3140 if not task.done() or task.cancelled() or task.exception():
3141 if task_name.startswith(self.task_name_deploy_vca):
3142 # A N2VC task is pending
3143 db_nsr_update["config-status"] = "failed"
3144 else:
3145 # RO or KDU task is pending
3146 db_nsr_update["operational-status"] = "failed"
3147
3148 # update status at database
3149 if error_list:
3150 error_detail = ". ".join(error_list)
3151 self.logger.error(logging_text + error_detail)
3152 error_description_nslcmop = "{} Detail: {}".format(
3153 stage[0], error_detail
3154 )
3155 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
3156 nslcmop_id, stage[0]
3157 )
3158
3159 db_nsr_update["detailed-status"] = (
3160 error_description_nsr + " Detail: " + error_detail
3161 )
3162 db_nslcmop_update["detailed-status"] = error_detail
3163 nslcmop_operation_state = "FAILED"
3164 ns_state = "BROKEN"
3165 else:
3166 error_detail = None
3167 error_description_nsr = error_description_nslcmop = None
3168 ns_state = "READY"
3169 db_nsr_update["detailed-status"] = "Done"
3170 db_nslcmop_update["detailed-status"] = "Done"
3171 nslcmop_operation_state = "COMPLETED"
3172 # Gather auto-healing and auto-scaling alerts for each vnfr
3173 healing_alerts = []
3174 scaling_alerts = []
3175 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3176 vnfd = next(
3177 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3178 )
3179 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3180 for alert in healing_alerts:
3181 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3182 self.db.create("alerts", alert)
3183
3184 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3185 for alert in scaling_alerts:
3186 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3187 self.db.create("alerts", alert)
3188
3189 alarm_alerts = self._gather_vnfr_alarm_alerts(vnfr, vnfd)
3190 for alert in alarm_alerts:
3191 self.logger.info(f"Storing VNF alarm alert in MongoDB: {alert}")
3192 self.db.create("alerts", alert)
3193 if db_nsr:
3194 self._write_ns_status(
3195 nsr_id=nsr_id,
3196 ns_state=ns_state,
3197 current_operation="IDLE",
3198 current_operation_id=None,
3199 error_description=error_description_nsr,
3200 error_detail=error_detail,
3201 other_update=db_nsr_update,
3202 )
3203 self._write_op_status(
3204 op_id=nslcmop_id,
3205 stage="",
3206 error_message=error_description_nslcmop,
3207 operation_state=nslcmop_operation_state,
3208 other_update=db_nslcmop_update,
3209 )
3210
3211 if nslcmop_operation_state:
3212 try:
3213 await self.msg.aiowrite(
3214 "ns",
3215 "instantiated",
3216 {
3217 "nsr_id": nsr_id,
3218 "nslcmop_id": nslcmop_id,
3219 "operationState": nslcmop_operation_state,
3220 "startTime": db_nslcmop["startTime"],
3221 "links": db_nslcmop["links"],
3222 "operationParams": {
3223 "nsInstanceId": nsr_id,
3224 "nsdId": db_nsr["nsd-id"],
3225 },
3226 },
3227 )
3228 except Exception as e:
3229 self.logger.error(
3230 logging_text + "kafka_write notification Exception {}".format(e)
3231 )
3232
3233 self.logger.debug(logging_text + "Exit")
3234 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3235
3236 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
3237 if vnfd_id not in cached_vnfds:
3238 cached_vnfds[vnfd_id] = self.db.get_one(
3239 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3240 )
3241 return cached_vnfds[vnfd_id]
3242
3243 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3244 if vnf_profile_id not in cached_vnfrs:
3245 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3246 "vnfrs",
3247 {
3248 "member-vnf-index-ref": vnf_profile_id,
3249 "nsr-id-ref": nsr_id,
3250 },
3251 )
3252 return cached_vnfrs[vnf_profile_id]
3253
3254 def _is_deployed_vca_in_relation(
3255 self, vca: DeployedVCA, relation: Relation
3256 ) -> bool:
3257 found = False
3258 for endpoint in (relation.provider, relation.requirer):
3259 if endpoint["kdu-resource-profile-id"]:
3260 continue
3261 found = (
3262 vca.vnf_profile_id == endpoint.vnf_profile_id
3263 and vca.vdu_profile_id == endpoint.vdu_profile_id
3264 and vca.execution_environment_ref == endpoint.execution_environment_ref
3265 )
3266 if found:
3267 break
3268 return found
3269
3270 def _update_ee_relation_data_with_implicit_data(
3271 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
3272 ):
3273 ee_relation_data = safe_get_ee_relation(
3274 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
3275 )
3276 ee_relation_level = EELevel.get_level(ee_relation_data)
3277 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
3278 "execution-environment-ref"
3279 ]:
3280 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
3281 vnfd_id = vnf_profile["vnfd-id"]
3282 project = nsd["_admin"]["projects_read"][0]
3283 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3284 entity_id = (
3285 vnfd_id
3286 if ee_relation_level == EELevel.VNF
3287 else ee_relation_data["vdu-profile-id"]
3288 )
3289 ee = get_juju_ee_ref(db_vnfd, entity_id)
3290 if not ee:
3291 raise Exception(
3292 f"not execution environments found for ee_relation {ee_relation_data}"
3293 )
3294 ee_relation_data["execution-environment-ref"] = ee["id"]
3295 return ee_relation_data
3296
3297 def _get_ns_relations(
3298 self,
3299 nsr_id: str,
3300 nsd: Dict[str, Any],
3301 vca: DeployedVCA,
3302 cached_vnfds: Dict[str, Any],
3303 ) -> List[Relation]:
3304 relations = []
3305 db_ns_relations = get_ns_configuration_relation_list(nsd)
3306 for r in db_ns_relations:
3307 provider_dict = None
3308 requirer_dict = None
3309 if all(key in r for key in ("provider", "requirer")):
3310 provider_dict = r["provider"]
3311 requirer_dict = r["requirer"]
3312 elif "entities" in r:
3313 provider_id = r["entities"][0]["id"]
3314 provider_dict = {
3315 "nsr-id": nsr_id,
3316 "endpoint": r["entities"][0]["endpoint"],
3317 }
3318 if provider_id != nsd["id"]:
3319 provider_dict["vnf-profile-id"] = provider_id
3320 requirer_id = r["entities"][1]["id"]
3321 requirer_dict = {
3322 "nsr-id": nsr_id,
3323 "endpoint": r["entities"][1]["endpoint"],
3324 }
3325 if requirer_id != nsd["id"]:
3326 requirer_dict["vnf-profile-id"] = requirer_id
3327 else:
3328 raise Exception(
3329 "provider/requirer or entities must be included in the relation."
3330 )
3331 relation_provider = self._update_ee_relation_data_with_implicit_data(
3332 nsr_id, nsd, provider_dict, cached_vnfds
3333 )
3334 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3335 nsr_id, nsd, requirer_dict, cached_vnfds
3336 )
3337 provider = EERelation(relation_provider)
3338 requirer = EERelation(relation_requirer)
3339 relation = Relation(r["name"], provider, requirer)
3340 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3341 if vca_in_relation:
3342 relations.append(relation)
3343 return relations
3344
3345 def _get_vnf_relations(
3346 self,
3347 nsr_id: str,
3348 nsd: Dict[str, Any],
3349 vca: DeployedVCA,
3350 cached_vnfds: Dict[str, Any],
3351 ) -> List[Relation]:
3352 relations = []
3353 if vca.target_element == "ns":
3354 self.logger.debug("VCA is a NS charm, not a VNF.")
3355 return relations
3356 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3357 vnf_profile_id = vnf_profile["id"]
3358 vnfd_id = vnf_profile["vnfd-id"]
3359 project = nsd["_admin"]["projects_read"][0]
3360 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3361 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3362 for r in db_vnf_relations:
3363 provider_dict = None
3364 requirer_dict = None
3365 if all(key in r for key in ("provider", "requirer")):
3366 provider_dict = r["provider"]
3367 requirer_dict = r["requirer"]
3368 elif "entities" in r:
3369 provider_id = r["entities"][0]["id"]
3370 provider_dict = {
3371 "nsr-id": nsr_id,
3372 "vnf-profile-id": vnf_profile_id,
3373 "endpoint": r["entities"][0]["endpoint"],
3374 }
3375 if provider_id != vnfd_id:
3376 provider_dict["vdu-profile-id"] = provider_id
3377 requirer_id = r["entities"][1]["id"]
3378 requirer_dict = {
3379 "nsr-id": nsr_id,
3380 "vnf-profile-id": vnf_profile_id,
3381 "endpoint": r["entities"][1]["endpoint"],
3382 }
3383 if requirer_id != vnfd_id:
3384 requirer_dict["vdu-profile-id"] = requirer_id
3385 else:
3386 raise Exception(
3387 "provider/requirer or entities must be included in the relation."
3388 )
3389 relation_provider = self._update_ee_relation_data_with_implicit_data(
3390 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3391 )
3392 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3393 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3394 )
3395 provider = EERelation(relation_provider)
3396 requirer = EERelation(relation_requirer)
3397 relation = Relation(r["name"], provider, requirer)
3398 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3399 if vca_in_relation:
3400 relations.append(relation)
3401 return relations
3402
3403 def _get_kdu_resource_data(
3404 self,
3405 ee_relation: EERelation,
3406 db_nsr: Dict[str, Any],
3407 cached_vnfds: Dict[str, Any],
3408 ) -> DeployedK8sResource:
3409 nsd = get_nsd(db_nsr)
3410 vnf_profiles = get_vnf_profiles(nsd)
3411 vnfd_id = find_in_list(
3412 vnf_profiles,
3413 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3414 )["vnfd-id"]
3415 project = nsd["_admin"]["projects_read"][0]
3416 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3417 kdu_resource_profile = get_kdu_resource_profile(
3418 db_vnfd, ee_relation.kdu_resource_profile_id
3419 )
3420 kdu_name = kdu_resource_profile["kdu-name"]
3421 deployed_kdu, _ = get_deployed_kdu(
3422 db_nsr.get("_admin", ()).get("deployed", ()),
3423 kdu_name,
3424 ee_relation.vnf_profile_id,
3425 )
3426 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3427 return deployed_kdu
3428
3429 def _get_deployed_component(
3430 self,
3431 ee_relation: EERelation,
3432 db_nsr: Dict[str, Any],
3433 cached_vnfds: Dict[str, Any],
3434 ) -> DeployedComponent:
3435 nsr_id = db_nsr["_id"]
3436 deployed_component = None
3437 ee_level = EELevel.get_level(ee_relation)
3438 if ee_level == EELevel.NS:
3439 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3440 if vca:
3441 deployed_component = DeployedVCA(nsr_id, vca)
3442 elif ee_level == EELevel.VNF:
3443 vca = get_deployed_vca(
3444 db_nsr,
3445 {
3446 "vdu_id": None,
3447 "member-vnf-index": ee_relation.vnf_profile_id,
3448 "ee_descriptor_id": ee_relation.execution_environment_ref,
3449 },
3450 )
3451 if vca:
3452 deployed_component = DeployedVCA(nsr_id, vca)
3453 elif ee_level == EELevel.VDU:
3454 vca = get_deployed_vca(
3455 db_nsr,
3456 {
3457 "vdu_id": ee_relation.vdu_profile_id,
3458 "member-vnf-index": ee_relation.vnf_profile_id,
3459 "ee_descriptor_id": ee_relation.execution_environment_ref,
3460 },
3461 )
3462 if vca:
3463 deployed_component = DeployedVCA(nsr_id, vca)
3464 elif ee_level == EELevel.KDU:
3465 kdu_resource_data = self._get_kdu_resource_data(
3466 ee_relation, db_nsr, cached_vnfds
3467 )
3468 if kdu_resource_data:
3469 deployed_component = DeployedK8sResource(kdu_resource_data)
3470 return deployed_component
3471
    async def _add_relation(
        self,
        relation: Relation,
        vca_type: str,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
        cached_vnfrs: Dict[str, Any],
    ) -> bool:
        """Try to establish one relation between its two endpoints.

        :param relation: provider/requirer endpoint pair to connect.
        :param vca_type: key into self.vca_map selecting the connector used.
        :param db_nsr: current nsrs record, used to locate deployed components.
        :param cached_vnfds: memoization dict for VNFD lookups.
        :param cached_vnfrs: memoization dict for VNFR lookups.
        :return: True when the relation was added; False when either endpoint
            is not yet deployed/configured, so the caller should retry later.
        :raises LcmException: when the N2VC add_relation call fails.
        """
        deployed_provider = self._get_deployed_component(
            relation.provider, db_nsr, cached_vnfds
        )
        deployed_requirer = self._get_deployed_component(
            relation.requirer, db_nsr, cached_vnfds
        )
        # Both ends must exist and have their configuration software installed
        # before the relation can be established.
        if (
            deployed_provider
            and deployed_requirer
            and deployed_provider.config_sw_installed
            and deployed_requirer.config_sw_installed
        ):
            # Endpoints without a vnf_profile_id belong to the NS itself and
            # have no VNFR to look up.
            provider_db_vnfr = (
                self._get_vnfr(
                    relation.provider.nsr_id,
                    relation.provider.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.provider.vnf_profile_id
                else None
            )
            requirer_db_vnfr = (
                self._get_vnfr(
                    relation.requirer.nsr_id,
                    relation.requirer.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.requirer.vnf_profile_id
                else None
            )
            provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
            requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
            provider_relation_endpoint = RelationEndpoint(
                deployed_provider.ee_id,
                provider_vca_id,
                relation.provider.endpoint,
            )
            requirer_relation_endpoint = RelationEndpoint(
                deployed_requirer.ee_id,
                requirer_vca_id,
                relation.requirer.endpoint,
            )
            try:
                await self.vca_map[vca_type].add_relation(
                    provider=provider_relation_endpoint,
                    requirer=requirer_relation_endpoint,
                )
            except N2VCException as exception:
                self.logger.error(exception)
                raise LcmException(exception)
            return True
        return False
3532
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_type: str,
        vca_index: int,
        timeout: int = 3600,
    ) -> bool:
        """Add every NS- and VNF-level relation involving the VCA at vca_index.

        Polls every 5 seconds (up to ``timeout`` seconds) until all peers of
        the pending relations are deployed and each relation is established.

        :param logging_text: prefix for log messages.
        :param nsr_id: NS record id.
        :param vca_type: key into self.vca_map selecting the connector used.
        :param vca_index: index of this VCA inside the deployed VCA list.
        :param timeout: maximum seconds to wait for peers, default 3600.
        :return: True on success (or when there are no relations); False on
            timeout or on any error (best-effort: errors are logged, not raised).
        """
        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = get_nsd(db_nsr)

            # this VCA data
            deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
            my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

            cached_vnfds = {}
            cached_vnfrs = {}
            relations = []
            relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
            relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

            # if no relations, terminate
            if not relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(logging_text + " adding relations {}".format(relations))

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deployed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each relation, find the VCA's related
                # iterate over a copy so that completed relations can be
                # removed from the pending list while looping
                for relation in relations.copy():
                    added = await self._add_relation(
                        relation,
                        vca_type,
                        db_nsr,
                        cached_vnfds,
                        cached_vnfrs,
                    )
                    if added:
                        relations.remove(relation)

                if not relations:
                    self.logger.debug("Relations added")
                    break
                await asyncio.sleep(5.0)

            return True

        except Exception as e:
            # best-effort: relation failures are reported via the return value
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
3604
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its target k8s cluster and record the result.

        Updates the nsrs record with the generated kdu-instance name (and, for
        juju bundles, the namespace), installs the KDU, stores the discovered
        services and management IP in the vnfrs record, and runs the KDU's
        initial-config-primitives when no juju execution environment handles
        them. On failure the error is stored in the DB and re-raised.

        :param nsr_id: NS record id.
        :param nsr_db_path: dotted path of this KDU entry in _admin.deployed.K8s.
        :param vnfr_data: vnfrs record of the owning VNF.
        :param kdu_index: index of this KDU inside the vnfr "kdur" list.
        :param kdud: KDU descriptor from the VNFD.
        :param vnfd: full VNF descriptor.
        :param k8s_instance_info: cluster uuid/type, kdu model/name, namespace, etc.
        :param k8params: parameters passed to the install, default None.
        :param timeout: seconds allowed for install and each primitive, default 600.
        :param vca_id: VCA id to use, default None.
        :return: the kdu_instance name used for the deployment.
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Honor an explicit deployment name if the descriptor provided one;
            # otherwise let the cluster connector generate a unique instance name.
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        # mgmt-service names are matched by prefix against the
                        # actual k8s service names
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run initial config primitives directly on the cluster only when
            # no juju execution environment is declared for this KDU (an EE
            # would handle them itself).
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception as error:
                # ignore to keep original exception
                self.logger.warning(
                    f"An exception occurred while updating DB: {str(error)}"
                )
            # reraise original error
            raise

        return kdu_instance
3803
3804 async def deploy_kdus(
3805 self,
3806 logging_text,
3807 nsr_id,
3808 nslcmop_id,
3809 db_vnfrs,
3810 db_vnfds,
3811 task_instantiation_info,
3812 ):
3813 # Launch kdus if present in the descriptor
3814
3815 k8scluster_id_2_uuic = {
3816 "helm-chart-v3": {},
3817 "juju-bundle": {},
3818 }
3819
3820 async def _get_cluster_id(cluster_id, cluster_type):
3821 nonlocal k8scluster_id_2_uuic
3822 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
3823 return k8scluster_id_2_uuic[cluster_type][cluster_id]
3824
3825 # check if K8scluster is creating and wait look if previous tasks in process
3826 task_name, task_dependency = self.lcm_tasks.lookfor_related(
3827 "k8scluster", cluster_id
3828 )
3829 if task_dependency:
3830 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3831 task_name, cluster_id
3832 )
3833 self.logger.debug(logging_text + text)
3834 await asyncio.wait(task_dependency, timeout=3600)
3835
3836 db_k8scluster = self.db.get_one(
3837 "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
3838 )
3839 if not db_k8scluster:
3840 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
3841
3842 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
3843 if not k8s_id:
3844 if cluster_type == "helm-chart-v3":
3845 try:
3846 # backward compatibility for existing clusters that have not been initialized for helm v3
3847 k8s_credentials = yaml.safe_dump(
3848 db_k8scluster.get("credentials")
3849 )
3850 k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
3851 k8s_credentials, reuse_cluster_uuid=cluster_id
3852 )
3853 db_k8scluster_update = {}
3854 db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
3855 db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
3856 db_k8scluster_update[
3857 "_admin.helm-chart-v3.created"
3858 ] = uninstall_sw
3859 db_k8scluster_update[
3860 "_admin.helm-chart-v3.operationalState"
3861 ] = "ENABLED"
3862 self.update_db_2(
3863 "k8sclusters", cluster_id, db_k8scluster_update
3864 )
3865 except Exception as e:
3866 self.logger.error(
3867 logging_text
3868 + "error initializing helm-v3 cluster: {}".format(str(e))
3869 )
3870 raise LcmException(
3871 "K8s cluster '{}' has not been initialized for '{}'".format(
3872 cluster_id, cluster_type
3873 )
3874 )
3875 else:
3876 raise LcmException(
3877 "K8s cluster '{}' has not been initialized for '{}'".format(
3878 cluster_id, cluster_type
3879 )
3880 )
3881 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
3882 return k8s_id
3883
3884 logging_text += "Deploy kdus: "
3885 step = ""
3886 try:
3887 db_nsr_update = {"_admin.deployed.K8s": []}
3888 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3889
3890 index = 0
3891 updated_cluster_list = []
3892 updated_v3_cluster_list = []
3893
3894 for vnfr_data in db_vnfrs.values():
3895 vca_id = self.get_vca_id(vnfr_data, {})
3896 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
3897 # Step 0: Prepare and set parameters
3898 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
3899 vnfd_id = vnfr_data.get("vnfd-id")
3900 vnfd_with_id = find_in_list(
3901 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3902 )
3903 kdud = next(
3904 kdud
3905 for kdud in vnfd_with_id["kdu"]
3906 if kdud["name"] == kdur["kdu-name"]
3907 )
3908 namespace = kdur.get("k8s-namespace")
3909 kdu_deployment_name = kdur.get("kdu-deployment-name")
3910 if kdur.get("helm-chart"):
3911 kdumodel = kdur["helm-chart"]
3912 # Default version: helm3, if helm-version is v2 assign v2
3913 k8sclustertype = "helm-chart-v3"
3914 self.logger.debug("kdur: {}".format(kdur))
3915 elif kdur.get("juju-bundle"):
3916 kdumodel = kdur["juju-bundle"]
3917 k8sclustertype = "juju-bundle"
3918 else:
3919 raise LcmException(
3920 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3921 "juju-bundle. Maybe an old NBI version is running".format(
3922 vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
3923 )
3924 )
3925 # check if kdumodel is a file and exists
3926 try:
3927 vnfd_with_id = find_in_list(
3928 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3929 )
3930 storage = deep_get(vnfd_with_id, ("_admin", "storage"))
3931 if storage: # may be not present if vnfd has not artifacts
3932 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
3933 if storage["pkg-dir"]:
3934 filename = "{}/{}/{}s/{}".format(
3935 storage["folder"],
3936 storage["pkg-dir"],
3937 k8sclustertype,
3938 kdumodel,
3939 )
3940 else:
3941 filename = "{}/Scripts/{}s/{}".format(
3942 storage["folder"],
3943 k8sclustertype,
3944 kdumodel,
3945 )
3946 if self.fs.file_exists(
3947 filename, mode="file"
3948 ) or self.fs.file_exists(filename, mode="dir"):
3949 kdumodel = self.fs.path + filename
3950 except (asyncio.TimeoutError, asyncio.CancelledError):
3951 raise
3952 except Exception as e: # it is not a file
3953 self.logger.warning(f"An exception occurred: {str(e)}")
3954
3955 k8s_cluster_id = kdur["k8s-cluster"]["id"]
3956 step = "Synchronize repos for k8s cluster '{}'".format(
3957 k8s_cluster_id
3958 )
3959 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
3960
3961 # Synchronize repos
3962 if (
3963 k8sclustertype == "helm-chart"
3964 and cluster_uuid not in updated_cluster_list
3965 ) or (
3966 k8sclustertype == "helm-chart-v3"
3967 and cluster_uuid not in updated_v3_cluster_list
3968 ):
3969 del_repo_list, added_repo_dict = await asyncio.ensure_future(
3970 self.k8scluster_map[k8sclustertype].synchronize_repos(
3971 cluster_uuid=cluster_uuid
3972 )
3973 )
3974 if del_repo_list or added_repo_dict:
3975 if k8sclustertype == "helm-chart":
3976 unset = {
3977 "_admin.helm_charts_added." + item: None
3978 for item in del_repo_list
3979 }
3980 updated = {
3981 "_admin.helm_charts_added." + item: name
3982 for item, name in added_repo_dict.items()
3983 }
3984 updated_cluster_list.append(cluster_uuid)
3985 elif k8sclustertype == "helm-chart-v3":
3986 unset = {
3987 "_admin.helm_charts_v3_added." + item: None
3988 for item in del_repo_list
3989 }
3990 updated = {
3991 "_admin.helm_charts_v3_added." + item: name
3992 for item, name in added_repo_dict.items()
3993 }
3994 updated_v3_cluster_list.append(cluster_uuid)
3995 self.logger.debug(
3996 logging_text + "repos synchronized on k8s cluster "
3997 "'{}' to_delete: {}, to_add: {}".format(
3998 k8s_cluster_id, del_repo_list, added_repo_dict
3999 )
4000 )
4001 self.db.set_one(
4002 "k8sclusters",
4003 {"_id": k8s_cluster_id},
4004 updated,
4005 unset=unset,
4006 )
4007
4008 # Instantiate kdu
4009 step = "Instantiating KDU {}.{} in k8s cluster {}".format(
4010 vnfr_data["member-vnf-index-ref"],
4011 kdur["kdu-name"],
4012 k8s_cluster_id,
4013 )
4014 k8s_instance_info = {
4015 "kdu-instance": None,
4016 "k8scluster-uuid": cluster_uuid,
4017 "k8scluster-type": k8sclustertype,
4018 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
4019 "kdu-name": kdur["kdu-name"],
4020 "kdu-model": kdumodel,
4021 "namespace": namespace,
4022 "kdu-deployment-name": kdu_deployment_name,
4023 }
4024 db_path = "_admin.deployed.K8s.{}".format(index)
4025 db_nsr_update[db_path] = k8s_instance_info
4026 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4027 vnfd_with_id = find_in_list(
4028 db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
4029 )
4030 task = asyncio.ensure_future(
4031 self._install_kdu(
4032 nsr_id,
4033 db_path,
4034 vnfr_data,
4035 kdu_index,
4036 kdud,
4037 vnfd_with_id,
4038 k8s_instance_info,
4039 k8params=desc_params,
4040 timeout=1800,
4041 vca_id=vca_id,
4042 )
4043 )
4044 self.lcm_tasks.register(
4045 "ns",
4046 nsr_id,
4047 nslcmop_id,
4048 "instantiate_KDU-{}".format(index),
4049 task,
4050 )
4051 task_instantiation_info[task] = "Deploying KDU {}".format(
4052 kdur["kdu-name"]
4053 )
4054
4055 index += 1
4056
4057 except (LcmException, asyncio.CancelledError):
4058 raise
4059 except Exception as e:
4060 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
4061 if isinstance(e, (N2VCException, DbException)):
4062 self.logger.error(logging_text + msg)
4063 else:
4064 self.logger.critical(logging_text + msg, exc_info=True)
4065 raise LcmException(msg)
4066 finally:
4067 if db_nsr_update:
4068 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4069
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Deploy every execution environment (charm) declared in descriptor_config.

        For each entry of the descriptor's execution-environment-list (or the
        descriptor itself when it is an ns charm carrying a "juju" section), the
        matching record at db_nsr._admin.deployed.VCA is looked up (created and
        persisted when missing), and an instantiate_N2VC asyncio task is launched
        and registered in both lcm_tasks and task_instantiation_info.

        NOTE(review): mutates db_nsr in memory and updates the "nsrs" collection.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            # NOTE(review): this condition is always True inside this elif branch
            # (the previous branch already handled descriptors with the list)
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                # proxy charm when a charm artifact is referenced, native otherwise;
                # "cloud"/"proxy" fields can still override the decision below
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an existing VCA record matching this execution environment
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
4230
4231 @staticmethod
4232 def _create_nslcmop(nsr_id, operation, params):
4233 """
4234 Creates a ns-lcm-opp content to be stored at database.
4235 :param nsr_id: internal id of the instance
4236 :param operation: instantiate, terminate, scale, action, ...
4237 :param params: user parameters for the operation
4238 :return: dictionary following SOL005 format
4239 """
4240 # Raise exception if invalid arguments
4241 if not (nsr_id and operation and params):
4242 raise LcmException(
4243 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
4244 )
4245 now = time()
4246 _id = str(uuid4())
4247 nslcmop = {
4248 "id": _id,
4249 "_id": _id,
4250 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
4251 "operationState": "PROCESSING",
4252 "statusEnteredTime": now,
4253 "nsInstanceId": nsr_id,
4254 "lcmOperationType": operation,
4255 "startTime": now,
4256 "isAutomaticInvocation": False,
4257 "operationParams": params,
4258 "isCancelPending": False,
4259 "links": {
4260 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
4261 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
4262 },
4263 }
4264 return nslcmop
4265
4266 def _format_additional_params(self, params):
4267 params = params or {}
4268 for key, value in params.items():
4269 if str(value).startswith("!!yaml "):
4270 params[key] = yaml.safe_load(value[7:])
4271 return params
4272
4273 def _get_terminate_primitive_params(self, seq, vnf_index):
4274 primitive = seq.get("name")
4275 primitive_params = {}
4276 params = {
4277 "member_vnf_index": vnf_index,
4278 "primitive": primitive,
4279 "primitive_params": primitive_params,
4280 }
4281 desc_params = {}
4282 return self._map_primitive_params(seq, params, desc_params)
4283
4284 # sub-operations
4285
4286 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
4287 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
4288 if op.get("operationState") == "COMPLETED":
4289 # b. Skip sub-operation
4290 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4291 return self.SUBOPERATION_STATUS_SKIP
4292 else:
4293 # c. retry executing sub-operation
4294 # The sub-operation exists, and operationState != 'COMPLETED'
4295 # Update operationState = 'PROCESSING' to indicate a retry.
4296 operationState = "PROCESSING"
4297 detailed_status = "In progress"
4298 self._update_suboperation_status(
4299 db_nslcmop, op_index, operationState, detailed_status
4300 )
4301 # Return the sub-operation index
4302 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4303 # with arguments extracted from the sub-operation
4304 return op_index
4305
4306 # Find a sub-operation where all keys in a matching dictionary must match
4307 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4308 def _find_suboperation(self, db_nslcmop, match):
4309 if db_nslcmop and match:
4310 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4311 for i, op in enumerate(op_list):
4312 if all(op.get(k) == match[k] for k in match):
4313 return i
4314 return self.SUBOPERATION_STATUS_NOT_FOUND
4315
4316 # Update status for a sub-operation given its index
4317 def _update_suboperation_status(
4318 self, db_nslcmop, op_index, operationState, detailed_status
4319 ):
4320 # Update DB for HA tasks
4321 q_filter = {"_id": db_nslcmop["_id"]}
4322 update_dict = {
4323 "_admin.operations.{}.operationState".format(op_index): operationState,
4324 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4325 }
4326 self.db.set_one(
4327 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4328 )
4329
4330 # Add sub-operation, return the index of the added sub-operation
4331 # Optionally, set operationState, detailed-status, and operationType
4332 # Status and type are currently set for 'scale' sub-operations:
4333 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4334 # 'detailed-status' : status message
4335 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4336 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4337 def _add_suboperation(
4338 self,
4339 db_nslcmop,
4340 vnf_index,
4341 vdu_id,
4342 vdu_count_index,
4343 vdu_name,
4344 primitive,
4345 mapped_primitive_params,
4346 operationState=None,
4347 detailed_status=None,
4348 operationType=None,
4349 RO_nsr_id=None,
4350 RO_scaling_info=None,
4351 ):
4352 if not db_nslcmop:
4353 return self.SUBOPERATION_STATUS_NOT_FOUND
4354 # Get the "_admin.operations" list, if it exists
4355 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4356 op_list = db_nslcmop_admin.get("operations")
4357 # Create or append to the "_admin.operations" list
4358 new_op = {
4359 "member_vnf_index": vnf_index,
4360 "vdu_id": vdu_id,
4361 "vdu_count_index": vdu_count_index,
4362 "primitive": primitive,
4363 "primitive_params": mapped_primitive_params,
4364 }
4365 if operationState:
4366 new_op["operationState"] = operationState
4367 if detailed_status:
4368 new_op["detailed-status"] = detailed_status
4369 if operationType:
4370 new_op["lcmOperationType"] = operationType
4371 if RO_nsr_id:
4372 new_op["RO_nsr_id"] = RO_nsr_id
4373 if RO_scaling_info:
4374 new_op["RO_scaling_info"] = RO_scaling_info
4375 if not op_list:
4376 # No existing operations, create key 'operations' with current operation as first list element
4377 db_nslcmop_admin.update({"operations": [new_op]})
4378 op_list = db_nslcmop_admin.get("operations")
4379 else:
4380 # Existing operations, append operation to list
4381 op_list.append(new_op)
4382
4383 db_nslcmop_update = {"_admin.operations": op_list}
4384 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4385 op_index = len(op_list) - 1
4386 return op_index
4387
4388 # Helper methods for scale() sub-operations
4389
4390 # pre-scale/post-scale:
4391 # Check for 3 different cases:
4392 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4393 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4394 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4395 def _check_or_add_scale_suboperation(
4396 self,
4397 db_nslcmop,
4398 vnf_index,
4399 vnf_config_primitive,
4400 primitive_params,
4401 operationType,
4402 RO_nsr_id=None,
4403 RO_scaling_info=None,
4404 ):
4405 # Find this sub-operation
4406 if RO_nsr_id and RO_scaling_info:
4407 operationType = "SCALE-RO"
4408 match = {
4409 "member_vnf_index": vnf_index,
4410 "RO_nsr_id": RO_nsr_id,
4411 "RO_scaling_info": RO_scaling_info,
4412 }
4413 else:
4414 match = {
4415 "member_vnf_index": vnf_index,
4416 "primitive": vnf_config_primitive,
4417 "primitive_params": primitive_params,
4418 "lcmOperationType": operationType,
4419 }
4420 op_index = self._find_suboperation(db_nslcmop, match)
4421 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4422 # a. New sub-operation
4423 # The sub-operation does not exist, add it.
4424 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4425 # The following parameters are set to None for all kind of scaling:
4426 vdu_id = None
4427 vdu_count_index = None
4428 vdu_name = None
4429 if RO_nsr_id and RO_scaling_info:
4430 vnf_config_primitive = None
4431 primitive_params = None
4432 else:
4433 RO_nsr_id = None
4434 RO_scaling_info = None
4435 # Initial status for sub-operation
4436 operationState = "PROCESSING"
4437 detailed_status = "In progress"
4438 # Add sub-operation for pre/post-scaling (zero or more operations)
4439 self._add_suboperation(
4440 db_nslcmop,
4441 vnf_index,
4442 vdu_id,
4443 vdu_count_index,
4444 vdu_name,
4445 vnf_config_primitive,
4446 primitive_params,
4447 operationState,
4448 detailed_status,
4449 operationType,
4450 RO_nsr_id,
4451 RO_scaling_info,
4452 )
4453 return self.SUBOPERATION_STATUS_NEW
4454 else:
4455 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4456 # or op_index (operationState != 'COMPLETED')
4457 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4458
4459 # Function to return execution_environment id
4460
4461 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4462 # TODO vdu_index_count
4463 for vca in vca_deployed_list:
4464 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4465 return vca.get("ee_id")
4466
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix for log messages
        :param db_nslcmop: nslcmop database record; sub-operations are registered on it
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of them at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (controller) identifier to use, if any
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default kept for records created before the "type" field existed
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4572
4573 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4574 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4575 namespace = "." + db_nsr["_id"]
4576 try:
4577 await self.n2vc.delete_namespace(
4578 namespace=namespace,
4579 total_timeout=self.timeout.charm_delete,
4580 vca_id=vca_id,
4581 )
4582 except N2VCNotFound: # already deleted. Skip
4583 pass
4584 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4585
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate a NS instance.

        Runs in three stages: (1) prepare and read DB records, (2) execute the
        terminate primitives of each deployed VCA, (3) delete all execution
        environments, KDUs and the RO (VIM) deployment. The finally section
        always persists the resulting operation/nsr status and publishes the
        "terminated" event to kafka.

        :param nsr_id: NS instance id ("nsrs" collection)
        :param nslcmop_id: operation id ("nslcmops" collection)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # asyncio task -> human readable description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            # nothing was ever instantiated: the finally section still updates status
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor matching the VCA scope
                # (ns-level, vdu-level, kdu-level or vnf-level)
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = True if vca_type in ("helm-v3", "native_charm") else False
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    namespace=db_nslcmop["nsInstanceId"],
                    certificate_name=self.EE_TLS_NAME,
                )
                await self.vca_map["helm-v3"].delete_namespace(
                    namespace=db_nslcmop["nsInstanceId"],
                )

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # cancel the still-pending tasks and wait until they finish cancelling
                await self._cancel_pending_tasks(logging_text, tasks_dict_info)
                await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    timeout_ns_terminate,
                    stage,
                    nslcmop_id,
                )
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
            self.db.del_list("alerts", {"tags.ns_id": nsr_id})

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4924
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for all tasks in created_tasks_info, collecting errors as they finish.

        :param logging_text: prefix for log messages
        :param created_tasks_info: dict mapping asyncio task -> human readable description
        :param timeout: global timeout in seconds for the whole set of tasks
        :param stage: 3-element list [stage, step, VIM-status]; stage[1] is updated
            in place with progress counters and error summaries
        :param nslcmop_id: operation id, used to persist progress via _write_op_status
        :param nsr_id: when provided, errorDescription/errorDetail are also written
            to the nsr record on each new error
        :return: list of error detail strings (empty when all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining share of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/expected error types get a plain log line;
                    # anything else is logged with its full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
5001
5002 async def _cancel_pending_tasks(self, logging_text, created_tasks_info):
5003 for task, name in created_tasks_info.items():
5004 self.logger.debug(logging_text + "Cancelling task: " + name)
5005 task.cancel()
5006
5007 @staticmethod
5008 def _map_primitive_params(primitive_desc, params, instantiation_params):
5009 """
5010 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
5011 The default-value is used. If it is between < > it look for a value at instantiation_params
5012 :param primitive_desc: portion of VNFD/NSD that describes primitive
5013 :param params: Params provided by user
5014 :param instantiation_params: Instantiation params provided by user
5015 :return: a dictionary with the calculated params
5016 """
5017 calculated_params = {}
5018 for parameter in primitive_desc.get("parameter", ()):
5019 param_name = parameter["name"]
5020 if param_name in params:
5021 calculated_params[param_name] = params[param_name]
5022 elif "default-value" in parameter or "value" in parameter:
5023 if "value" in parameter:
5024 calculated_params[param_name] = parameter["value"]
5025 else:
5026 calculated_params[param_name] = parameter["default-value"]
5027 if (
5028 isinstance(calculated_params[param_name], str)
5029 and calculated_params[param_name].startswith("<")
5030 and calculated_params[param_name].endswith(">")
5031 ):
5032 if calculated_params[param_name][1:-1] in instantiation_params:
5033 calculated_params[param_name] = instantiation_params[
5034 calculated_params[param_name][1:-1]
5035 ]
5036 else:
5037 raise LcmException(
5038 "Parameter {} needed to execute primitive {} not provided".format(
5039 calculated_params[param_name], primitive_desc["name"]
5040 )
5041 )
5042 else:
5043 raise LcmException(
5044 "Parameter {} needed to execute primitive {} not provided".format(
5045 param_name, primitive_desc["name"]
5046 )
5047 )
5048
5049 if isinstance(calculated_params[param_name], (dict, list, tuple)):
5050 calculated_params[param_name] = yaml.safe_dump(
5051 calculated_params[param_name], default_flow_style=True, width=256
5052 )
5053 elif isinstance(calculated_params[param_name], str) and calculated_params[
5054 param_name
5055 ].startswith("!!yaml "):
5056 calculated_params[param_name] = calculated_params[param_name][7:]
5057 if parameter.get("data-type") == "INTEGER":
5058 try:
5059 calculated_params[param_name] = int(calculated_params[param_name])
5060 except ValueError: # error converting string to int
5061 raise LcmException(
5062 "Parameter {} of primitive {} must be integer".format(
5063 param_name, primitive_desc["name"]
5064 )
5065 )
5066 elif parameter.get("data-type") == "BOOLEAN":
5067 calculated_params[param_name] = not (
5068 (str(calculated_params[param_name])).lower() == "false"
5069 )
5070
5071 # add always ns_config_info if primitive name is config
5072 if primitive_desc["name"] == "config":
5073 if "ns_config_info" in instantiation_params:
5074 calculated_params["ns_config_info"] = instantiation_params[
5075 "ns_config_info"
5076 ]
5077 return calculated_params
5078
5079 def _look_for_deployed_vca(
5080 self,
5081 deployed_vca,
5082 member_vnf_index,
5083 vdu_id,
5084 vdu_count_index,
5085 kdu_name=None,
5086 ee_descriptor_id=None,
5087 ):
5088 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
5089 for vca in deployed_vca:
5090 if not vca:
5091 continue
5092 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
5093 continue
5094 if (
5095 vdu_count_index is not None
5096 and vdu_count_index != vca["vdu_count_index"]
5097 ):
5098 continue
5099 if kdu_name and kdu_name != vca["kdu_name"]:
5100 continue
5101 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
5102 continue
5103 break
5104 else:
5105 # vca_deployed not found
5106 raise LcmException(
5107 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
5108 " is not deployed".format(
5109 member_vnf_index,
5110 vdu_id,
5111 vdu_count_index,
5112 kdu_name,
5113 ee_descriptor_id,
5114 )
5115 )
5116 # get ee_id
5117 ee_id = vca.get("ee_id")
5118 vca_type = vca.get(
5119 "type", "lxc_proxy_charm"
5120 ) # default value for backward compatibility - proxy charm
5121 if not ee_id:
5122 raise LcmException(
5123 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
5124 "execution environment".format(
5125 member_vnf_index, vdu_id, kdu_name, vdu_count_index
5126 )
5127 )
5128 return ee_id, vca_type
5129
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive (action) on a deployed execution environment.

        :param ee_id: execution environment id where the primitive runs
        :param primitive: primitive name; "config" gets its params wrapped under "params"
        :param primitive_params: dict of parameters for the primitive
        :param retries: number of ADDITIONAL attempts after a failed one
        :param retries_interval: seconds to sleep between attempts
        :param timeout: per-attempt timeout; defaults to self.timeout.primitive
        :param vca_type: key into self.vca_map; defaults to "lxc_proxy_charm"
        :param db_dict: db location where the connector writes progress updates
        :param vca_id: VCA id, forwarded to the connector
        :return: ("COMPLETED", output) on success; ("FAILED", detail) when all
            retries are exhausted; ("FAIL", detail) on unexpected errors.
        """
        try:
            if primitive == "config":
                # the "config" primitive expects its params nested under "params"
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            # NOTE(review): if called with retries < 0 the loop never runs and
            # "output" would be unbound at the return below -- confirm callers
            # never pass a negative value.
            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout.progress_primitive,
                            total_timeout=self.timeout.primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout.primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval)
                    else:
                        if isinstance(e, asyncio.TimeoutError):
                            e = N2VCException(
                                message="Timed out waiting for action to complete"
                            )
                        return "FAILED", getattr(e, "message", repr(e))

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            # NOTE(review): this branch returns "FAIL" while the retry path above
            # returns "FAILED" -- confirm whether any caller relies on the
            # distinction before unifying the two values.
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
5190
5191 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5192 """
5193 Updating the vca_status with latest juju information in nsrs record
5194 :param: nsr_id: Id of the nsr
5195 :param: nslcmop_id: Id of the nslcmop
5196 :return: None
5197 """
5198
5199 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5200 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5201 vca_id = self.get_vca_id({}, db_nsr)
5202 if db_nsr["_admin"]["deployed"]["K8s"]:
5203 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5204 cluster_uuid, kdu_instance, cluster_type = (
5205 k8s["k8scluster-uuid"],
5206 k8s["kdu-instance"],
5207 k8s["k8scluster-type"],
5208 )
5209 await self._on_update_k8s_db(
5210 cluster_uuid=cluster_uuid,
5211 kdu_instance=kdu_instance,
5212 filter={"_id": nsr_id},
5213 vca_id=vca_id,
5214 cluster_type=cluster_type,
5215 )
5216 else:
5217 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5218 table, filter = "nsrs", {"_id": nsr_id}
5219 path = "_admin.deployed.VCA.{}.".format(vca_index)
5220 await self._on_update_n2vc_db(table, filter, path, {})
5221
5222 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5223 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5224
    async def action(self, nsr_id, nslcmop_id):
        """Execute a primitive (action) on a NS, VNF, VDU or KDU of a deployed NS.

        Reads the nslcmop record to determine the target (member_vnf_index /
        vdu_id / kdu_name) and the primitive to run, executes it either through
        the K8s connector (KDU upgrade/rollback/status or KDU actions) or
        through the deployed VCA execution environment, and persists the result
        in the nslcmops record.

        :param nsr_id: id of the nsrs record (NS instance)
        :param nslcmop_id: id of the nslcmops record describing the action
        :return: (nslcmop_operation_state, detailed_status)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        step = ""
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params are stored JSON-encoded in the operation record
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout.primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                # kdur additionalParams are stored JSON-encoded; decode them
                if db_vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only assigned in the vnf_index branch
            # above, yet it is referenced unconditionally here -- confirm that
            # NS-level actions (no member_vnf_index) cannot reach this line.
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # KDU-native operations do not need a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # collect the descriptor-level additional params for the target
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): the loop variable below shadows the outer
                # "primitive" read from operationParams; primitive_name was
                # captured earlier so behavior is preserved.
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] != "helm-chart-v3"
                    else False
                )

            # TODO check if ns is in a proper status
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        if kdu_model.count("/") < 2:  # helm chart is not embedded
                            parts = kdu_model.split(sep=":")
                            if len(parts) == 2:
                                kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # generic KDU action through the K8s connector
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # charm-based action: locate the execution environment and run it
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist the final NS/operation state regardless of outcome
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5596
5597 async def terminate_vdus(
5598 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5599 ):
5600 """This method terminates VDUs
5601
5602 Args:
5603 db_vnfr: VNF instance record
5604 member_vnf_index: VNF index to identify the VDUs to be removed
5605 db_nsr: NS instance record
5606 update_db_nslcmops: Nslcmop update record
5607 """
5608 vca_scaling_info = []
5609 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5610 scaling_info["scaling_direction"] = "IN"
5611 scaling_info["vdu-delete"] = {}
5612 scaling_info["kdu-delete"] = {}
5613 db_vdur = db_vnfr.get("vdur")
5614 vdur_list = copy(db_vdur)
5615 count_index = 0
5616 for index, vdu in enumerate(vdur_list):
5617 vca_scaling_info.append(
5618 {
5619 "osm_vdu_id": vdu["vdu-id-ref"],
5620 "member-vnf-index": member_vnf_index,
5621 "type": "delete",
5622 "vdu_index": count_index,
5623 }
5624 )
5625 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5626 scaling_info["vdu"].append(
5627 {
5628 "name": vdu.get("name") or vdu.get("vdu-name"),
5629 "vdu_id": vdu["vdu-id-ref"],
5630 "interface": [],
5631 }
5632 )
5633 for interface in vdu["interfaces"]:
5634 scaling_info["vdu"][index]["interface"].append(
5635 {
5636 "name": interface["name"],
5637 "ip_address": interface["ip-address"],
5638 "mac_address": interface.get("mac-address"),
5639 }
5640 )
5641 self.logger.info("NS update scaling info{}".format(scaling_info))
5642 stage[2] = "Terminating VDUs"
5643 if scaling_info.get("vdu-delete"):
5644 # scale_process = "RO"
5645 if self.ro_config.ng:
5646 await self._scale_ng_ro(
5647 logging_text,
5648 db_nsr,
5649 update_db_nslcmops,
5650 db_vnfr,
5651 scaling_info,
5652 stage,
5653 )
5654
5655 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5656 """This method is to Remove VNF instances from NS.
5657
5658 Args:
5659 nsr_id: NS instance id
5660 nslcmop_id: nslcmop id of update
5661 vnf_instance_id: id of the VNF instance to be removed
5662
5663 Returns:
5664 result: (str, str) COMPLETED/FAILED, details
5665 """
5666 try:
5667 db_nsr_update = {}
5668 logging_text = "Task ns={} update ".format(nsr_id)
5669 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5670 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5671 if check_vnfr_count > 1:
5672 stage = ["", "", ""]
5673 step = "Getting nslcmop from database"
5674 self.logger.debug(
5675 step + " after having waited for previous tasks to be completed"
5676 )
5677 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5678 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5679 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5680 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5681 """ db_vnfr = self.db.get_one(
5682 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5683
5684 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5685 await self.terminate_vdus(
5686 db_vnfr,
5687 member_vnf_index,
5688 db_nsr,
5689 update_db_nslcmops,
5690 stage,
5691 logging_text,
5692 )
5693
5694 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5695 constituent_vnfr.remove(db_vnfr.get("_id"))
5696 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5697 "constituent-vnfr-ref"
5698 )
5699 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5700 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5701 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5702 return "COMPLETED", "Done"
5703 else:
5704 step = "Terminate VNF Failed with"
5705 raise LcmException(
5706 "{} Cannot terminate the last VNF in this NS.".format(
5707 vnf_instance_id
5708 )
5709 )
5710 except (LcmException, asyncio.CancelledError):
5711 raise
5712 except Exception as e:
5713 self.logger.debug("Error removing VNF {}".format(e))
5714 return "FAILED", "Error removing VNF {}".format(e)
5715
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        It first terminates the VDUs of the current VNF revision, rewrites the
        vnfrs record (connection points, vdur, revision) from the latest VNFD,
        and then instantiates the new resources through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the connection-point list from the latest descriptor
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur comes precomputed in the operation parameters
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5841
5842 async def _ns_charm_upgrade(
5843 self,
5844 ee_id,
5845 charm_id,
5846 charm_type,
5847 path,
5848 timeout: float = None,
5849 ) -> (str, str):
5850 """This method upgrade charms in VNF instances
5851
5852 Args:
5853 ee_id: Execution environment id
5854 path: Local path to the charm
5855 charm_id: charm-id
5856 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5857 timeout: (Float) Timeout for the ns update operation
5858
5859 Returns:
5860 result: (str, str) COMPLETED/FAILED, details
5861 """
5862 try:
5863 charm_type = charm_type or "lxc_proxy_charm"
5864 output = await self.vca_map[charm_type].upgrade_charm(
5865 ee_id=ee_id,
5866 path=path,
5867 charm_id=charm_id,
5868 charm_type=charm_type,
5869 timeout=timeout or self.timeout.ns_update,
5870 )
5871
5872 if output:
5873 return "COMPLETED", output
5874
5875 except (LcmException, asyncio.CancelledError):
5876 raise
5877
5878 except Exception as e:
5879 self.logger.debug("Error upgrading charm {}".format(path))
5880
5881 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5882
5883 async def update(self, nsr_id, nslcmop_id):
5884 """Update NS according to different update types
5885
5886 This method performs upgrade of VNF instances then updates the revision
5887 number in VNF record
5888
5889 Args:
5890 nsr_id: Network service will be updated
5891 nslcmop_id: ns lcm operation id
5892
5893 Returns:
5894 It may raise DbException, LcmException, N2VCException, K8sException
5895
5896 """
5897 # Try to lock HA task here
5898 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5899 if not task_is_locked_by_me:
5900 return
5901
5902 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5903 self.logger.debug(logging_text + "Enter")
5904
5905 # Set the required variables to be filled up later
5906 db_nsr = None
5907 db_nslcmop_update = {}
5908 vnfr_update = {}
5909 nslcmop_operation_state = None
5910 db_nsr_update = {}
5911 error_description_nslcmop = ""
5912 exc = None
5913 change_type = "updated"
5914 detailed_status = ""
5915 member_vnf_index = None
5916
5917 try:
5918 # wait for any previous tasks in process
5919 step = "Waiting for previous operations to terminate"
5920 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5921 self._write_ns_status(
5922 nsr_id=nsr_id,
5923 ns_state=None,
5924 current_operation="UPDATING",
5925 current_operation_id=nslcmop_id,
5926 )
5927
5928 step = "Getting nslcmop from database"
5929 db_nslcmop = self.db.get_one(
5930 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5931 )
5932 update_type = db_nslcmop["operationParams"]["updateType"]
5933
5934 step = "Getting nsr from database"
5935 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5936 old_operational_status = db_nsr["operational-status"]
5937 db_nsr_update["operational-status"] = "updating"
5938 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5939 nsr_deployed = db_nsr["_admin"].get("deployed")
5940
5941 if update_type == "CHANGE_VNFPKG":
5942 # Get the input parameters given through update request
5943 vnf_instance_id = db_nslcmop["operationParams"][
5944 "changeVnfPackageData"
5945 ].get("vnfInstanceId")
5946
5947 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5948 "vnfdId"
5949 )
5950 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5951
5952 step = "Getting vnfr from database"
5953 db_vnfr = self.db.get_one(
5954 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5955 )
5956
5957 step = "Getting vnfds from database"
5958 # Latest VNFD
5959 latest_vnfd = self.db.get_one(
5960 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5961 )
5962 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5963
5964 # Current VNFD
5965 current_vnf_revision = db_vnfr.get("revision", 1)
5966 current_vnfd = self.db.get_one(
5967 "vnfds_revisions",
5968 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5969 fail_on_empty=False,
5970 )
5971 # Charm artifact paths will be filled up later
5972 (
5973 current_charm_artifact_path,
5974 target_charm_artifact_path,
5975 charm_artifact_paths,
5976 helm_artifacts,
5977 ) = ([], [], [], [])
5978
5979 step = "Checking if revision has changed in VNFD"
5980 if current_vnf_revision != latest_vnfd_revision:
5981 change_type = "policy_updated"
5982
5983 # There is new revision of VNFD, update operation is required
5984 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5985 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5986
5987 step = "Removing the VNFD packages if they exist in the local path"
5988 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5989 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5990
5991 step = "Get the VNFD packages from FSMongo"
5992 self.fs.sync(from_path=latest_vnfd_path)
5993 self.fs.sync(from_path=current_vnfd_path)
5994
5995 step = (
5996 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5997 )
5998 current_base_folder = current_vnfd["_admin"]["storage"]
5999 latest_base_folder = latest_vnfd["_admin"]["storage"]
6000
6001 for vca_index, vca_deployed in enumerate(
6002 get_iterable(nsr_deployed, "VCA")
6003 ):
6004 vnf_index = db_vnfr.get("member-vnf-index-ref")
6005
6006 # Getting charm-id and charm-type
6007 if vca_deployed.get("member-vnf-index") == vnf_index:
6008 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6009 vca_type = vca_deployed.get("type")
6010 vdu_count_index = vca_deployed.get("vdu_count_index")
6011
6012 # Getting ee-id
6013 ee_id = vca_deployed.get("ee_id")
6014
6015 step = "Getting descriptor config"
6016 if current_vnfd.get("kdu"):
6017 search_key = "kdu_name"
6018 else:
6019 search_key = "vnfd_id"
6020
6021 entity_id = vca_deployed.get(search_key)
6022
6023 descriptor_config = get_configuration(
6024 current_vnfd, entity_id
6025 )
6026
6027 if "execution-environment-list" in descriptor_config:
6028 ee_list = descriptor_config.get(
6029 "execution-environment-list", []
6030 )
6031 else:
6032 ee_list = []
6033
6034 # There could be several charm used in the same VNF
6035 for ee_item in ee_list:
6036 if ee_item.get("juju"):
6037 step = "Getting charm name"
6038 charm_name = ee_item["juju"].get("charm")
6039
6040 step = "Setting Charm artifact paths"
6041 current_charm_artifact_path.append(
6042 get_charm_artifact_path(
6043 current_base_folder,
6044 charm_name,
6045 vca_type,
6046 current_vnf_revision,
6047 )
6048 )
6049 target_charm_artifact_path.append(
6050 get_charm_artifact_path(
6051 latest_base_folder,
6052 charm_name,
6053 vca_type,
6054 latest_vnfd_revision,
6055 )
6056 )
6057 elif ee_item.get("helm-chart"):
6058 # add chart to list and all parameters
6059 step = "Getting helm chart name"
6060 chart_name = ee_item.get("helm-chart")
6061 vca_type = "helm-v3"
6062 step = "Setting Helm chart artifact paths"
6063
6064 helm_artifacts.append(
6065 {
6066 "current_artifact_path": get_charm_artifact_path(
6067 current_base_folder,
6068 chart_name,
6069 vca_type,
6070 current_vnf_revision,
6071 ),
6072 "target_artifact_path": get_charm_artifact_path(
6073 latest_base_folder,
6074 chart_name,
6075 vca_type,
6076 latest_vnfd_revision,
6077 ),
6078 "ee_id": ee_id,
6079 "vca_index": vca_index,
6080 "vdu_index": vdu_count_index,
6081 }
6082 )
6083
6084 charm_artifact_paths = zip(
6085 current_charm_artifact_path, target_charm_artifact_path
6086 )
6087
6088 step = "Checking if software version has changed in VNFD"
6089 if find_software_version(current_vnfd) != find_software_version(
6090 latest_vnfd
6091 ):
6092 step = "Checking if existing VNF has charm"
6093 for current_charm_path, target_charm_path in list(
6094 charm_artifact_paths
6095 ):
6096 if current_charm_path:
6097 raise LcmException(
6098 "Software version change is not supported as VNF instance {} has charm.".format(
6099 vnf_instance_id
6100 )
6101 )
6102
6103 # There is no change in the charm package, then redeploy the VNF
6104 # based on new descriptor
6105 step = "Redeploying VNF"
6106 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6107 (result, detailed_status) = await self._ns_redeploy_vnf(
6108 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
6109 )
6110 if result == "FAILED":
6111 nslcmop_operation_state = result
6112 error_description_nslcmop = detailed_status
6113 db_nslcmop_update["detailed-status"] = detailed_status
6114 self.logger.debug(
6115 logging_text
6116 + " step {} Done with result {} {}".format(
6117 step, nslcmop_operation_state, detailed_status
6118 )
6119 )
6120
6121 else:
6122 step = "Checking if any charm package has changed or not"
6123 for current_charm_path, target_charm_path in list(
6124 charm_artifact_paths
6125 ):
6126 if (
6127 current_charm_path
6128 and target_charm_path
6129 and self.check_charm_hash_changed(
6130 current_charm_path, target_charm_path
6131 )
6132 ):
6133 step = "Checking whether VNF uses juju bundle"
6134 if check_juju_bundle_existence(current_vnfd):
6135 raise LcmException(
6136 "Charm upgrade is not supported for the instance which"
6137 " uses juju-bundle: {}".format(
6138 check_juju_bundle_existence(current_vnfd)
6139 )
6140 )
6141
6142 step = "Upgrading Charm"
6143 (
6144 result,
6145 detailed_status,
6146 ) = await self._ns_charm_upgrade(
6147 ee_id=ee_id,
6148 charm_id=vca_id,
6149 charm_type=vca_type,
6150 path=self.fs.path + target_charm_path,
6151 timeout=timeout_seconds,
6152 )
6153
6154 if result == "FAILED":
6155 nslcmop_operation_state = result
6156 error_description_nslcmop = detailed_status
6157
6158 db_nslcmop_update["detailed-status"] = detailed_status
6159 self.logger.debug(
6160 logging_text
6161 + " step {} Done with result {} {}".format(
6162 step, nslcmop_operation_state, detailed_status
6163 )
6164 )
6165
6166 step = "Updating policies"
6167 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6168 result = "COMPLETED"
6169 detailed_status = "Done"
6170 db_nslcmop_update["detailed-status"] = "Done"
6171
6172 # helm base EE
6173 for item in helm_artifacts:
6174 if not (
6175 item["current_artifact_path"]
6176 and item["target_artifact_path"]
6177 and self.check_charm_hash_changed(
6178 item["current_artifact_path"],
6179 item["target_artifact_path"],
6180 )
6181 ):
6182 continue
6183 db_update_entry = "_admin.deployed.VCA.{}.".format(
6184 item["vca_index"]
6185 )
6186 vnfr_id = db_vnfr["_id"]
6187 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6188 db_dict = {
6189 "collection": "nsrs",
6190 "filter": {"_id": nsr_id},
6191 "path": db_update_entry,
6192 }
6193 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
6194 await self.vca_map[vca_type].upgrade_execution_environment(
6195 namespace=namespace,
6196 helm_id=helm_id,
6197 db_dict=db_dict,
6198 config=osm_config,
6199 artifact_path=item["target_artifact_path"],
6200 vca_type=vca_type,
6201 )
6202 vnf_id = db_vnfr.get("vnfd-ref")
6203 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6204 self.logger.debug("get ssh key block")
6205 rw_mgmt_ip = None
6206 if deep_get(
6207 config_descriptor,
6208 ("config-access", "ssh-access", "required"),
6209 ):
6210 # Needed to inject a ssh key
6211 user = deep_get(
6212 config_descriptor,
6213 ("config-access", "ssh-access", "default-user"),
6214 )
6215 step = (
6216 "Install configuration Software, getting public ssh key"
6217 )
6218 pub_key = await self.vca_map[
6219 vca_type
6220 ].get_ee_ssh_public__key(
6221 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6222 )
6223
6224 step = (
6225 "Insert public key into VM user={} ssh_key={}".format(
6226 user, pub_key
6227 )
6228 )
6229 self.logger.debug(logging_text + step)
6230
6231 # wait for RO (ip-address) Insert pub_key into VM
6232 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6233 logging_text,
6234 nsr_id,
6235 vnfr_id,
6236 None,
6237 item["vdu_index"],
6238 user=user,
6239 pub_key=pub_key,
6240 )
6241
6242 initial_config_primitive_list = config_descriptor.get(
6243 "initial-config-primitive"
6244 )
6245 config_primitive = next(
6246 (
6247 p
6248 for p in initial_config_primitive_list
6249 if p["name"] == "config"
6250 ),
6251 None,
6252 )
6253 if not config_primitive:
6254 continue
6255
6256 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6257 if rw_mgmt_ip:
6258 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6259 if db_vnfr.get("additionalParamsForVnf"):
6260 deploy_params.update(
6261 parse_yaml_strings(
6262 db_vnfr["additionalParamsForVnf"].copy()
6263 )
6264 )
6265 primitive_params_ = self._map_primitive_params(
6266 config_primitive, {}, deploy_params
6267 )
6268
6269 step = "execute primitive '{}' params '{}'".format(
6270 config_primitive["name"], primitive_params_
6271 )
6272 self.logger.debug(logging_text + step)
6273 await self.vca_map[vca_type].exec_primitive(
6274 ee_id=ee_id,
6275 primitive_name=config_primitive["name"],
6276 params_dict=primitive_params_,
6277 db_dict=db_dict,
6278 vca_id=vca_id,
6279 vca_type=vca_type,
6280 )
6281
6282 step = "Updating policies"
6283 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6284 detailed_status = "Done"
6285 db_nslcmop_update["detailed-status"] = "Done"
6286
6287 # If nslcmop_operation_state is None, so any operation is not failed.
6288 if not nslcmop_operation_state:
6289 nslcmop_operation_state = "COMPLETED"
6290
6291 # If update CHANGE_VNFPKG nslcmop_operation is successful
6292 # vnf revision need to be updated
6293 vnfr_update["revision"] = latest_vnfd_revision
6294 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6295
6296 self.logger.debug(
6297 logging_text
6298 + " task Done with result {} {}".format(
6299 nslcmop_operation_state, detailed_status
6300 )
6301 )
6302 elif update_type == "REMOVE_VNF":
6303 # This part is included in https://osm.etsi.org/gerrit/11876
6304 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6305 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6306 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6307 step = "Removing VNF"
6308 (result, detailed_status) = await self.remove_vnf(
6309 nsr_id, nslcmop_id, vnf_instance_id
6310 )
6311 if result == "FAILED":
6312 nslcmop_operation_state = result
6313 error_description_nslcmop = detailed_status
6314 db_nslcmop_update["detailed-status"] = detailed_status
6315 change_type = "vnf_terminated"
6316 if not nslcmop_operation_state:
6317 nslcmop_operation_state = "COMPLETED"
6318 self.logger.debug(
6319 logging_text
6320 + " task Done with result {} {}".format(
6321 nslcmop_operation_state, detailed_status
6322 )
6323 )
6324
6325 elif update_type == "OPERATE_VNF":
6326 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6327 "vnfInstanceId"
6328 ]
6329 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6330 "changeStateTo"
6331 ]
6332 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6333 "additionalParam"
6334 ]
6335 (result, detailed_status) = await self.rebuild_start_stop(
6336 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6337 )
6338 if result == "FAILED":
6339 nslcmop_operation_state = result
6340 error_description_nslcmop = detailed_status
6341 db_nslcmop_update["detailed-status"] = detailed_status
6342 if not nslcmop_operation_state:
6343 nslcmop_operation_state = "COMPLETED"
6344 self.logger.debug(
6345 logging_text
6346 + " task Done with result {} {}".format(
6347 nslcmop_operation_state, detailed_status
6348 )
6349 )
6350
6351 # If nslcmop_operation_state is None, so any operation is not failed.
6352 # All operations are executed in overall.
6353 if not nslcmop_operation_state:
6354 nslcmop_operation_state = "COMPLETED"
6355 db_nsr_update["operational-status"] = old_operational_status
6356
6357 except (DbException, LcmException, N2VCException, K8sException) as e:
6358 self.logger.error(logging_text + "Exit Exception {}".format(e))
6359 exc = e
6360 except asyncio.CancelledError:
6361 self.logger.error(
6362 logging_text + "Cancelled Exception while '{}'".format(step)
6363 )
6364 exc = "Operation was cancelled"
6365 except asyncio.TimeoutError:
6366 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6367 exc = "Timeout"
6368 except Exception as e:
6369 exc = traceback.format_exc()
6370 self.logger.critical(
6371 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6372 exc_info=True,
6373 )
6374 finally:
6375 if exc:
6376 db_nslcmop_update[
6377 "detailed-status"
6378 ] = (
6379 detailed_status
6380 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6381 nslcmop_operation_state = "FAILED"
6382 db_nsr_update["operational-status"] = old_operational_status
6383 if db_nsr:
6384 self._write_ns_status(
6385 nsr_id=nsr_id,
6386 ns_state=db_nsr["nsState"],
6387 current_operation="IDLE",
6388 current_operation_id=None,
6389 other_update=db_nsr_update,
6390 )
6391
6392 self._write_op_status(
6393 op_id=nslcmop_id,
6394 stage="",
6395 error_message=error_description_nslcmop,
6396 operation_state=nslcmop_operation_state,
6397 other_update=db_nslcmop_update,
6398 )
6399
6400 if nslcmop_operation_state:
6401 try:
6402 msg = {
6403 "nsr_id": nsr_id,
6404 "nslcmop_id": nslcmop_id,
6405 "operationState": nslcmop_operation_state,
6406 }
6407 if (
6408 change_type in ("vnf_terminated", "policy_updated")
6409 and member_vnf_index
6410 ):
6411 msg.update({"vnf_member_index": member_vnf_index})
6412 await self.msg.aiowrite("ns", change_type, msg)
6413 except Exception as e:
6414 self.logger.error(
6415 logging_text + "kafka_write notification Exception {}".format(e)
6416 )
6417 self.logger.debug(logging_text + "Exit")
6418 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6419 return nslcmop_operation_state, detailed_status
6420
6421 async def scale(self, nsr_id, nslcmop_id):
6422 # Try to lock HA task here
6423 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6424 if not task_is_locked_by_me:
6425 return
6426
6427 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6428 stage = ["", "", ""]
6429 tasks_dict_info = {}
6430 # ^ stage, step, VIM progress
6431 self.logger.debug(logging_text + "Enter")
6432 # get all needed from database
6433 db_nsr = None
6434 db_nslcmop_update = {}
6435 db_nsr_update = {}
6436 exc = None
6437 # in case of error, indicates what part of scale was failed to put nsr at error status
6438 scale_process = None
6439 old_operational_status = ""
6440 old_config_status = ""
6441 nsi_id = None
6442 try:
6443 # wait for any previous tasks in process
6444 step = "Waiting for previous operations to terminate"
6445 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6446 self._write_ns_status(
6447 nsr_id=nsr_id,
6448 ns_state=None,
6449 current_operation="SCALING",
6450 current_operation_id=nslcmop_id,
6451 )
6452
6453 step = "Getting nslcmop from database"
6454 self.logger.debug(
6455 step + " after having waited for previous tasks to be completed"
6456 )
6457 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6458
6459 step = "Getting nsr from database"
6460 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6461 old_operational_status = db_nsr["operational-status"]
6462 old_config_status = db_nsr["config-status"]
6463
6464 step = "Parsing scaling parameters"
6465 db_nsr_update["operational-status"] = "scaling"
6466 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6467 nsr_deployed = db_nsr["_admin"].get("deployed")
6468
6469 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6470 "scaleByStepData"
6471 ]["member-vnf-index"]
6472 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6473 "scaleByStepData"
6474 ]["scaling-group-descriptor"]
6475 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6476 # for backward compatibility
6477 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6478 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6479 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6480 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6481
6482 step = "Getting vnfr from database"
6483 db_vnfr = self.db.get_one(
6484 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6485 )
6486
6487 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6488
6489 step = "Getting vnfd from database"
6490 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6491
6492 base_folder = db_vnfd["_admin"]["storage"]
6493
6494 step = "Getting scaling-group-descriptor"
6495 scaling_descriptor = find_in_list(
6496 get_scaling_aspect(db_vnfd),
6497 lambda scale_desc: scale_desc["name"] == scaling_group,
6498 )
6499 if not scaling_descriptor:
6500 raise LcmException(
6501 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6502 "at vnfd:scaling-group-descriptor".format(scaling_group)
6503 )
6504
6505 step = "Sending scale order to VIM"
6506 # TODO check if ns is in a proper status
6507 nb_scale_op = 0
6508 if not db_nsr["_admin"].get("scaling-group"):
6509 self.update_db_2(
6510 "nsrs",
6511 nsr_id,
6512 {
6513 "_admin.scaling-group": [
6514 {"name": scaling_group, "nb-scale-op": 0}
6515 ]
6516 },
6517 )
6518 admin_scale_index = 0
6519 else:
6520 for admin_scale_index, admin_scale_info in enumerate(
6521 db_nsr["_admin"]["scaling-group"]
6522 ):
6523 if admin_scale_info["name"] == scaling_group:
6524 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6525 break
6526 else: # not found, set index one plus last element and add new entry with the name
6527 admin_scale_index += 1
6528 db_nsr_update[
6529 "_admin.scaling-group.{}.name".format(admin_scale_index)
6530 ] = scaling_group
6531
6532 vca_scaling_info = []
6533 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6534 if scaling_type == "SCALE_OUT":
6535 if "aspect-delta-details" not in scaling_descriptor:
6536 raise LcmException(
6537 "Aspect delta details not fount in scaling descriptor {}".format(
6538 scaling_descriptor["name"]
6539 )
6540 )
6541 # count if max-instance-count is reached
6542 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6543
6544 scaling_info["scaling_direction"] = "OUT"
6545 scaling_info["vdu-create"] = {}
6546 scaling_info["kdu-create"] = {}
6547 for delta in deltas:
6548 for vdu_delta in delta.get("vdu-delta", {}):
6549 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6550 # vdu_index also provides the number of instance of the targeted vdu
6551 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6552 cloud_init_text = self._get_vdu_cloud_init_content(
6553 vdud, db_vnfd
6554 )
6555 if cloud_init_text:
6556 additional_params = (
6557 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6558 or {}
6559 )
6560 cloud_init_list = []
6561
6562 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6563 max_instance_count = 10
6564 if vdu_profile and "max-number-of-instances" in vdu_profile:
6565 max_instance_count = vdu_profile.get(
6566 "max-number-of-instances", 10
6567 )
6568
6569 default_instance_num = get_number_of_instances(
6570 db_vnfd, vdud["id"]
6571 )
6572 instances_number = vdu_delta.get("number-of-instances", 1)
6573 nb_scale_op += instances_number
6574
6575 new_instance_count = nb_scale_op + default_instance_num
6576 # Control if new count is over max and vdu count is less than max.
6577 # Then assign new instance count
6578 if new_instance_count > max_instance_count > vdu_count:
6579 instances_number = new_instance_count - max_instance_count
6580 else:
6581 instances_number = instances_number
6582
6583 if new_instance_count > max_instance_count:
6584 raise LcmException(
6585 "reached the limit of {} (max-instance-count) "
6586 "scaling-out operations for the "
6587 "scaling-group-descriptor '{}'".format(
6588 nb_scale_op, scaling_group
6589 )
6590 )
6591 for x in range(vdu_delta.get("number-of-instances", 1)):
6592 if cloud_init_text:
6593 # TODO Information of its own ip is not available because db_vnfr is not updated.
6594 additional_params["OSM"] = get_osm_params(
6595 db_vnfr, vdu_delta["id"], vdu_index + x
6596 )
6597 cloud_init_list.append(
6598 self._parse_cloud_init(
6599 cloud_init_text,
6600 additional_params,
6601 db_vnfd["id"],
6602 vdud["id"],
6603 )
6604 )
6605 vca_scaling_info.append(
6606 {
6607 "osm_vdu_id": vdu_delta["id"],
6608 "member-vnf-index": vnf_index,
6609 "type": "create",
6610 "vdu_index": vdu_index + x,
6611 }
6612 )
6613 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6614 for kdu_delta in delta.get("kdu-resource-delta", {}):
6615 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6616 kdu_name = kdu_profile["kdu-name"]
6617 resource_name = kdu_profile.get("resource-name", "")
6618
6619 # Might have different kdus in the same delta
6620 # Should have list for each kdu
6621 if not scaling_info["kdu-create"].get(kdu_name, None):
6622 scaling_info["kdu-create"][kdu_name] = []
6623
6624 kdur = get_kdur(db_vnfr, kdu_name)
6625 if kdur.get("helm-chart"):
6626 k8s_cluster_type = "helm-chart-v3"
6627 self.logger.debug("kdur: {}".format(kdur))
6628 elif kdur.get("juju-bundle"):
6629 k8s_cluster_type = "juju-bundle"
6630 else:
6631 raise LcmException(
6632 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6633 "juju-bundle. Maybe an old NBI version is running".format(
6634 db_vnfr["member-vnf-index-ref"], kdu_name
6635 )
6636 )
6637
6638 max_instance_count = 10
6639 if kdu_profile and "max-number-of-instances" in kdu_profile:
6640 max_instance_count = kdu_profile.get(
6641 "max-number-of-instances", 10
6642 )
6643
6644 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6645 deployed_kdu, _ = get_deployed_kdu(
6646 nsr_deployed, kdu_name, vnf_index
6647 )
6648 if deployed_kdu is None:
6649 raise LcmException(
6650 "KDU '{}' for vnf '{}' not deployed".format(
6651 kdu_name, vnf_index
6652 )
6653 )
6654 kdu_instance = deployed_kdu.get("kdu-instance")
6655 instance_num = await self.k8scluster_map[
6656 k8s_cluster_type
6657 ].get_scale_count(
6658 resource_name,
6659 kdu_instance,
6660 vca_id=vca_id,
6661 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6662 kdu_model=deployed_kdu.get("kdu-model"),
6663 )
6664 kdu_replica_count = instance_num + kdu_delta.get(
6665 "number-of-instances", 1
6666 )
6667
6668 # Control if new count is over max and instance_num is less than max.
6669 # Then assign max instance number to kdu replica count
6670 if kdu_replica_count > max_instance_count > instance_num:
6671 kdu_replica_count = max_instance_count
6672 if kdu_replica_count > max_instance_count:
6673 raise LcmException(
6674 "reached the limit of {} (max-instance-count) "
6675 "scaling-out operations for the "
6676 "scaling-group-descriptor '{}'".format(
6677 instance_num, scaling_group
6678 )
6679 )
6680
6681 for x in range(kdu_delta.get("number-of-instances", 1)):
6682 vca_scaling_info.append(
6683 {
6684 "osm_kdu_id": kdu_name,
6685 "member-vnf-index": vnf_index,
6686 "type": "create",
6687 "kdu_index": instance_num + x - 1,
6688 }
6689 )
6690 scaling_info["kdu-create"][kdu_name].append(
6691 {
6692 "member-vnf-index": vnf_index,
6693 "type": "create",
6694 "k8s-cluster-type": k8s_cluster_type,
6695 "resource-name": resource_name,
6696 "scale": kdu_replica_count,
6697 }
6698 )
6699 elif scaling_type == "SCALE_IN":
6700 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6701
6702 scaling_info["scaling_direction"] = "IN"
6703 scaling_info["vdu-delete"] = {}
6704 scaling_info["kdu-delete"] = {}
6705
6706 for delta in deltas:
6707 for vdu_delta in delta.get("vdu-delta", {}):
6708 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6709 min_instance_count = 0
6710 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6711 if vdu_profile and "min-number-of-instances" in vdu_profile:
6712 min_instance_count = vdu_profile["min-number-of-instances"]
6713
6714 default_instance_num = get_number_of_instances(
6715 db_vnfd, vdu_delta["id"]
6716 )
6717 instance_num = vdu_delta.get("number-of-instances", 1)
6718 nb_scale_op -= instance_num
6719
6720 new_instance_count = nb_scale_op + default_instance_num
6721
6722 if new_instance_count < min_instance_count < vdu_count:
6723 instances_number = min_instance_count - new_instance_count
6724 else:
6725 instances_number = instance_num
6726
6727 if new_instance_count < min_instance_count:
6728 raise LcmException(
6729 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6730 "scaling-group-descriptor '{}'".format(
6731 nb_scale_op, scaling_group
6732 )
6733 )
6734 for x in range(vdu_delta.get("number-of-instances", 1)):
6735 vca_scaling_info.append(
6736 {
6737 "osm_vdu_id": vdu_delta["id"],
6738 "member-vnf-index": vnf_index,
6739 "type": "delete",
6740 "vdu_index": vdu_index - 1 - x,
6741 }
6742 )
6743 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6744 for kdu_delta in delta.get("kdu-resource-delta", {}):
6745 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6746 kdu_name = kdu_profile["kdu-name"]
6747 resource_name = kdu_profile.get("resource-name", "")
6748
6749 if not scaling_info["kdu-delete"].get(kdu_name, None):
6750 scaling_info["kdu-delete"][kdu_name] = []
6751
6752 kdur = get_kdur(db_vnfr, kdu_name)
6753 if kdur.get("helm-chart"):
6754 k8s_cluster_type = "helm-chart-v3"
6755 self.logger.debug("kdur: {}".format(kdur))
6756 elif kdur.get("juju-bundle"):
6757 k8s_cluster_type = "juju-bundle"
6758 else:
6759 raise LcmException(
6760 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6761 "juju-bundle. Maybe an old NBI version is running".format(
6762 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6763 )
6764 )
6765
6766 min_instance_count = 0
6767 if kdu_profile and "min-number-of-instances" in kdu_profile:
6768 min_instance_count = kdu_profile["min-number-of-instances"]
6769
6770 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6771 deployed_kdu, _ = get_deployed_kdu(
6772 nsr_deployed, kdu_name, vnf_index
6773 )
6774 if deployed_kdu is None:
6775 raise LcmException(
6776 "KDU '{}' for vnf '{}' not deployed".format(
6777 kdu_name, vnf_index
6778 )
6779 )
6780 kdu_instance = deployed_kdu.get("kdu-instance")
6781 instance_num = await self.k8scluster_map[
6782 k8s_cluster_type
6783 ].get_scale_count(
6784 resource_name,
6785 kdu_instance,
6786 vca_id=vca_id,
6787 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6788 kdu_model=deployed_kdu.get("kdu-model"),
6789 )
6790 kdu_replica_count = instance_num - kdu_delta.get(
6791 "number-of-instances", 1
6792 )
6793
6794 if kdu_replica_count < min_instance_count < instance_num:
6795 kdu_replica_count = min_instance_count
6796 if kdu_replica_count < min_instance_count:
6797 raise LcmException(
6798 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6799 "scaling-group-descriptor '{}'".format(
6800 instance_num, scaling_group
6801 )
6802 )
6803
6804 for x in range(kdu_delta.get("number-of-instances", 1)):
6805 vca_scaling_info.append(
6806 {
6807 "osm_kdu_id": kdu_name,
6808 "member-vnf-index": vnf_index,
6809 "type": "delete",
6810 "kdu_index": instance_num - x - 1,
6811 }
6812 )
6813 scaling_info["kdu-delete"][kdu_name].append(
6814 {
6815 "member-vnf-index": vnf_index,
6816 "type": "delete",
6817 "k8s-cluster-type": k8s_cluster_type,
6818 "resource-name": resource_name,
6819 "scale": kdu_replica_count,
6820 }
6821 )
6822
6823 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6824 vdu_delete = copy(scaling_info.get("vdu-delete"))
6825 if scaling_info["scaling_direction"] == "IN":
6826 for vdur in reversed(db_vnfr["vdur"]):
6827 if vdu_delete.get(vdur["vdu-id-ref"]):
6828 vdu_delete[vdur["vdu-id-ref"]] -= 1
6829 scaling_info["vdu"].append(
6830 {
6831 "name": vdur.get("name") or vdur.get("vdu-name"),
6832 "vdu_id": vdur["vdu-id-ref"],
6833 "interface": [],
6834 }
6835 )
6836 for interface in vdur["interfaces"]:
6837 scaling_info["vdu"][-1]["interface"].append(
6838 {
6839 "name": interface["name"],
6840 "ip_address": interface["ip-address"],
6841 "mac_address": interface.get("mac-address"),
6842 }
6843 )
6844 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6845
6846 # PRE-SCALE BEGIN
6847 step = "Executing pre-scale vnf-config-primitive"
6848 if scaling_descriptor.get("scaling-config-action"):
6849 for scaling_config_action in scaling_descriptor[
6850 "scaling-config-action"
6851 ]:
6852 if (
6853 scaling_config_action.get("trigger") == "pre-scale-in"
6854 and scaling_type == "SCALE_IN"
6855 ) or (
6856 scaling_config_action.get("trigger") == "pre-scale-out"
6857 and scaling_type == "SCALE_OUT"
6858 ):
6859 vnf_config_primitive = scaling_config_action[
6860 "vnf-config-primitive-name-ref"
6861 ]
6862 step = db_nslcmop_update[
6863 "detailed-status"
6864 ] = "executing pre-scale scaling-config-action '{}'".format(
6865 vnf_config_primitive
6866 )
6867
6868 # look for primitive
6869 for config_primitive in (
6870 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6871 ).get("config-primitive", ()):
6872 if config_primitive["name"] == vnf_config_primitive:
6873 break
6874 else:
6875 raise LcmException(
6876 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6877 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6878 "primitive".format(scaling_group, vnf_config_primitive)
6879 )
6880
6881 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6882 if db_vnfr.get("additionalParamsForVnf"):
6883 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6884
6885 scale_process = "VCA"
6886 db_nsr_update["config-status"] = "configuring pre-scaling"
6887 primitive_params = self._map_primitive_params(
6888 config_primitive, {}, vnfr_params
6889 )
6890
6891 # Pre-scale retry check: Check if this sub-operation has been executed before
6892 op_index = self._check_or_add_scale_suboperation(
6893 db_nslcmop,
6894 vnf_index,
6895 vnf_config_primitive,
6896 primitive_params,
6897 "PRE-SCALE",
6898 )
6899 if op_index == self.SUBOPERATION_STATUS_SKIP:
6900 # Skip sub-operation
6901 result = "COMPLETED"
6902 result_detail = "Done"
6903 self.logger.debug(
6904 logging_text
6905 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6906 vnf_config_primitive, result, result_detail
6907 )
6908 )
6909 else:
6910 if op_index == self.SUBOPERATION_STATUS_NEW:
6911 # New sub-operation: Get index of this sub-operation
6912 op_index = (
6913 len(db_nslcmop.get("_admin", {}).get("operations"))
6914 - 1
6915 )
6916 self.logger.debug(
6917 logging_text
6918 + "vnf_config_primitive={} New sub-operation".format(
6919 vnf_config_primitive
6920 )
6921 )
6922 else:
6923 # retry: Get registered params for this existing sub-operation
6924 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6925 op_index
6926 ]
6927 vnf_index = op.get("member_vnf_index")
6928 vnf_config_primitive = op.get("primitive")
6929 primitive_params = op.get("primitive_params")
6930 self.logger.debug(
6931 logging_text
6932 + "vnf_config_primitive={} Sub-operation retry".format(
6933 vnf_config_primitive
6934 )
6935 )
6936 # Execute the primitive, either with new (first-time) or registered (reintent) args
6937 ee_descriptor_id = config_primitive.get(
6938 "execution-environment-ref"
6939 )
6940 primitive_name = config_primitive.get(
6941 "execution-environment-primitive", vnf_config_primitive
6942 )
6943 ee_id, vca_type = self._look_for_deployed_vca(
6944 nsr_deployed["VCA"],
6945 member_vnf_index=vnf_index,
6946 vdu_id=None,
6947 vdu_count_index=None,
6948 ee_descriptor_id=ee_descriptor_id,
6949 )
6950 result, result_detail = await self._ns_execute_primitive(
6951 ee_id,
6952 primitive_name,
6953 primitive_params,
6954 vca_type=vca_type,
6955 vca_id=vca_id,
6956 )
6957 self.logger.debug(
6958 logging_text
6959 + "vnf_config_primitive={} Done with result {} {}".format(
6960 vnf_config_primitive, result, result_detail
6961 )
6962 )
6963 # Update operationState = COMPLETED | FAILED
6964 self._update_suboperation_status(
6965 db_nslcmop, op_index, result, result_detail
6966 )
6967
6968 if result == "FAILED":
6969 raise LcmException(result_detail)
6970 db_nsr_update["config-status"] = old_config_status
6971 scale_process = None
6972 # PRE-SCALE END
6973
6974 db_nsr_update[
6975 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6976 ] = nb_scale_op
6977 db_nsr_update[
6978 "_admin.scaling-group.{}.time".format(admin_scale_index)
6979 ] = time()
6980
6981 # SCALE-IN VCA - BEGIN
6982 if vca_scaling_info:
6983 step = db_nslcmop_update[
6984 "detailed-status"
6985 ] = "Deleting the execution environments"
6986 scale_process = "VCA"
6987 for vca_info in vca_scaling_info:
6988 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6989 member_vnf_index = str(vca_info["member-vnf-index"])
6990 self.logger.debug(
6991 logging_text + "vdu info: {}".format(vca_info)
6992 )
6993 if vca_info.get("osm_vdu_id"):
6994 vdu_id = vca_info["osm_vdu_id"]
6995 vdu_index = int(vca_info["vdu_index"])
6996 stage[
6997 1
6998 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6999 member_vnf_index, vdu_id, vdu_index
7000 )
7001 stage[2] = step = "Scaling in VCA"
7002 self._write_op_status(op_id=nslcmop_id, stage=stage)
7003 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
7004 config_update = db_nsr["configurationStatus"]
7005 for vca_index, vca in enumerate(vca_update):
7006 if (
7007 (vca or vca.get("ee_id"))
7008 and vca["member-vnf-index"] == member_vnf_index
7009 and vca["vdu_count_index"] == vdu_index
7010 ):
7011 if vca.get("vdu_id"):
7012 config_descriptor = get_configuration(
7013 db_vnfd, vca.get("vdu_id")
7014 )
7015 elif vca.get("kdu_name"):
7016 config_descriptor = get_configuration(
7017 db_vnfd, vca.get("kdu_name")
7018 )
7019 else:
7020 config_descriptor = get_configuration(
7021 db_vnfd, db_vnfd["id"]
7022 )
7023 operation_params = (
7024 db_nslcmop.get("operationParams") or {}
7025 )
7026 exec_terminate_primitives = not operation_params.get(
7027 "skip_terminate_primitives"
7028 ) and vca.get("needed_terminate")
7029 task = asyncio.ensure_future(
7030 asyncio.wait_for(
7031 self.destroy_N2VC(
7032 logging_text,
7033 db_nslcmop,
7034 vca,
7035 config_descriptor,
7036 vca_index,
7037 destroy_ee=True,
7038 exec_primitives=exec_terminate_primitives,
7039 scaling_in=True,
7040 vca_id=vca_id,
7041 ),
7042 timeout=self.timeout.charm_delete,
7043 )
7044 )
7045 tasks_dict_info[task] = "Terminating VCA {}".format(
7046 vca.get("ee_id")
7047 )
7048 del vca_update[vca_index]
7049 del config_update[vca_index]
7050 # wait for pending tasks of terminate primitives
7051 if tasks_dict_info:
7052 self.logger.debug(
7053 logging_text
7054 + "Waiting for tasks {}".format(
7055 list(tasks_dict_info.keys())
7056 )
7057 )
7058 error_list = await self._wait_for_tasks(
7059 logging_text,
7060 tasks_dict_info,
7061 min(
7062 self.timeout.charm_delete, self.timeout.ns_terminate
7063 ),
7064 stage,
7065 nslcmop_id,
7066 )
7067 tasks_dict_info.clear()
7068 if error_list:
7069 raise LcmException("; ".join(error_list))
7070
7071 db_vca_and_config_update = {
7072 "_admin.deployed.VCA": vca_update,
7073 "configurationStatus": config_update,
7074 }
7075 self.update_db_2(
7076 "nsrs", db_nsr["_id"], db_vca_and_config_update
7077 )
7078 scale_process = None
7079 # SCALE-IN VCA - END
7080
7081 # SCALE RO - BEGIN
7082 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
7083 scale_process = "RO"
7084 if self.ro_config.ng:
7085 await self._scale_ng_ro(
7086 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
7087 )
7088 scaling_info.pop("vdu-create", None)
7089 scaling_info.pop("vdu-delete", None)
7090
7091 scale_process = None
7092 # SCALE RO - END
7093
7094 # SCALE KDU - BEGIN
7095 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
7096 scale_process = "KDU"
7097 await self._scale_kdu(
7098 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7099 )
7100 scaling_info.pop("kdu-create", None)
7101 scaling_info.pop("kdu-delete", None)
7102
7103 scale_process = None
7104 # SCALE KDU - END
7105
7106 if db_nsr_update:
7107 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7108
7109 # SCALE-UP VCA - BEGIN
7110 if vca_scaling_info:
7111 step = db_nslcmop_update[
7112 "detailed-status"
7113 ] = "Creating new execution environments"
7114 scale_process = "VCA"
7115 for vca_info in vca_scaling_info:
7116 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
7117 member_vnf_index = str(vca_info["member-vnf-index"])
7118 self.logger.debug(
7119 logging_text + "vdu info: {}".format(vca_info)
7120 )
7121 vnfd_id = db_vnfr["vnfd-ref"]
7122 if vca_info.get("osm_vdu_id"):
7123 vdu_index = int(vca_info["vdu_index"])
7124 deploy_params = {"OSM": get_osm_params(db_vnfr)}
7125 if db_vnfr.get("additionalParamsForVnf"):
7126 deploy_params.update(
7127 parse_yaml_strings(
7128 db_vnfr["additionalParamsForVnf"].copy()
7129 )
7130 )
7131 descriptor_config = get_configuration(
7132 db_vnfd, db_vnfd["id"]
7133 )
7134 if descriptor_config:
7135 vdu_id = None
7136 vdu_name = None
7137 kdu_name = None
7138 kdu_index = None
7139 self._deploy_n2vc(
7140 logging_text=logging_text
7141 + "member_vnf_index={} ".format(member_vnf_index),
7142 db_nsr=db_nsr,
7143 db_vnfr=db_vnfr,
7144 nslcmop_id=nslcmop_id,
7145 nsr_id=nsr_id,
7146 nsi_id=nsi_id,
7147 vnfd_id=vnfd_id,
7148 vdu_id=vdu_id,
7149 kdu_name=kdu_name,
7150 kdu_index=kdu_index,
7151 member_vnf_index=member_vnf_index,
7152 vdu_index=vdu_index,
7153 vdu_name=vdu_name,
7154 deploy_params=deploy_params,
7155 descriptor_config=descriptor_config,
7156 base_folder=base_folder,
7157 task_instantiation_info=tasks_dict_info,
7158 stage=stage,
7159 )
7160 vdu_id = vca_info["osm_vdu_id"]
7161 vdur = find_in_list(
7162 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7163 )
7164 descriptor_config = get_configuration(db_vnfd, vdu_id)
7165 if vdur.get("additionalParams"):
7166 deploy_params_vdu = parse_yaml_strings(
7167 vdur["additionalParams"]
7168 )
7169 else:
7170 deploy_params_vdu = deploy_params
7171 deploy_params_vdu["OSM"] = get_osm_params(
7172 db_vnfr, vdu_id, vdu_count_index=vdu_index
7173 )
7174 if descriptor_config:
7175 vdu_name = None
7176 kdu_name = None
7177 kdu_index = None
7178 stage[
7179 1
7180 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7181 member_vnf_index, vdu_id, vdu_index
7182 )
7183 stage[2] = step = "Scaling out VCA"
7184 self._write_op_status(op_id=nslcmop_id, stage=stage)
7185 self._deploy_n2vc(
7186 logging_text=logging_text
7187 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7188 member_vnf_index, vdu_id, vdu_index
7189 ),
7190 db_nsr=db_nsr,
7191 db_vnfr=db_vnfr,
7192 nslcmop_id=nslcmop_id,
7193 nsr_id=nsr_id,
7194 nsi_id=nsi_id,
7195 vnfd_id=vnfd_id,
7196 vdu_id=vdu_id,
7197 kdu_name=kdu_name,
7198 member_vnf_index=member_vnf_index,
7199 vdu_index=vdu_index,
7200 kdu_index=kdu_index,
7201 vdu_name=vdu_name,
7202 deploy_params=deploy_params_vdu,
7203 descriptor_config=descriptor_config,
7204 base_folder=base_folder,
7205 task_instantiation_info=tasks_dict_info,
7206 stage=stage,
7207 )
7208 # SCALE-UP VCA - END
7209 scale_process = None
7210
7211 # POST-SCALE BEGIN
7212 # execute primitive service POST-SCALING
7213 step = "Executing post-scale vnf-config-primitive"
7214 if scaling_descriptor.get("scaling-config-action"):
7215 for scaling_config_action in scaling_descriptor[
7216 "scaling-config-action"
7217 ]:
7218 if (
7219 scaling_config_action.get("trigger") == "post-scale-in"
7220 and scaling_type == "SCALE_IN"
7221 ) or (
7222 scaling_config_action.get("trigger") == "post-scale-out"
7223 and scaling_type == "SCALE_OUT"
7224 ):
7225 vnf_config_primitive = scaling_config_action[
7226 "vnf-config-primitive-name-ref"
7227 ]
7228 step = db_nslcmop_update[
7229 "detailed-status"
7230 ] = "executing post-scale scaling-config-action '{}'".format(
7231 vnf_config_primitive
7232 )
7233
7234 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7235 if db_vnfr.get("additionalParamsForVnf"):
7236 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7237
7238 # look for primitive
7239 for config_primitive in (
7240 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7241 ).get("config-primitive", ()):
7242 if config_primitive["name"] == vnf_config_primitive:
7243 break
7244 else:
7245 raise LcmException(
7246 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7247 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7248 "config-primitive".format(
7249 scaling_group, vnf_config_primitive
7250 )
7251 )
7252 scale_process = "VCA"
7253 db_nsr_update["config-status"] = "configuring post-scaling"
7254 primitive_params = self._map_primitive_params(
7255 config_primitive, {}, vnfr_params
7256 )
7257
7258 # Post-scale retry check: Check if this sub-operation has been executed before
7259 op_index = self._check_or_add_scale_suboperation(
7260 db_nslcmop,
7261 vnf_index,
7262 vnf_config_primitive,
7263 primitive_params,
7264 "POST-SCALE",
7265 )
7266 if op_index == self.SUBOPERATION_STATUS_SKIP:
7267 # Skip sub-operation
7268 result = "COMPLETED"
7269 result_detail = "Done"
7270 self.logger.debug(
7271 logging_text
7272 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7273 vnf_config_primitive, result, result_detail
7274 )
7275 )
7276 else:
7277 if op_index == self.SUBOPERATION_STATUS_NEW:
7278 # New sub-operation: Get index of this sub-operation
7279 op_index = (
7280 len(db_nslcmop.get("_admin", {}).get("operations"))
7281 - 1
7282 )
7283 self.logger.debug(
7284 logging_text
7285 + "vnf_config_primitive={} New sub-operation".format(
7286 vnf_config_primitive
7287 )
7288 )
7289 else:
7290 # retry: Get registered params for this existing sub-operation
7291 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7292 op_index
7293 ]
7294 vnf_index = op.get("member_vnf_index")
7295 vnf_config_primitive = op.get("primitive")
7296 primitive_params = op.get("primitive_params")
7297 self.logger.debug(
7298 logging_text
7299 + "vnf_config_primitive={} Sub-operation retry".format(
7300 vnf_config_primitive
7301 )
7302 )
7303 # Execute the primitive, either with new (first-time) or registered (reintent) args
7304 ee_descriptor_id = config_primitive.get(
7305 "execution-environment-ref"
7306 )
7307 primitive_name = config_primitive.get(
7308 "execution-environment-primitive", vnf_config_primitive
7309 )
7310 ee_id, vca_type = self._look_for_deployed_vca(
7311 nsr_deployed["VCA"],
7312 member_vnf_index=vnf_index,
7313 vdu_id=None,
7314 vdu_count_index=None,
7315 ee_descriptor_id=ee_descriptor_id,
7316 )
7317 result, result_detail = await self._ns_execute_primitive(
7318 ee_id,
7319 primitive_name,
7320 primitive_params,
7321 vca_type=vca_type,
7322 vca_id=vca_id,
7323 )
7324 self.logger.debug(
7325 logging_text
7326 + "vnf_config_primitive={} Done with result {} {}".format(
7327 vnf_config_primitive, result, result_detail
7328 )
7329 )
7330 # Update operationState = COMPLETED | FAILED
7331 self._update_suboperation_status(
7332 db_nslcmop, op_index, result, result_detail
7333 )
7334
7335 if result == "FAILED":
7336 raise LcmException(result_detail)
7337 db_nsr_update["config-status"] = old_config_status
7338 scale_process = None
7339 # POST-SCALE END
7340
7341 db_nsr_update[
7342 "detailed-status"
7343 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7344 db_nsr_update["operational-status"] = (
7345 "running"
7346 if old_operational_status == "failed"
7347 else old_operational_status
7348 )
7349 db_nsr_update["config-status"] = old_config_status
7350 return
7351 except (
7352 ROclient.ROClientException,
7353 DbException,
7354 LcmException,
7355 NgRoException,
7356 ) as e:
7357 self.logger.error(logging_text + "Exit Exception {}".format(e))
7358 exc = e
7359 except asyncio.CancelledError:
7360 self.logger.error(
7361 logging_text + "Cancelled Exception while '{}'".format(step)
7362 )
7363 exc = "Operation was cancelled"
7364 except Exception as e:
7365 exc = traceback.format_exc()
7366 self.logger.critical(
7367 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7368 exc_info=True,
7369 )
7370 finally:
7371 error_list = list()
7372 if exc:
7373 error_list.append(str(exc))
7374 self._write_ns_status(
7375 nsr_id=nsr_id,
7376 ns_state=None,
7377 current_operation="IDLE",
7378 current_operation_id=None,
7379 )
7380 try:
7381 if tasks_dict_info:
7382 stage[1] = "Waiting for instantiate pending tasks."
7383 self.logger.debug(logging_text + stage[1])
7384 exc = await self._wait_for_tasks(
7385 logging_text,
7386 tasks_dict_info,
7387 self.timeout.ns_deploy,
7388 stage,
7389 nslcmop_id,
7390 nsr_id=nsr_id,
7391 )
7392 except asyncio.CancelledError:
7393 error_list.append("Cancelled")
7394 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
7395 await self._wait_for_tasks(
7396 logging_text,
7397 tasks_dict_info,
7398 self.timeout.ns_deploy,
7399 stage,
7400 nslcmop_id,
7401 nsr_id=nsr_id,
7402 )
7403 if error_list:
7404 error_detail = "; ".join(error_list)
7405 db_nslcmop_update[
7406 "detailed-status"
7407 ] = error_description_nslcmop = "FAILED {}: {}".format(
7408 step, error_detail
7409 )
7410 nslcmop_operation_state = "FAILED"
7411 if db_nsr:
7412 db_nsr_update["operational-status"] = old_operational_status
7413 db_nsr_update["config-status"] = old_config_status
7414 db_nsr_update["detailed-status"] = ""
7415 if scale_process:
7416 if "VCA" in scale_process:
7417 db_nsr_update["config-status"] = "failed"
7418 if "RO" in scale_process:
7419 db_nsr_update["operational-status"] = "failed"
7420 db_nsr_update[
7421 "detailed-status"
7422 ] = "FAILED scaling nslcmop={} {}: {}".format(
7423 nslcmop_id, step, error_detail
7424 )
7425 else:
7426 error_description_nslcmop = None
7427 nslcmop_operation_state = "COMPLETED"
7428 db_nslcmop_update["detailed-status"] = "Done"
7429
7430 self._write_op_status(
7431 op_id=nslcmop_id,
7432 stage="",
7433 error_message=error_description_nslcmop,
7434 operation_state=nslcmop_operation_state,
7435 other_update=db_nslcmop_update,
7436 )
7437 if db_nsr:
7438 self._write_ns_status(
7439 nsr_id=nsr_id,
7440 ns_state=None,
7441 current_operation="IDLE",
7442 current_operation_id=None,
7443 other_update=db_nsr_update,
7444 )
7445
7446 if nslcmop_operation_state:
7447 try:
7448 msg = {
7449 "nsr_id": nsr_id,
7450 "nslcmop_id": nslcmop_id,
7451 "operationState": nslcmop_operation_state,
7452 }
7453 await self.msg.aiowrite("ns", "scaled", msg)
7454 except Exception as e:
7455 self.logger.error(
7456 logging_text + "kafka_write notification Exception {}".format(e)
7457 )
7458 self.logger.debug(logging_text + "Exit")
7459 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7460
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale KDUs in or out by driving the K8s cluster connectors.

        ``scaling_info`` carries a "kdu-create" and/or "kdu-delete" map of
        kdu-name -> list of per-KDU scaling records. For each record this
        method:
          1. on "delete", runs the KDU terminate-config-primitives (when the
             KDU has them and no Juju EE reference is declared),
          2. calls the connector's ``scale`` to apply the new replica count,
          3. on "create", runs the KDU initial-config-primitives.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id, used to point the connector at the nsr
        :param nsr_deployed: nsr "_admin.deployed" section (K8s deployments)
        :param db_vnfd: VNFD of the VNF that owns the KDUs
        :param vca_id: VCA id forwarded to the connector calls
        :param scaling_info: dict with "kdu-create" / "kdu-delete" entries
        """
        # Pick whichever direction was requested; if both keys were present,
        # only "kdu-create" would be iterated (the `or` short-circuits).
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # Locate the deployed KDU entry and its position inside
                # _admin.deployed.K8s for status reporting
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # DB location the K8s connector uses to report progress
                # (presumably written under _admin.deployed.K8s.<index>)
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # Terminate primitives run directly on the connector only
                    # when no Juju execution environment owns this KDU
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives are executed in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # outer wait_for is a safety net over the
                            # connector's own total_timeout
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    total_timeout=self.timeout.primitive,
                                    vca_id=vca_id,
                                ),
                                timeout=self.timeout.primitive
                                * self.timeout.primitive_outer_factor,
                            )

                # Apply the new replica count (both scale-in and scale-out)
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance=kdu_instance,
                        scale=scale,
                        resource_name=kdu_scaling_info["resource-name"],
                        total_timeout=self.timeout.scale_on_error,
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout.scale_on_error
                    * self.timeout.scale_on_error_outer_factor,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives are executed in ascending "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                # NOTE(review): hardcoded 600 s here, unlike the
                                # terminate path which uses self.timeout.primitive
                                # * primitive_outer_factor — confirm intended
                                timeout=600,
                            )
7570
7571 async def _scale_ng_ro(
7572 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7573 ):
7574 nsr_id = db_nslcmop["nsInstanceId"]
7575 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7576 db_vnfrs = {}
7577
7578 # read from db: vnfd's for every vnf
7579 db_vnfds = []
7580
7581 # for each vnf in ns, read vnfd
7582 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7583 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7584 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7585 # if we haven't this vnfd, read it from db
7586 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7587 # read from db
7588 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7589 db_vnfds.append(vnfd)
7590 n2vc_key = self.n2vc.get_public_key()
7591 n2vc_key_list = [n2vc_key]
7592 self.scale_vnfr(
7593 db_vnfr,
7594 vdu_scaling_info.get("vdu-create"),
7595 vdu_scaling_info.get("vdu-delete"),
7596 mark_delete=True,
7597 )
7598 # db_vnfr has been updated, update db_vnfrs to use it
7599 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7600 await self._instantiate_ng_ro(
7601 logging_text,
7602 nsr_id,
7603 db_nsd,
7604 db_nsr,
7605 db_nslcmop,
7606 db_vnfrs,
7607 db_vnfds,
7608 n2vc_key_list,
7609 stage=stage,
7610 start_deploy=time(),
7611 timeout_ns_deploy=self.timeout.ns_deploy,
7612 )
7613 if vdu_scaling_info.get("vdu-delete"):
7614 self.scale_vnfr(
7615 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7616 )
7617
7618 async def extract_prometheus_scrape_jobs(
7619 self,
7620 ee_id: str,
7621 artifact_path: str,
7622 ee_config_descriptor: dict,
7623 vnfr_id: str,
7624 nsr_id: str,
7625 target_ip: str,
7626 element_type: str,
7627 vnf_member_index: str = "",
7628 vdu_id: str = "",
7629 vdu_index: int = None,
7630 kdu_name: str = "",
7631 kdu_index: int = None,
7632 ) -> dict:
7633 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7634 This method will wait until the corresponding VDU or KDU is fully instantiated
7635
7636 Args:
7637 ee_id (str): Execution Environment ID
7638 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7639 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7640 vnfr_id (str): VNFR ID where this EE applies
7641 nsr_id (str): NSR ID where this EE applies
7642 target_ip (str): VDU/KDU instance IP address
7643 element_type (str): NS or VNF or VDU or KDU
7644 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7645 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7646 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7647 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7648 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7649
7650 Raises:
7651 LcmException: When the VDU or KDU instance was not found in an hour
7652
7653 Returns:
7654 _type_: Prometheus jobs
7655 """
7656 # default the vdur and kdur names to an empty string, to avoid any later
7657 # problem with Prometheus when the element type is not VDU or KDU
7658 vdur_name = ""
7659 kdur_name = ""
7660
7661 # look if exist a file called 'prometheus*.j2' and
7662 artifact_content = self.fs.dir_ls(artifact_path)
7663 job_file = next(
7664 (
7665 f
7666 for f in artifact_content
7667 if f.startswith("prometheus") and f.endswith(".j2")
7668 ),
7669 None,
7670 )
7671 if not job_file:
7672 return
7673 self.logger.debug("Artifact path{}".format(artifact_path))
7674 self.logger.debug("job file{}".format(job_file))
7675 with self.fs.file_open((artifact_path, job_file), "r") as f:
7676 job_data = f.read()
7677
7678 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7679 if element_type in ("VDU", "KDU"):
7680 for _ in range(360):
7681 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7682 if vdu_id and vdu_index is not None:
7683 vdur = next(
7684 (
7685 x
7686 for x in get_iterable(db_vnfr, "vdur")
7687 if (
7688 x.get("vdu-id-ref") == vdu_id
7689 and x.get("count-index") == vdu_index
7690 )
7691 ),
7692 {},
7693 )
7694 if vdur.get("name"):
7695 vdur_name = vdur.get("name")
7696 break
7697 if kdu_name and kdu_index is not None:
7698 kdur = next(
7699 (
7700 x
7701 for x in get_iterable(db_vnfr, "kdur")
7702 if (
7703 x.get("kdu-name") == kdu_name
7704 and x.get("count-index") == kdu_index
7705 )
7706 ),
7707 {},
7708 )
7709 if kdur.get("name"):
7710 kdur_name = kdur.get("name")
7711 break
7712
7713 await asyncio.sleep(10)
7714 else:
7715 if vdu_id and vdu_index is not None:
7716 raise LcmException(
7717 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7718 )
7719 if kdu_name and kdu_index is not None:
7720 raise LcmException(
7721 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7722 )
7723
7724 if ee_id is not None:
7725 _, namespace, helm_id = get_ee_id_parts(
7726 ee_id
7727 ) # get namespace and EE gRPC service name
7728 host_name = f'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
7729 host_port = "80"
7730 vnfr_id = vnfr_id.replace("-", "")
7731 variables = {
7732 "JOB_NAME": vnfr_id,
7733 "TARGET_IP": target_ip,
7734 "EXPORTER_POD_IP": host_name,
7735 "EXPORTER_POD_PORT": host_port,
7736 "NSR_ID": nsr_id,
7737 "VNF_MEMBER_INDEX": vnf_member_index,
7738 "VDUR_NAME": vdur_name,
7739 "KDUR_NAME": kdur_name,
7740 "ELEMENT_TYPE": element_type,
7741 }
7742 else:
7743 metric_path = ee_config_descriptor["metric-path"]
7744 target_port = ee_config_descriptor["metric-port"]
7745 vnfr_id = vnfr_id.replace("-", "")
7746 variables = {
7747 "JOB_NAME": vnfr_id,
7748 "TARGET_IP": target_ip,
7749 "TARGET_PORT": target_port,
7750 "METRIC_PATH": metric_path,
7751 }
7752
7753 job_list = parse_job(job_data, variables)
7754 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7755 for job in job_list:
7756 if (
7757 not isinstance(job.get("job_name"), str)
7758 or vnfr_id not in job["job_name"]
7759 ):
7760 job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
7761 job["nsr_id"] = nsr_id
7762 job["vnfr_id"] = vnfr_id
7763 return job_list
7764
7765 async def rebuild_start_stop(
7766 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7767 ):
7768 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7769 self.logger.info(logging_text + "Enter")
7770 stage = ["Preparing the environment", ""]
7771 # database nsrs record
7772 db_nsr_update = {}
7773 vdu_vim_name = None
7774 vim_vm_id = None
7775 # in case of error, indicates what part of scale was failed to put nsr at error status
7776 start_deploy = time()
7777 try:
7778 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7779 vim_account_id = db_vnfr.get("vim-account-id")
7780 vim_info_key = "vim:" + vim_account_id
7781 vdu_id = additional_param["vdu_id"]
7782 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7783 vdur = find_in_list(
7784 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7785 )
7786 if vdur:
7787 vdu_vim_name = vdur["name"]
7788 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7789 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7790 else:
7791 raise LcmException("Target vdu is not found")
7792 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7793 # wait for any previous tasks in process
7794 stage[1] = "Waiting for previous operations to terminate"
7795 self.logger.info(stage[1])
7796 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7797
7798 stage[1] = "Reading from database."
7799 self.logger.info(stage[1])
7800 self._write_ns_status(
7801 nsr_id=nsr_id,
7802 ns_state=None,
7803 current_operation=operation_type.upper(),
7804 current_operation_id=nslcmop_id,
7805 )
7806 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7807
7808 # read from db: ns
7809 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7810 db_nsr_update["operational-status"] = operation_type
7811 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7812 # Payload for RO
7813 desc = {
7814 operation_type: {
7815 "vim_vm_id": vim_vm_id,
7816 "vnf_id": vnf_id,
7817 "vdu_index": additional_param["count-index"],
7818 "vdu_id": vdur["id"],
7819 "target_vim": target_vim,
7820 "vim_account_id": vim_account_id,
7821 }
7822 }
7823 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7824 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7825 self.logger.info("ro nsr id: {}".format(nsr_id))
7826 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7827 self.logger.info("response from RO: {}".format(result_dict))
7828 action_id = result_dict["action_id"]
7829 await self._wait_ng_ro(
7830 nsr_id,
7831 action_id,
7832 nslcmop_id,
7833 start_deploy,
7834 self.timeout.operate,
7835 None,
7836 "start_stop_rebuild",
7837 )
7838 return "COMPLETED", "Done"
7839 except (ROclient.ROClientException, DbException, LcmException) as e:
7840 self.logger.error("Exit Exception {}".format(e))
7841 exc = e
7842 except asyncio.CancelledError:
7843 self.logger.error("Cancelled Exception while '{}'".format(stage))
7844 exc = "Operation was cancelled"
7845 except Exception as e:
7846 exc = traceback.format_exc()
7847 self.logger.critical(
7848 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7849 )
7850 return "FAILED", "Error in operate VNF {}".format(exc)
7851
7852 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7853 """
7854 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7855
7856 :param: vim_account_id: VIM Account ID
7857
7858 :return: (cloud_name, cloud_credential)
7859 """
7860 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7861 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7862
7863 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7864 """
7865 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7866
7867 :param: vim_account_id: VIM Account ID
7868
7869 :return: (cloud_name, cloud_credential)
7870 """
7871 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7872 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7873
    async def migrate(self, nsr_id, nslcmop_id):
        """
        Migrate VNFs and VDUs instances in a NS

        Delegates the whole migration to RO and waits for the RO action to
        finish; status is written back to the nslcmop record and a "migrated"
        kafka message is emitted at the end.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return
        logging_text = "Task ns={} migrate ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is filled below ("detailed-status") but is
        # never passed to update_db_2/_write_ns_status, so it is never persisted
        # — confirm whether a write-back is missing
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="MIGRATING",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            migrate_params = db_nslcmop.get("operationParams")

            # the operation params are forwarded to RO as the migration target
            target = {}
            target.update(migrate_params)
            desc = await self.RO.migrate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # block until the RO action finishes (or times out)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.migrate,
                operation="migrate",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # always release the "MIGRATING" state, whatever happened above
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                # step still names the phase that was running when exc was raised
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            # NOTE(review): error_message is "" even on FAILED; the failure text
            # travels only in db_nslcmop_update["detailed-status"] — confirm
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # notify subscribers; a kafka failure must not fail the op
                    await self.msg.aiowrite("ns", "migrated", msg)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7976
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS

        Orchestrates a full NS heal: locks the HA task, sends the heal order to
        RO (heal_RO), then re-deploys/re-registers the N2VC execution
        environments for every targeted VDU (VNF-level charm and VDU-level
        charm). On failure the previous operational/config status of the NSR is
        restored. Completion (or failure) is notified on the kafka "ns" topic
        with the "healed" action.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return: None (state is reported through the database and kafka)
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        # previous NSR status, kept so it can be restored if the heal fails
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # remember pre-heal status for rollback in the finally block
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Sending heal order to VIM"
            await self.heal_RO(
                logging_text=logging_text,
                nsr_id=nsr_id,
                db_nslcmop=db_nslcmop,
                stage=stage,
            )
            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
                        "vdu", []
                    )
                    if not target_vdu_list:
                        # No explicit VDU list given: build one covering every
                        # existing VDU of this VNF, propagating the VNF-level
                        # run-day1 flag.
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
                                "run-day1", False
                            )
                            vdu_to_be_healed = {
                                "vdu-id": vdu_name,
                                "count-index": vdu_index,
                                "run-day1": vdu_run_day1,
                            }
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf.get(
                            "additionalParams", {}
                        ).get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf[
                                "additionalParams"
                            ].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-index is 0
                        vdu_index = target_vdu.get("count-index", 0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            # NOTE(review): iterating db_vnfr.get("vdur", None)
                            # raises TypeError if "vdur" is missing, and
                            # target_instance stays None if no vdur matches —
                            # the .get() below would then fail; confirm vdur is
                            # always present for healable VNFs.
                            for instance in db_vnfr.get("vdur", None):
                                if (
                                    instance["vdu-name"] == vdu_name
                                    and instance["count-index"] == vdu_index
                                ):
                                    target_instance = instance
                                    break
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            error_list = list()
            if exc:
                error_list.append(str(exc))
            try:
                # wait for the N2VC tasks launched by _heal_n2vc, if any
                if tasks_dict_info:
                    stage[1] = "Waiting for healing pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    exc = await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        self.timeout.ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                await self._cancel_pending_tasks(logging_text, tasks_dict_info)
                await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout.ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if error_list:
                error_detail = "; ".join(error_list)
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(
                    step, error_detail
                )
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # roll back to the status recorded before healing started
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(
                        nslcmop_id, step, error_detail
                    )
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # notify subscribers of the final operation state on kafka
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8282
8283 async def heal_RO(
8284 self,
8285 logging_text,
8286 nsr_id,
8287 db_nslcmop,
8288 stage,
8289 ):
8290 """
8291 Heal at RO
8292 :param logging_text: preffix text to use at logging
8293 :param nsr_id: nsr identity
8294 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8295 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8296 :return: None or exception
8297 """
8298
8299 def get_vim_account(vim_account_id):
8300 nonlocal db_vims
8301 if vim_account_id in db_vims:
8302 return db_vims[vim_account_id]
8303 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8304 db_vims[vim_account_id] = db_vim
8305 return db_vim
8306
8307 try:
8308 start_heal = time()
8309 ns_params = db_nslcmop.get("operationParams")
8310 if ns_params and ns_params.get("timeout_ns_heal"):
8311 timeout_ns_heal = ns_params["timeout_ns_heal"]
8312 else:
8313 timeout_ns_heal = self.timeout.ns_heal
8314
8315 db_vims = {}
8316
8317 nslcmop_id = db_nslcmop["_id"]
8318 target = {
8319 "action_id": nslcmop_id,
8320 }
8321 self.logger.warning(
8322 "db_nslcmop={} and timeout_ns_heal={}".format(
8323 db_nslcmop, timeout_ns_heal
8324 )
8325 )
8326 target.update(db_nslcmop.get("operationParams", {}))
8327
8328 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8329 desc = await self.RO.recreate(nsr_id, target)
8330 self.logger.debug("RO return > {}".format(desc))
8331 action_id = desc["action_id"]
8332 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8333 await self._wait_ng_ro(
8334 nsr_id,
8335 action_id,
8336 nslcmop_id,
8337 start_heal,
8338 timeout_ns_heal,
8339 stage,
8340 operation="healing",
8341 )
8342
8343 # Updating NSR
8344 db_nsr_update = {
8345 "_admin.deployed.RO.operational-status": "running",
8346 "detailed-status": " ".join(stage),
8347 }
8348 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8349 self._write_op_status(nslcmop_id, stage)
8350 self.logger.debug(
8351 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8352 )
8353
8354 except Exception as e:
8355 stage[2] = "ERROR healing at VIM"
8356 # self.set_vnfr_at_error(db_vnfrs, str(e))
8357 self.logger.error(
8358 "Error healing at VIM {}".format(e),
8359 exc_info=not isinstance(
8360 e,
8361 (
8362 ROclient.ROClientException,
8363 LcmException,
8364 DbException,
8365 NgRoException,
8366 ),
8367 ),
8368 )
8369 raise
8370
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch heal_N2VC asyncio tasks for every execution environment
        declared in descriptor_config, registering each task in
        task_instantiation_info and in lcm_tasks.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        # Collect the execution-environment items to process from the
        # descriptor configuration.
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Determine the VCA type and charm/chart name from the ee item.
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Search for an already-deployed VCA entry matching this target;
            # the for/else relies on loop exhaustion: "break" means found,
            # the else branch creates a new entry at vca_index + 1.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            # Record the task so the heal() finally-block can await it.
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8529
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Re-establish the N2VC execution environment of a healed element.

        For native charms it registers a new execution environment against the
        healed VM and re-installs the configuration software; for proxy/helm
        types it re-injects the SSH key after RO finishes healing. If the
        "run-day1" deploy parameter is set, the initial config primitives are
        executed again. Any failure is wrapped in LcmException.
        """
        nsr_id = db_nsr["_id"]
        # dot-notation prefix used for all partial updates of this VCA entry
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Refine element type/namespace depending on whether we are
            # configuring a VNF, a VDU or a KDU.
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

                # for compatibility with MON/POL modules, the need model and application name at database
                # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
                # Not sure if this need to be done when healing
                """
                ee_id_parts = ee_id.split(".")
                db_nsr_update = {db_update_entry + "ee_id": ee_id}
                if len(ee_id_parts) >= 2:
                    model_name = ee_id_parts[0]
                    application_name = ee_id_parts[1]
                    db_nsr_update[db_update_entry + "model"] = model_name
                    db_nsr_update[db_update_entry + "application"] = application_name
                """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8938
8939 async def _wait_heal_ro(
8940 self,
8941 nsr_id,
8942 timeout=600,
8943 ):
8944 start_time = time()
8945 while time() <= start_time + timeout:
8946 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8947 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8948 "operational-status"
8949 ]
8950 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8951 if operational_status_ro != "healing":
8952 break
8953 await asyncio.sleep(15)
8954 else: # timeout_ns_deploy
8955 raise NgRoException("Timeout waiting ns to deploy")
8956
8957 async def vertical_scale(self, nsr_id, nslcmop_id):
8958 """
8959 Vertical Scale the VDUs in a NS
8960
8961 :param: nsr_id: NS Instance ID
8962 :param: nslcmop_id: nslcmop ID of migrate
8963
8964 """
8965 # Try to lock HA task here
8966 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8967 if not task_is_locked_by_me:
8968 return
8969 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8970 self.logger.debug(logging_text + "Enter")
8971 # get all needed from database
8972 db_nslcmop = None
8973 db_nslcmop_update = {}
8974 nslcmop_operation_state = None
8975 db_nsr_update = {}
8976 target = {}
8977 exc = None
8978 # in case of error, indicates what part of scale was failed to put nsr at error status
8979 start_deploy = time()
8980
8981 try:
8982 # wait for any previous tasks in process
8983 step = "Waiting for previous operations to terminate"
8984 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8985
8986 self._write_ns_status(
8987 nsr_id=nsr_id,
8988 ns_state=None,
8989 current_operation="VerticalScale",
8990 current_operation_id=nslcmop_id,
8991 )
8992 step = "Getting nslcmop from database"
8993 self.logger.debug(
8994 step + " after having waited for previous tasks to be completed"
8995 )
8996 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8997 operationParams = db_nslcmop.get("operationParams")
8998 target = {}
8999 target.update(operationParams)
9000 desc = await self.RO.vertical_scale(nsr_id, target)
9001 self.logger.debug("RO return > {}".format(desc))
9002 action_id = desc["action_id"]
9003 await self._wait_ng_ro(
9004 nsr_id,
9005 action_id,
9006 nslcmop_id,
9007 start_deploy,
9008 self.timeout.verticalscale,
9009 operation="verticalscale",
9010 )
9011 except (ROclient.ROClientException, DbException, LcmException) as e:
9012 self.logger.error("Exit Exception {}".format(e))
9013 exc = e
9014 except asyncio.CancelledError:
9015 self.logger.error("Cancelled Exception while '{}'".format(step))
9016 exc = "Operation was cancelled"
9017 except Exception as e:
9018 exc = traceback.format_exc()
9019 self.logger.critical(
9020 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
9021 )
9022 finally:
9023 self._write_ns_status(
9024 nsr_id=nsr_id,
9025 ns_state=None,
9026 current_operation="IDLE",
9027 current_operation_id=None,
9028 )
9029 if exc:
9030 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
9031 nslcmop_operation_state = "FAILED"
9032 else:
9033 nslcmop_operation_state = "COMPLETED"
9034 db_nslcmop_update["detailed-status"] = "Done"
9035 db_nsr_update["detailed-status"] = "Done"
9036
9037 self._write_op_status(
9038 op_id=nslcmop_id,
9039 stage="",
9040 error_message="",
9041 operation_state=nslcmop_operation_state,
9042 other_update=db_nslcmop_update,
9043 )
9044 if nslcmop_operation_state:
9045 try:
9046 msg = {
9047 "nsr_id": nsr_id,
9048 "nslcmop_id": nslcmop_id,
9049 "operationState": nslcmop_operation_state,
9050 }
9051 await self.msg.aiowrite("ns", "verticalscaled", msg)
9052 except Exception as e:
9053 self.logger.error(
9054 logging_text + "kafka_write notification Exception {}".format(e)
9055 )
9056 self.logger.debug(logging_text + "Exit")
9057 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")