Code Coverage

Cobertura Coverage Report > osm_lcm > ns.py

File Coverage summary

Name     Classes        Lines              Conditionals
ns.py    100% (1/1)     26% (932/3629)     100% (0/0)

Coverage Breakdown by Class

Name     Lines              Conditionals
ns.py    26% (932/3629)     N/A

Source

osm_lcm/ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 #         http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 1 import asyncio
20 1 import shutil
21 1 from typing import Any, Dict, List
22 1 import yaml
23 1 import logging
24 1 import logging.handlers
25 1 import traceback
26 1 import json
27 1 from jinja2 import (
28     Environment,
29     TemplateError,
30     TemplateNotFound,
31     StrictUndefined,
32     UndefinedError,
33     select_autoescape,
34 )
35
36 1 from osm_lcm import ROclient
37 1 from osm_lcm.data_utils.lcm_config import LcmCfg
38 1 from osm_lcm.data_utils.nsr import (
39     get_deployed_kdu,
40     get_deployed_vca,
41     get_deployed_vca_list,
42     get_nsd,
43 )
44 1 from osm_lcm.data_utils.vca import (
45     DeployedComponent,
46     DeployedK8sResource,
47     DeployedVCA,
48     EELevel,
49     Relation,
50     EERelation,
51     safe_get_ee_relation,
52 )
53 1 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 1 from osm_lcm.lcm_utils import (
55     LcmException,
56     LcmExceptionNoMgmtIP,
57     LcmBase,
58     deep_get,
59     get_iterable,
60     populate_dict,
61     check_juju_bundle_existence,
62     get_charm_artifact_path,
63     get_ee_id_parts,
64 )
65 1 from osm_lcm.data_utils.nsd import (
66     get_ns_configuration_relation_list,
67     get_vnf_profile,
68     get_vnf_profiles,
69 )
70 1 from osm_lcm.data_utils.vnfd import (
71     get_kdu,
72     get_kdu_services,
73     get_relation_list,
74     get_vdu_list,
75     get_vdu_profile,
76     get_ee_sorted_initial_config_primitive_list,
77     get_ee_sorted_terminate_config_primitive_list,
78     get_kdu_list,
79     get_virtual_link_profiles,
80     get_vdu,
81     get_configuration,
82     get_vdu_index,
83     get_scaling_aspect,
84     get_number_of_instances,
85     get_juju_ee_ref,
86     get_kdu_resource_profile,
87     find_software_version,
88     check_helm_ee_in_ns,
89 )
90 1 from osm_lcm.data_utils.list_utils import find_in_list
91 1 from osm_lcm.data_utils.vnfr import (
92     get_osm_params,
93     get_vdur_index,
94     get_kdur,
95     get_volumes_from_instantiation_params,
96 )
97 1 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
98 1 from osm_lcm.data_utils.database.vim_account import VimAccountDB
99 1 from n2vc.definitions import RelationEndpoint
100 1 from n2vc.k8s_helm_conn import K8sHelmConnector
101 1 from n2vc.k8s_helm3_conn import K8sHelm3Connector
102 1 from n2vc.k8s_juju_conn import K8sJujuConnector
103
104 1 from osm_common.dbbase import DbException
105 1 from osm_common.fsbase import FsException
106
107 1 from osm_lcm.data_utils.database.database import Database
108 1 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
109 1 from osm_lcm.data_utils.wim import (
110     get_sdn_ports,
111     get_target_wim_attrs,
112     select_feasible_wim_account,
113 )
114
115 1 from n2vc.n2vc_juju_conn import N2VCJujuConnector
116 1 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
117
118 1 from osm_lcm.lcm_helm_conn import LCMHelmConn
119 1 from osm_lcm.osm_config import OsmConfigBuilder
120 1 from osm_lcm.prometheus import parse_job
121
122 1 from copy import copy, deepcopy
123 1 from time import time
124 1 from uuid import uuid4
125
126 1 from random import randint
127
128 1 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
129
130
131 1 class NsLcm(LcmBase):
132 1     SUBOPERATION_STATUS_NOT_FOUND = -1
133 1     SUBOPERATION_STATUS_NEW = -2
134 1     SUBOPERATION_STATUS_SKIP = -3
135 1     task_name_deploy_vca = "Deploying VCA"
136
137 1     def __init__(self, msg, lcm_tasks, config: LcmCfg, loop):
138         """
139         Init: connect to database, filesystem storage, and messaging
140         :param config: LcmCfg object with the LCM configuration (timeouts, RO and VCA settings)
141         :return: None
142         """
143 1         super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))
144
145 1         self.db = Database().instance.db
146 1         self.fs = Filesystem().instance.fs
147 1         self.loop = loop
148 1         self.lcm_tasks = lcm_tasks
149 1         self.timeout = config.timeout
150 1         self.ro_config = config.RO
151 1         self.vca_config = config.VCA
152
153         # create N2VC connector
154 1         self.n2vc = N2VCJujuConnector(
155             log=self.logger,
156             loop=self.loop,
157             on_update_db=self._on_update_n2vc_db,
158             fs=self.fs,
159             db=self.db,
160         )
161
162 1         self.conn_helm_ee = LCMHelmConn(
163             log=self.logger,
164             loop=self.loop,
165             vca_config=self.vca_config,
166             on_update_db=self._on_update_n2vc_db,
167         )
168
169 1         self.k8sclusterhelm2 = K8sHelmConnector(
170             kubectl_command=self.vca_config.kubectlpath,
171             helm_command=self.vca_config.helmpath,
172             log=self.logger,
173             on_update_db=None,
174             fs=self.fs,
175             db=self.db,
176         )
177
178 1         self.k8sclusterhelm3 = K8sHelm3Connector(
179             kubectl_command=self.vca_config.kubectlpath,
180             helm_command=self.vca_config.helm3path,
181             fs=self.fs,
182             log=self.logger,
183             db=self.db,
184             on_update_db=None,
185         )
186
187 1         self.k8sclusterjuju = K8sJujuConnector(
188             kubectl_command=self.vca_config.kubectlpath,
189             juju_command=self.vca_config.jujupath,
190             log=self.logger,
191             loop=self.loop,
192             on_update_db=self._on_update_k8s_db,
193             fs=self.fs,
194             db=self.db,
195         )
196
197 1         self.k8scluster_map = {
198             "helm-chart": self.k8sclusterhelm2,
199             "helm-chart-v3": self.k8sclusterhelm3,
200             "chart": self.k8sclusterhelm3,
201             "juju-bundle": self.k8sclusterjuju,
202             "juju": self.k8sclusterjuju,
203         }
204
205 1         self.vca_map = {
206             "lxc_proxy_charm": self.n2vc,
207             "native_charm": self.n2vc,
208             "k8s_proxy_charm": self.n2vc,
209             "helm": self.conn_helm_ee,
210             "helm-v3": self.conn_helm_ee,
211         }
212
213         # create RO client
214 1         self.RO = NgRoClient(self.loop, **self.ro_config.to_dict())
215
216 1         self.op_status_map = {
217             "instantiation": self.RO.status,
218             "termination": self.RO.status,
219             "migrate": self.RO.status,
220             "healing": self.RO.recreate_status,
221             "verticalscale": self.RO.status,
222             "start_stop_rebuild": self.RO.status,
223         }
224
225 1     @staticmethod
226 1     def increment_ip_mac(ip_mac, vm_index=1):
227 0         if not isinstance(ip_mac, str):
228 0             return ip_mac
229 0         try:
230             # try with ipv4: look for the last dot
231 0             i = ip_mac.rfind(".")
232 0             if i > 0:
233 0                 i += 1
234 0                 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
235             # try with ipv6 or mac: look for the last colon and operate in hex
236 0             i = ip_mac.rfind(":")
237 0             if i > 0:
238 0                 i += 1
239                 # format in hex, len can be 2 for mac or 4 for ipv6
240 0                 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
241                     ip_mac[:i], int(ip_mac[i:], 16) + vm_index
242                 )
243 0         except Exception:
244 0             pass
245 0         return None
246
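Worked examples of the increment logic above: the last dot-separated group is bumped as a decimal number (IPv4), the last colon-separated group as a hexadecimal number padded to its original width (IPv6 or MAC). A minimal sketch with hypothetical addresses, assuming osm_lcm.ns is importable:

    from osm_lcm.ns import NsLcm

    assert NsLcm.increment_ip_mac("10.0.0.5", vm_index=1) == "10.0.0.6"
    assert NsLcm.increment_ip_mac("52:54:00:00:00:0a", vm_index=2) == "52:54:00:00:00:0c"
    assert NsLcm.increment_ip_mac("fd00::1:000a") == "fd00::1:000b"  # default vm_index=1
    assert NsLcm.increment_ip_mac(None) is None          # non-string input is returned unchanged
    assert NsLcm.increment_ip_mac("no-separator") is None  # unparsable input yields None
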
247 1     def _on_update_ro_db(self, nsrs_id, ro_descriptor):
248         # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
249
250 0         try:
251             # TODO filter RO descriptor fields...
252
253             # write to database
254 0             db_dict = dict()
255             # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
256 0             db_dict["deploymentStatus"] = ro_descriptor
257 0             self.update_db_2("nsrs", nsrs_id, db_dict)
258
259 0         except Exception as e:
260 0             self.logger.warn(
261                 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
262             )
263
264 1     async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
265         # remove last dot from path (if exists)
266 1         if path.endswith("."):
267 1             path = path[:-1]
268
269         # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
270         #                   .format(table, filter, path, updated_data))
271 1         try:
272 1             nsr_id = filter.get("_id")
273
274             # read ns record from database
275 1             nsr = self.db.get_one(table="nsrs", q_filter=filter)
276 1             current_ns_status = nsr.get("nsState")
277
278             # get vca status for NS
279 1             status_dict = await self.n2vc.get_status(
280                 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
281             )
282
283             # vcaStatus
284 0             db_dict = dict()
285 0             db_dict["vcaStatus"] = status_dict
286
287             # update configurationStatus for this VCA
288 0             try:
289 0                 vca_index = int(path[path.rfind(".") + 1 :])
290
291 0                 vca_list = deep_get(
292                     target_dict=nsr, key_list=("_admin", "deployed", "VCA")
293                 )
294 0                 vca_status = vca_list[vca_index].get("status")
295
296 0                 configuration_status_list = nsr.get("configurationStatus")
297 0                 config_status = configuration_status_list[vca_index].get("status")
298
299 0                 if config_status == "BROKEN" and vca_status != "failed":
300 0                     db_dict["configurationStatus"][vca_index] = "READY"
301 0                 elif config_status != "BROKEN" and vca_status == "failed":
302 0                     db_dict["configurationStatus"][vca_index] = "BROKEN"
303 0             except Exception as e:
304                 # do not update configurationStatus
305 0                 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
306
307             # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
308             # if nsState = 'DEGRADED' check if all is OK
309 0             is_degraded = False
310 0             if current_ns_status in ("READY", "DEGRADED"):
311 0                 error_description = ""
312                 # check machines
313 0                 if status_dict.get("machines"):
314 0                     for machine_id in status_dict.get("machines"):
315 0                         machine = status_dict.get("machines").get(machine_id)
316                         # check machine agent-status
317 0                         if machine.get("agent-status"):
318 0                             s = machine.get("agent-status").get("status")
319 0                             if s != "started":
320 0                                 is_degraded = True
321 0                                 error_description += (
322                                     "machine {} agent-status={} ; ".format(
323                                         machine_id, s
324                                     )
325                                 )
326                         # check machine instance status
327 0                         if machine.get("instance-status"):
328 0                             s = machine.get("instance-status").get("status")
329 0                             if s != "running":
330 0                                 is_degraded = True
331 0                                 error_description += (
332                                     "machine {} instance-status={} ; ".format(
333                                         machine_id, s
334                                     )
335                                 )
336                 # check applications
337 0                 if status_dict.get("applications"):
338 0                     for app_id in status_dict.get("applications"):
339 0                         app = status_dict.get("applications").get(app_id)
340                         # check application status
341 0                         if app.get("status"):
342 0                             s = app.get("status").get("status")
343 0                             if s != "active":
344 0                                 is_degraded = True
345 0                                 error_description += (
346                                     "application {} status={} ; ".format(app_id, s)
347                                 )
348
349 0                 if error_description:
350 0                     db_dict["errorDescription"] = error_description
351 0                 if current_ns_status == "READY" and is_degraded:
352 0                     db_dict["nsState"] = "DEGRADED"
353 0                 if current_ns_status == "DEGRADED" and not is_degraded:
354 0                     db_dict["nsState"] = "READY"
355
356             # write to database
357 0             self.update_db_2("nsrs", nsr_id, db_dict)
358
359 1         except (asyncio.CancelledError, asyncio.TimeoutError):
360 0             raise
361 1         except Exception as e:
362 1             self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
363
364 1     async def _on_update_k8s_db(
365         self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
366     ):
367         """
368         Updating vca status in NSR record
369         :param cluster_uuid: UUID of a k8s cluster
370         :param kdu_instance: The unique name of the KDU instance
371         :param filter: To get nsr_id
372         :param cluster_type: The cluster type (juju, k8s)
373         :return: none
374         """
375
376         # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
377         #                   .format(cluster_uuid, kdu_instance, filter))
378
379 0         nsr_id = filter.get("_id")
380 0         try:
381 0             vca_status = await self.k8scluster_map[cluster_type].status_kdu(
382                 cluster_uuid=cluster_uuid,
383                 kdu_instance=kdu_instance,
384                 yaml_format=False,
385                 complete_status=True,
386                 vca_id=vca_id,
387             )
388
389             # vcaStatus
390 0             db_dict = dict()
391 0             db_dict["vcaStatus"] = {nsr_id: vca_status}
392
393 0             self.logger.debug(
394                 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
395             )
396
397             # write to database
398 0             self.update_db_2("nsrs", nsr_id, db_dict)
399 0         except (asyncio.CancelledError, asyncio.TimeoutError):
400 0             raise
401 0         except Exception as e:
402 0             self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
403
404 1     @staticmethod
405 1     def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
406 0         try:
407 0             env = Environment(
408                 undefined=StrictUndefined,
409                 autoescape=select_autoescape(default_for_string=True, default=True),
410             )
411 0             template = env.from_string(cloud_init_text)
412 0             return template.render(additional_params or {})
413 0         except UndefinedError as e:
414 0             raise LcmException(
415                 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
416                 "file, must be provided in the instantiation parameters inside the "
417                 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
418             )
419 0         except (TemplateError, TemplateNotFound) as e:
420 0             raise LcmException(
421                 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
422                     vnfd_id, vdu_id, e
423                 )
424             )
425
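The renderer above uses Jinja2 with StrictUndefined, so a variable that is not supplied through the additionalParamsForVnf/Vdu block raises UndefinedError instead of rendering as an empty string, and the error is rewrapped as LcmException. A standalone sketch of that behaviour with a hypothetical template:

    from jinja2 import Environment, StrictUndefined, UndefinedError, select_autoescape

    env = Environment(
        undefined=StrictUndefined,
        autoescape=select_autoescape(default_for_string=True, default=True),
    )
    template = env.from_string("#cloud-config\nhostname: {{ hostname }}\n")
    print(template.render({"hostname": "vdu-0"}))  # renders the provided value
    try:
        template.render({})  # 'hostname' was not provided
    except UndefinedError as e:
        print("missing instantiation parameter:", e)
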
426 1     def _get_vdu_cloud_init_content(self, vdu, vnfd):
427 0         cloud_init_content = cloud_init_file = None
428 0         try:
429 0             if vdu.get("cloud-init-file"):
430 0                 base_folder = vnfd["_admin"]["storage"]
431 0                 if base_folder["pkg-dir"]:
432 0                     cloud_init_file = "{}/{}/cloud_init/{}".format(
433                         base_folder["folder"],
434                         base_folder["pkg-dir"],
435                         vdu["cloud-init-file"],
436                     )
437                 else:
438 0                     cloud_init_file = "{}/Scripts/cloud_init/{}".format(
439                         base_folder["folder"],
440                         vdu["cloud-init-file"],
441                     )
442 0                 with self.fs.file_open(cloud_init_file, "r") as ci_file:
443 0                     cloud_init_content = ci_file.read()
444 0             elif vdu.get("cloud-init"):
445 0                 cloud_init_content = vdu["cloud-init"]
446
447 0             return cloud_init_content
448 0         except FsException as e:
449 0             raise LcmException(
450                 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
451                     vnfd["id"], vdu["id"], cloud_init_file, e
452                 )
453             )
454
455 1     def _get_vdu_additional_params(self, db_vnfr, vdu_id):
456 0         vdur = next(
457             (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
458         )
459 0         additional_params = vdur.get("additionalParams")
460 0         return parse_yaml_strings(additional_params)
461
462 1     def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
463         """
464         Creates a new vnfd descriptor for RO based on the input OSM IM vnfd
465         :param vnfd: input vnfd
466         :param new_id: overrides vnf id if provided
467         :param additionalParams: Instantiation params for VNFs provided
468         :param nsrId: Id of the NSR
469         :return: copy of vnfd
470         """
471 0         vnfd_RO = deepcopy(vnfd)
472         # remove keys not used by RO: configuration, monitoring, scaling and internal keys
473 0         vnfd_RO.pop("_id", None)
474 0         vnfd_RO.pop("_admin", None)
475 0         vnfd_RO.pop("monitoring-param", None)
476 0         vnfd_RO.pop("scaling-group-descriptor", None)
477 0         vnfd_RO.pop("kdu", None)
478 0         vnfd_RO.pop("k8s-cluster", None)
479 0         if new_id:
480 0             vnfd_RO["id"] = new_id
481
482         # parse cloud-init or cloud-init-file with the provided variables using Jinja2
483 0         for vdu in get_iterable(vnfd_RO, "vdu"):
484 0             vdu.pop("cloud-init-file", None)
485 0             vdu.pop("cloud-init", None)
486 0         return vnfd_RO
487
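vnfd2RO therefore returns a deep copy of the descriptor with the keys RO does not consume removed (_id, _admin, monitoring-param, scaling-group-descriptor, kdu, k8s-cluster) and with per-VDU cloud-init stripped, since cloud-init is handled separately. A sketch with a hypothetical minimal descriptor, assuming ns_lcm is an already constructed NsLcm instance:

    vnfd = {
        "_id": "0000", "_admin": {}, "id": "example_vnfd",
        "monitoring-param": [], "kdu": [],
        "vdu": [{"id": "mgmtVM", "cloud-init-file": "user-data"}],
    }
    vnfd_ro = ns_lcm.vnfd2RO(vnfd, new_id="example_vnfd_new")
    assert "_id" not in vnfd_ro and "kdu" not in vnfd_ro
    assert "cloud-init-file" not in vnfd_ro["vdu"][0]
    assert vnfd_ro["id"] == "example_vnfd_new"
    assert vnfd["id"] == "example_vnfd"  # the original descriptor is not modified
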
488 1     @staticmethod
489 1     def ip_profile_2_RO(ip_profile):
490 0         RO_ip_profile = deepcopy(ip_profile)
491 0         if "dns-server" in RO_ip_profile:
492 0             if isinstance(RO_ip_profile["dns-server"], list):
493 0                 RO_ip_profile["dns-address"] = []
494 0                 for ds in RO_ip_profile.pop("dns-server"):
495 0                     RO_ip_profile["dns-address"].append(ds["address"])
496             else:
497 0                 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
498 0         if RO_ip_profile.get("ip-version") == "ipv4":
499 0             RO_ip_profile["ip-version"] = "IPv4"
500 0         if RO_ip_profile.get("ip-version") == "ipv6":
501 0             RO_ip_profile["ip-version"] = "IPv6"
502 0         if "dhcp-params" in RO_ip_profile:
503 0             RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
504 0         return RO_ip_profile
505
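ip_profile_2_RO maps the OSM IM ip-profile onto RO's field names: dns-server becomes dns-address, ip-version is capitalised to IPv4/IPv6, and dhcp-params becomes dhcp. A sketch with hypothetical values:

    ip_profile = {
        "ip-version": "ipv4",
        "subnet-address": "10.0.0.0/24",
        "dns-server": [{"address": "8.8.8.8"}, {"address": "1.1.1.1"}],
        "dhcp-params": {"enabled": True, "count": 100},
    }
    ro_profile = NsLcm.ip_profile_2_RO(ip_profile)
    assert ro_profile["ip-version"] == "IPv4"
    assert ro_profile["dns-address"] == ["8.8.8.8", "1.1.1.1"]
    assert ro_profile["dhcp"] == {"enabled": True, "count": 100}
    assert "dns-server" not in ro_profile and "dhcp-params" not in ro_profile
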
506 1     def _get_ro_vim_id_for_vim_account(self, vim_account):
507 0         db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
508 0         if db_vim["_admin"]["operationalState"] != "ENABLED":
509 0             raise LcmException(
510                 "VIM={} is not available. operationalState={}".format(
511                     vim_account, db_vim["_admin"]["operationalState"]
512                 )
513             )
514 0         RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
515 0         return RO_vim_id
516
517 1     def get_ro_wim_id_for_wim_account(self, wim_account):
518 0         if isinstance(wim_account, str):
519 0             db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
520 0             if db_wim["_admin"]["operationalState"] != "ENABLED":
521 0                 raise LcmException(
522                     "WIM={} is not available. operationalState={}".format(
523                         wim_account, db_wim["_admin"]["operationalState"]
524                     )
525                 )
526 0             RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
527 0             return RO_wim_id
528         else:
529 0             return wim_account
530
531 1     def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
532 1         db_vdu_push_list = []
533 1         template_vdur = []
534 1         db_update = {"_admin.modified": time()}
535 1         if vdu_create:
536 0             for vdu_id, vdu_count in vdu_create.items():
537 0                 vdur = next(
538                     (
539                         vdur
540                         for vdur in reversed(db_vnfr["vdur"])
541                         if vdur["vdu-id-ref"] == vdu_id
542                     ),
543                     None,
544                 )
545 0                 if not vdur:
546                     # Read the template saved in the db:
547 0                     self.logger.debug(
548                         "No vdur in the database. Using the vdur-template to scale"
549                     )
550 0                     vdur_template = db_vnfr.get("vdur-template")
551 0                     if not vdur_template:
552 0                         raise LcmException(
553                             "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
554                                 vdu_id
555                             )
556                         )
557 0                     vdur = vdur_template[0]
558                     # Delete a template from the database after using it
559 0                     self.db.set_one(
560                         "vnfrs",
561                         {"_id": db_vnfr["_id"]},
562                         None,
563                         pull={"vdur-template": {"_id": vdur["_id"]}},
564                     )
565 0                 for count in range(vdu_count):
566 0                     vdur_copy = deepcopy(vdur)
567 0                     vdur_copy["status"] = "BUILD"
568 0                     vdur_copy["status-detailed"] = None
569 0                     vdur_copy["ip-address"] = None
570 0                     vdur_copy["_id"] = str(uuid4())
571 0                     vdur_copy["count-index"] += count + 1
572 0                     vdur_copy["id"] = "{}-{}".format(
573                         vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
574                     )
575 0                     vdur_copy.pop("vim_info", None)
576 0                     for iface in vdur_copy["interfaces"]:
577 0                         if iface.get("fixed-ip"):
578 0                             iface["ip-address"] = self.increment_ip_mac(
579                                 iface["ip-address"], count + 1
580                             )
581                         else:
582 0                             iface.pop("ip-address", None)
583 0                         if iface.get("fixed-mac"):
584 0                             iface["mac-address"] = self.increment_ip_mac(
585                                 iface["mac-address"], count + 1
586                             )
587                         else:
588 0                             iface.pop("mac-address", None)
589 0                         if db_vnfr["vdur"]:
590 0                             iface.pop(
591                                 "mgmt_vnf", None
592                             )  # only the first vdu can be management of the vnf
593 0                     db_vdu_push_list.append(vdur_copy)
594                     # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
595 1         if vdu_delete:
596 1             if len(db_vnfr["vdur"]) == 1:
597                 # The scale will move to 0 instances
598 0                 self.logger.debug(
599                     "Scaling to 0 !, creating the template with the last vdur"
600                 )
601 0                 template_vdur = [db_vnfr["vdur"][0]]
602 1             for vdu_id, vdu_count in vdu_delete.items():
603 1                 if mark_delete:
604 1                     indexes_to_delete = [
605                         iv[0]
606                         for iv in enumerate(db_vnfr["vdur"])
607                         if iv[1]["vdu-id-ref"] == vdu_id
608                     ]
609 1                     db_update.update(
610                         {
611                             "vdur.{}.status".format(i): "DELETING"
612                             for i in indexes_to_delete[-vdu_count:]
613                         }
614                     )
615                 else:
616                     # it must be deleted one by one because common.db does not allow otherwise
617 1                     vdus_to_delete = [
618                         v
619                         for v in reversed(db_vnfr["vdur"])
620                         if v["vdu-id-ref"] == vdu_id
621                     ]
622 1                     for vdu in vdus_to_delete[:vdu_count]:
623 0                         self.db.set_one(
624                             "vnfrs",
625                             {"_id": db_vnfr["_id"]},
626                             None,
627                             pull={"vdur": {"_id": vdu["_id"]}},
628                         )
629 1         db_push = {}
630 1         if db_vdu_push_list:
631 0             db_push["vdur"] = db_vdu_push_list
632 1         if template_vdur:
633 0             db_push["vdur-template"] = template_vdur
634 1         if not db_push:
635 1             db_push = None
636 1         db_vnfr["vdur-template"] = template_vdur
637 1         self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
638         # modify passed dictionary db_vnfr
639 1         db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
640 1         db_vnfr["vdur"] = db_vnfr_["vdur"]
641
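In short, scale_vnfr clones the newest matching vdur (or the saved vdur-template when scaling up from zero) on scale-out, resetting its status and incrementing fixed IP/MAC addresses, and on scale-in either marks the trailing instances as DELETING or pulls them from the record one by one. A hypothetical usage sketch, assuming ns_lcm is a constructed NsLcm instance and db_vnfr is a vnfr record read from the database:

    # add two more instances of VDU "dataVM"
    ns_lcm.scale_vnfr(db_vnfr, vdu_create={"dataVM": 2})

    # mark the last instance of "dataVM" as DELETING; actual removal happens later
    ns_lcm.scale_vnfr(db_vnfr, vdu_delete={"dataVM": 1}, mark_delete=True)
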
642 1     def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
643         """
644         Updates database nsr with the RO info for the created vld
645         :param ns_update_nsr: dictionary to be filled with the updated info
646         :param db_nsr: content of db_nsr. This is also modified
647         :param nsr_desc_RO: nsr descriptor from RO
648         :return: Nothing, LcmException is raised on errors
649         """
650
651 0         for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
652 0             for net_RO in get_iterable(nsr_desc_RO, "nets"):
653 0                 if vld["id"] != net_RO.get("ns_net_osm_id"):
654 0                     continue
655 0                 vld["vim-id"] = net_RO.get("vim_net_id")
656 0                 vld["name"] = net_RO.get("vim_name")
657 0                 vld["status"] = net_RO.get("status")
658 0                 vld["status-detailed"] = net_RO.get("error_msg")
659 0                 ns_update_nsr["vld.{}".format(vld_index)] = vld
660 0                 break
661             else:
662 0                 raise LcmException(
663                     "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
664                 )
665
666 1     def set_vnfr_at_error(self, db_vnfrs, error_text):
667 0         try:
668 0             for db_vnfr in db_vnfrs.values():
669 0                 vnfr_update = {"status": "ERROR"}
670 0                 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
671 0                     if "status" not in vdur:
672 0                         vdur["status"] = "ERROR"
673 0                         vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
674 0                         if error_text:
675 0                             vdur["status-detailed"] = str(error_text)
676 0                             vnfr_update[
677                                 "vdur.{}.status-detailed".format(vdu_index)
678                             ] = "ERROR"
679 0                 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
680 0         except DbException as e:
681 0             self.logger.error("Cannot update vnf. {}".format(e))
682
683 1     def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
684         """
685         Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
686         :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
687         :param nsr_desc_RO: nsr descriptor from RO
688         :return: Nothing, LcmException is raised on errors
689         """
690 0         for vnf_index, db_vnfr in db_vnfrs.items():
691 0             for vnf_RO in nsr_desc_RO["vnfs"]:
692 0                 if vnf_RO["member_vnf_index"] != vnf_index:
693 0                     continue
694 0                 vnfr_update = {}
695 0                 if vnf_RO.get("ip_address"):
696 0                     db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
697                         "ip_address"
698                     ].split(";")[0]
699 0                 elif not db_vnfr.get("ip-address"):
700 0                     if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
701 0                         raise LcmExceptionNoMgmtIP(
702                             "ns member_vnf_index '{}' has no IP address".format(
703                                 vnf_index
704                             )
705                         )
706
707 0                 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
708 0                     vdur_RO_count_index = 0
709 0                     if vdur.get("pdu-type"):
710 0                         continue
711 0                     for vdur_RO in get_iterable(vnf_RO, "vms"):
712 0                         if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
713 0                             continue
714 0                         if vdur["count-index"] != vdur_RO_count_index:
715 0                             vdur_RO_count_index += 1
716 0                             continue
717 0                         vdur["vim-id"] = vdur_RO.get("vim_vm_id")
718 0                         if vdur_RO.get("ip_address"):
719 0                             vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
720                         else:
721 0                             vdur["ip-address"] = None
722 0                         vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
723 0                         vdur["name"] = vdur_RO.get("vim_name")
724 0                         vdur["status"] = vdur_RO.get("status")
725 0                         vdur["status-detailed"] = vdur_RO.get("error_msg")
726 0                         for ifacer in get_iterable(vdur, "interfaces"):
727 0                             for interface_RO in get_iterable(vdur_RO, "interfaces"):
728 0                                 if ifacer["name"] == interface_RO.get("internal_name"):
729 0                                     ifacer["ip-address"] = interface_RO.get(
730                                         "ip_address"
731                                     )
732 0                                     ifacer["mac-address"] = interface_RO.get(
733                                         "mac_address"
734                                     )
735 0                                     break
736                             else:
737 0                                 raise LcmException(
738                                     "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
739                                     "from VIM info".format(
740                                         vnf_index, vdur["vdu-id-ref"], ifacer["name"]
741                                     )
742                                 )
743 0                         vnfr_update["vdur.{}".format(vdu_index)] = vdur
744 0                         break
745                     else:
746 0                         raise LcmException(
747                             "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
748                             "VIM info".format(
749                                 vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
750                             )
751                         )
752
753 0                 for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
754 0                     for net_RO in get_iterable(nsr_desc_RO, "nets"):
755 0                         if vld["id"] != net_RO.get("vnf_net_osm_id"):
756 0                             continue
757 0                         vld["vim-id"] = net_RO.get("vim_net_id")
758 0                         vld["name"] = net_RO.get("vim_name")
759 0                         vld["status"] = net_RO.get("status")
760 0                         vld["status-detailed"] = net_RO.get("error_msg")
761 0                         vnfr_update["vld.{}".format(vld_index)] = vld
762 0                         break
763                     else:
764 0                         raise LcmException(
765                             "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
766                                 vnf_index, vld["id"]
767                             )
768                         )
769
770 0                 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
771 0                 break
772
773             else:
774 0                 raise LcmException(
775                     "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
776                         vnf_index
777                     )
778                 )
779
780 1     def _get_ns_config_info(self, nsr_id):
781         """
782         Generates a mapping between vnf,vdu elements and the N2VC id
783         :param nsr_id: id of the nsr whose last _admin.deployed.VCA list is read from the database
784         :return: a dictionary with {osm-config-mapping: {}} where its element contains:
785             "<member-vnf-index>": <N2VC-id>  for a vnf configuration, or
786             "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id>  for a vdu configuration
787         """
788 0         db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
789 0         vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
790 0         mapping = {}
791 0         ns_config_info = {"osm-config-mapping": mapping}
792 0         for vca in vca_deployed_list:
793 0             if not vca["member-vnf-index"]:
794 0                 continue
795 0             if not vca["vdu_id"]:
796 0                 mapping[vca["member-vnf-index"]] = vca["application"]
797             else:
798 0                 mapping[
799                     "{}.{}.{}".format(
800                         vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
801                     )
802                 ] = vca["application"]
803 0         return ns_config_info
804
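The mapping returned by _get_ns_config_info keys VNF-level charms by member-vnf-index alone and VDU-level charms by member-vnf-index.vdu_id.vdu_count_index. A hypothetical result for an NS with one VNF charm and one per-VDU charm:

    {
        "osm-config-mapping": {
            "1": "app-vnf-1",            # VNF-level configuration of member-vnf-index "1"
            "1.mgmtVM.0": "app-vdu-1",   # first replica of vdu "mgmtVM" in that VNF
        }
    }
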
805 1     async def _instantiate_ng_ro(
806         self,
807         logging_text,
808         nsr_id,
809         nsd,
810         db_nsr,
811         db_nslcmop,
812         db_vnfrs,
813         db_vnfds,
814         n2vc_key_list,
815         stage,
816         start_deploy,
817         timeout_ns_deploy,
818     ):
819 1         db_vims = {}
820
821 1         def get_vim_account(vim_account_id):
822             nonlocal db_vims
823 0             if vim_account_id in db_vims:
824 0                 return db_vims[vim_account_id]
825 0             db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
826 0             db_vims[vim_account_id] = db_vim
827 0             return db_vim
828
829         # modify target_vld info with instantiation parameters
830 1         def parse_vld_instantiation_params(
831             target_vim, target_vld, vld_params, target_sdn
832         ):
833 1             if vld_params.get("ip-profile"):
834 0                 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
835                     "ip-profile"
836                 ]
837 1             if vld_params.get("provider-network"):
838 0                 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
839                     "provider-network"
840                 ]
841 0                 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
842 0                     target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
843                         "provider-network"
844                     ]["sdn-ports"]
845
846             # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
847             # if wim_account_id is specified in vld_params, validate if it is feasible.
848 1             wim_account_id, db_wim = select_feasible_wim_account(
849                 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
850             )
851
852 1             if wim_account_id:
853                 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
854 0                 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
855                 # update vld_params with correct WIM account Id
856 0                 vld_params["wimAccountId"] = wim_account_id
857
858 0                 target_wim = "wim:{}".format(wim_account_id)
859 0                 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
860 0                 sdn_ports = get_sdn_ports(vld_params, db_wim)
861 0                 if len(sdn_ports) > 0:
862 0                     target_vld["vim_info"][target_wim] = target_wim_attrs
863 0                     target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
864
865 0                 self.logger.debug(
866                     "Target VLD with WIM data: {:s}".format(str(target_vld))
867                 )
868
869 1             for param in ("vim-network-name", "vim-network-id"):
870 1                 if vld_params.get(param):
871 0                     if isinstance(vld_params[param], dict):
872 0                         for vim, vim_net in vld_params[param].items():
873 0                             other_target_vim = "vim:" + vim
874 0                             populate_dict(
875                                 target_vld["vim_info"],
876                                 (other_target_vim, param.replace("-", "_")),
877                                 vim_net,
878                             )
879                     else:  # isinstance str
880 0                         target_vld["vim_info"][target_vim][
881                             param.replace("-", "_")
882                         ] = vld_params[param]
883 1             if vld_params.get("common_id"):
884 0                 target_vld["common_id"] = vld_params.get("common_id")
885
886         # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
887 1         def update_ns_vld_target(target, ns_params):
888 1             for vnf_params in ns_params.get("vnf", ()):
889 0                 if vnf_params.get("vimAccountId"):
890 0                     target_vnf = next(
891                         (
892                             vnfr
893                             for vnfr in db_vnfrs.values()
894                             if vnf_params["member-vnf-index"]
895                             == vnfr["member-vnf-index-ref"]
896                         ),
897                         None,
898                     )
899 0                     vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
900 0                     if not vdur:
901 0                         return
902 0                     for a_index, a_vld in enumerate(target["ns"]["vld"]):
903 0                         target_vld = find_in_list(
904                             get_iterable(vdur, "interfaces"),
905                             lambda iface: iface.get("ns-vld-id") == a_vld["name"],
906                         )
907
908 0                         vld_params = find_in_list(
909                             get_iterable(ns_params, "vld"),
910                             lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
911                         )
912 0                         if target_vld:
913 0                             if vnf_params.get("vimAccountId") not in a_vld.get(
914                                 "vim_info", {}
915                             ):
916 0                                 target_vim_network_list = [
917                                     v for _, v in a_vld.get("vim_info").items()
918                                 ]
919 0                                 target_vim_network_name = next(
920                                     (
921                                         item.get("vim_network_name", "")
922                                         for item in target_vim_network_list
923                                     ),
924                                     "",
925                                 )
926
927 0                                 target["ns"]["vld"][a_index].get("vim_info").update(
928                                     {
929                                         "vim:{}".format(vnf_params["vimAccountId"]): {
930                                             "vim_network_name": target_vim_network_name,
931                                         }
932                                     }
933                                 )
934
935 0                                 if vld_params:
936 0                                     for param in ("vim-network-name", "vim-network-id"):
937 0                                         if vld_params.get(param) and isinstance(
938                                             vld_params[param], dict
939                                         ):
940 0                                             for vim, vim_net in vld_params[
941                                                 param
942                                             ].items():
943 0                                                 other_target_vim = "vim:" + vim
944 0                                                 populate_dict(
945                                                     target["ns"]["vld"][a_index].get(
946                                                         "vim_info"
947                                                     ),
948                                                     (
949                                                         other_target_vim,
950                                                         param.replace("-", "_"),
951                                                     ),
952                                                     vim_net,
953                                                 )
954
955 1         nslcmop_id = db_nslcmop["_id"]
956 1         target = {
957             "name": db_nsr["name"],
958             "ns": {"vld": []},
959             "vnf": [],
960             "image": deepcopy(db_nsr["image"]),
961             "flavor": deepcopy(db_nsr["flavor"]),
962             "action_id": nslcmop_id,
963             "cloud_init_content": {},
964         }
965 1         for image in target["image"]:
966 1             image["vim_info"] = {}
967 1         for flavor in target["flavor"]:
968 1             flavor["vim_info"] = {}
969 1         if db_nsr.get("affinity-or-anti-affinity-group"):
970 0             target["affinity-or-anti-affinity-group"] = deepcopy(
971                 db_nsr["affinity-or-anti-affinity-group"]
972             )
973 0             for affinity_or_anti_affinity_group in target[
974                 "affinity-or-anti-affinity-group"
975             ]:
976 0                 affinity_or_anti_affinity_group["vim_info"] = {}
977
978 1         if db_nslcmop.get("lcmOperationType") != "instantiate":
979             # get parameters of instantiation:
980 1             db_nslcmop_instantiate = self.db.get_list(
981                 "nslcmops",
982                 {
983                     "nsInstanceId": db_nslcmop["nsInstanceId"],
984                     "lcmOperationType": "instantiate",
985                 },
986             )[-1]
987 1             ns_params = db_nslcmop_instantiate.get("operationParams")
988         else:
989 0             ns_params = db_nslcmop.get("operationParams")
990 1         ssh_keys_instantiation = ns_params.get("ssh_keys") or []
991 1         ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
992
993 1         cp2target = {}
994 1         for vld_index, vld in enumerate(db_nsr.get("vld")):
995 1             target_vim = "vim:{}".format(ns_params["vimAccountId"])
996 1             target_vld = {
997                 "id": vld["id"],
998                 "name": vld["name"],
999                 "mgmt-network": vld.get("mgmt-network", False),
1000                 "type": vld.get("type"),
1001                 "vim_info": {
1002                     target_vim: {
1003                         "vim_network_name": vld.get("vim-network-name"),
1004                         "vim_account_id": ns_params["vimAccountId"],
1005                     }
1006                 },
1007             }
1008             # check if this network needs SDN assist
1009 1             if vld.get("pci-interfaces"):
1010 0                 db_vim = get_vim_account(ns_params["vimAccountId"])
1011 0                 sdnc_id = db_vim["config"].get("sdn-controller")
1012 0                 if sdnc_id:
1013 0                     sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1014 0                     target_sdn = "sdn:{}".format(sdnc_id)
1015 0                     target_vld["vim_info"][target_sdn] = {
1016                         "sdn": True,
1017                         "target_vim": target_vim,
1018                         "vlds": [sdn_vld],
1019                         "type": vld.get("type"),
1020                     }
1021
1022 1             nsd_vnf_profiles = get_vnf_profiles(nsd)
1023 1             for nsd_vnf_profile in nsd_vnf_profiles:
1024 1                 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1025 1                     if cp["virtual-link-profile-id"] == vld["id"]:
1026 1                         cp2target[
1027                             "member_vnf:{}.{}".format(
1028                                 cp["constituent-cpd-id"][0][
1029                                     "constituent-base-element-id"
1030                                 ],
1031                                 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1032                             )
1033                         ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1034
1035             # check in the nsd descriptor whether there is an ip-profile
1036 1             vld_params = {}
1037 1             nsd_vlp = find_in_list(
1038                 get_virtual_link_profiles(nsd),
1039                 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1040                 == vld["id"],
1041             )
1042 1             if (
1043                 nsd_vlp
1044                 and nsd_vlp.get("virtual-link-protocol-data")
1045                 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1046             ):
1047 0                 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1048                     "l3-protocol-data"
1049                 ]
1050 0                 ip_profile_dest_data = {}
1051 0                 if "ip-version" in ip_profile_source_data:
1052 0                     ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1053                         "ip-version"
1054                     ]
1055 0                 if "cidr" in ip_profile_source_data:
1056 0                     ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1057                         "cidr"
1058                     ]
1059 0                 if "gateway-ip" in ip_profile_source_data:
1060 0                     ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1061                         "gateway-ip"
1062                     ]
1063 0                 if "dhcp-enabled" in ip_profile_source_data:
1064 0                     ip_profile_dest_data["dhcp-params"] = {
1065                         "enabled": ip_profile_source_data["dhcp-enabled"]
1066                     }
1067 0                 vld_params["ip-profile"] = ip_profile_dest_data
1068
1069             # update vld_params with instantiation params
1070 1             vld_instantiation_params = find_in_list(
1071                 get_iterable(ns_params, "vld"),
1072                 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1073             )
1074 1             if vld_instantiation_params:
1075 0                 vld_params.update(vld_instantiation_params)
1076 1             parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1077 1             target["ns"]["vld"].append(target_vld)
1078         # Update the target ns_vld if the vnf vim_account is overridden by instantiation params
1079 1         update_ns_vld_target(target, ns_params)
1080
1081 1         for vnfr in db_vnfrs.values():
1082 1             vnfd = find_in_list(
1083                 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1084             )
1085 1             vnf_params = find_in_list(
1086                 get_iterable(ns_params, "vnf"),
1087                 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1088             )
1089 1             target_vnf = deepcopy(vnfr)
1090 1             target_vim = "vim:{}".format(vnfr["vim-account-id"])
1091 1             for vld in target_vnf.get("vld", ()):
1092                 # check if connected to a ns.vld, to fill target
1093 1                 vnf_cp = find_in_list(
1094                     vnfd.get("int-virtual-link-desc", ()),
1095                     lambda cpd: cpd.get("id") == vld["id"],
1096                 )
1097 1                 if vnf_cp:
1098 1                     ns_cp = "member_vnf:{}.{}".format(
1099                         vnfr["member-vnf-index-ref"], vnf_cp["id"]
1100                     )
1101 1                     if cp2target.get(ns_cp):
1102 0                         vld["target"] = cp2target[ns_cp]
1103
1104 1                 vld["vim_info"] = {
1105                     target_vim: {"vim_network_name": vld.get("vim-network-name")}
1106                 }
1107                 # check if this network needs SDN assist
1108 1                 target_sdn = None
1109 1                 if vld.get("pci-interfaces"):
1110 0                     db_vim = get_vim_account(vnfr["vim-account-id"])
1111 0                     sdnc_id = db_vim["config"].get("sdn-controller")
1112 0                     if sdnc_id:
1113 0                         sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1114 0                         target_sdn = "sdn:{}".format(sdnc_id)
1115 0                         vld["vim_info"][target_sdn] = {
1116                             "sdn": True,
1117                             "target_vim": target_vim,
1118                             "vlds": [sdn_vld],
1119                             "type": vld.get("type"),
1120                         }
1121
1122                 # check in the vnfd descriptor whether there is an ip-profile
1123 1                 vld_params = {}
1124 1                 vnfd_vlp = find_in_list(
1125                     get_virtual_link_profiles(vnfd),
1126                     lambda a_link_profile: a_link_profile["id"] == vld["id"],
1127                 )
1128 1                 if (
1129                     vnfd_vlp
1130                     and vnfd_vlp.get("virtual-link-protocol-data")
1131                     and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1132                 ):
1133 0                     ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1134                         "l3-protocol-data"
1135                     ]
1136 0                     ip_profile_dest_data = {}
1137 0                     if "ip-version" in ip_profile_source_data:
1138 0                         ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1139                             "ip-version"
1140                         ]
1141 0                     if "cidr" in ip_profile_source_data:
1142 0                         ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1143                             "cidr"
1144                         ]
1145 0                     if "gateway-ip" in ip_profile_source_data:
1146 0                         ip_profile_dest_data[
1147                             "gateway-address"
1148                         ] = ip_profile_source_data["gateway-ip"]
1149 0                     if "dhcp-enabled" in ip_profile_source_data:
1150 0                         ip_profile_dest_data["dhcp-params"] = {
1151                             "enabled": ip_profile_source_data["dhcp-enabled"]
1152                         }
1153
1154 0                     vld_params["ip-profile"] = ip_profile_dest_data
1155                 # update vld_params with instantiation params
1156 1                 if vnf_params:
1157 0                     vld_instantiation_params = find_in_list(
1158                         get_iterable(vnf_params, "internal-vld"),
1159                         lambda i_vld: i_vld["name"] == vld["id"],
1160                     )
1161 0                     if vld_instantiation_params:
1162 0                         vld_params.update(vld_instantiation_params)
1163 1                 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1164
1165 1             vdur_list = []
1166 1             for vdur in target_vnf.get("vdur", ()):
1167 1                 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1168 0                     continue  # This vdu must not be created
1169 1                 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1170
1171 1                 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1172
1173 1                 if ssh_keys_all:
1174 1                     vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1175 1                     vnf_configuration = get_configuration(vnfd, vnfd["id"])
1176 1                     if (
1177                         vdu_configuration
1178                         and vdu_configuration.get("config-access")
1179                         and vdu_configuration.get("config-access").get("ssh-access")
1180                     ):
1181 0                         vdur["ssh-keys"] = ssh_keys_all
1182 0                         vdur["ssh-access-required"] = vdu_configuration[
1183                             "config-access"
1184                         ]["ssh-access"]["required"]
1185 1                     elif (
1186                         vnf_configuration
1187                         and vnf_configuration.get("config-access")
1188                         and vnf_configuration.get("config-access").get("ssh-access")
1189                         and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1190                     ):
1191 0                         vdur["ssh-keys"] = ssh_keys_all
1192 0                         vdur["ssh-access-required"] = vnf_configuration[
1193                             "config-access"
1194                         ]["ssh-access"]["required"]
1195 1                     elif ssh_keys_instantiation and find_in_list(
1196                         vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1197                     ):
1198 0                         vdur["ssh-keys"] = ssh_keys_instantiation
1199
1200 1                 self.logger.debug("NS > vdur > {}".format(vdur))
1201
1202 1                 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1203                 # cloud-init
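                       # the cloud-init content is cached in target["cloud_init_content"], keyed by
                       # "<vnfd_id>:file:<name>" (read from the package) or "<vnfd_id>:vdu:<index>"
                       # (taken from the descriptor), so NG-RO does not need access to the packages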
1204 1                 if vdud.get("cloud-init-file"):
1205 1                     vdur["cloud-init"] = "{}:file:{}".format(
1206                         vnfd["_id"], vdud.get("cloud-init-file")
1207                     )
1208                     # read the file and put its content at target.cloud_init_content, so ng_ro does not need to use the shared package system
1209 1                     if vdur["cloud-init"] not in target["cloud_init_content"]:
1210 1                         base_folder = vnfd["_admin"]["storage"]
1211 1                         if base_folder["pkg-dir"]:
1212 1                             cloud_init_file = "{}/{}/cloud_init/{}".format(
1213                                 base_folder["folder"],
1214                                 base_folder["pkg-dir"],
1215                                 vdud.get("cloud-init-file"),
1216                             )
1217                         else:
1218 0                             cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1219                                 base_folder["folder"],
1220                                 vdud.get("cloud-init-file"),
1221                             )
1222 1                         with self.fs.file_open(cloud_init_file, "r") as ci_file:
1223 1                             target["cloud_init_content"][
1224                                 vdur["cloud-init"]
1225                             ] = ci_file.read()
1226 1                 elif vdud.get("cloud-init"):
1227 0                     vdur["cloud-init"] = "{}:vdu:{}".format(
1228                         vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1229                     )
1230                     # put the content at target.cloud_init_content, so ng_ro does not need to read the vnfd descriptor
1231 0                     target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1232                         "cloud-init"
1233                     ]
1234 1                 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1235 1                 deploy_params_vdu = self._format_additional_params(
1236                     vdur.get("additionalParams") or {}
1237                 )
1238 1                 deploy_params_vdu["OSM"] = get_osm_params(
1239                     vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1240                 )
1241 1                 vdur["additionalParams"] = deploy_params_vdu
1242
1243                 # flavor
1244 1                 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1245 1                 if target_vim not in ns_flavor["vim_info"]:
1246 1                     ns_flavor["vim_info"][target_vim] = {}
1247
1248                 # deal with images
1249                 # in case alternative images are provided, check whether one of them should be applied
1250                 # for this vim_type and, if so, replace the ns-image-id accordingly
1251 1                 ns_image_id = int(vdur["ns-image-id"])
1252 1                 if vdur.get("alt-image-ids"):
1253 0                     db_vim = get_vim_account(vnfr["vim-account-id"])
1254 0                     vim_type = db_vim["vim_type"]
1255 0                     for alt_image_id in vdur.get("alt-image-ids"):
1256 0                         ns_alt_image = target["image"][int(alt_image_id)]
1257 0                         if vim_type == ns_alt_image.get("vim-type"):
1258                             # must use alternative image
1259 0                             self.logger.debug(
1260                                 "use alternative image id: {}".format(alt_image_id)
1261                             )
1262 0                             ns_image_id = alt_image_id
1263 0                             vdur["ns-image-id"] = ns_image_id
1264 0                             break
1265 1                 ns_image = target["image"][int(ns_image_id)]
1266 1                 if target_vim not in ns_image["vim_info"]:
1267 1                     ns_image["vim_info"][target_vim] = {}
1268
1269                 # Affinity groups
1270 1                 if vdur.get("affinity-or-anti-affinity-group-id"):
1271 0                     for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1272 0                         ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1273 0                         if target_vim not in ns_ags["vim_info"]:
1274 0                             ns_ags["vim_info"][target_vim] = {}
1275
1276 1                 vdur["vim_info"] = {target_vim: {}}
1277                 # instantiation parameters
1278 1                 if vnf_params:
1279 0                     vdu_instantiation_params = find_in_list(
1280                         get_iterable(vnf_params, "vdu"),
1281                         lambda i_vdu: i_vdu["id"] == vdud["id"],
1282                     )
1283 0                     if vdu_instantiation_params:
1284                         # Parse the vdu_volumes from the instantiation params
1285 0                         vdu_volumes = get_volumes_from_instantiation_params(
1286                             vdu_instantiation_params, vdud
1287                         )
1288 0                         vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1289 1                 vdur_list.append(vdur)
1290 1             target_vnf["vdur"] = vdur_list
1291 1             target["vnf"].append(target_vnf)
1292
1293 1         self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1294 1         desc = await self.RO.deploy(nsr_id, target)
1295 1         self.logger.debug("RO return > {}".format(desc))
1296 1         action_id = desc["action_id"]
1297 1         await self._wait_ng_ro(
1298             nsr_id,
1299             action_id,
1300             nslcmop_id,
1301             start_deploy,
1302             timeout_ns_deploy,
1303             stage,
1304             operation="instantiation",
1305         )
1306
1307         # Updating NSR
1308 1         db_nsr_update = {
1309             "_admin.deployed.RO.operational-status": "running",
1310             "detailed-status": " ".join(stage),
1311         }
1312         # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1313 1         self.update_db_2("nsrs", nsr_id, db_nsr_update)
1314 1         self._write_op_status(nslcmop_id, stage)
1315 1         self.logger.debug(
1316             logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1317         )
1318 1         return
1319
1320 1     async def _wait_ng_ro(
1321         self,
1322         nsr_id,
1323         action_id,
1324         nslcmop_id=None,
1325         start_time=None,
1326         timeout=600,
1327         stage=None,
1328         operation=None,
1329     ):
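               # Polls the RO operation status (via op_status_map[operation]) every 15 seconds until it
               # reports DONE, raising NgRoException on FAILED or when the timeout expires; while in
               # BUILD state the VIM details are copied into stage[2] and persisted in nsrs/nslcmops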
1330 1         detailed_status_old = None
1331 1         db_nsr_update = {}
1332 1         start_time = start_time or time()
1333 1         while time() <= start_time + timeout:
1334 1             desc_status = await self.op_status_map[operation](nsr_id, action_id)
1335 0             self.logger.debug("Wait NG RO > {}".format(desc_status))
1336 0             if desc_status["status"] == "FAILED":
1337 0                 raise NgRoException(desc_status["details"])
1338 0             elif desc_status["status"] == "BUILD":
1339 0                 if stage:
1340 0                     stage[2] = "VIM: ({})".format(desc_status["details"])
1341 0             elif desc_status["status"] == "DONE":
1342 0                 if stage:
1343 0                     stage[2] = "Deployed at VIM"
1344 0                 break
1345             else:
1346 0                 assert False, "ROclient.check_ns_status returns unknown {}".format(
1347                     desc_status["status"]
1348                 )
1349 0             if stage and nslcmop_id and stage[2] != detailed_status_old:
1350 0                 detailed_status_old = stage[2]
1351 0                 db_nsr_update["detailed-status"] = " ".join(stage)
1352 0                 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1353 0                 self._write_op_status(nslcmop_id, stage)
1354 0             await asyncio.sleep(15, loop=self.loop)
1355         else:  # timeout_ns_deploy
1356 0             raise NgRoException("Timeout waiting for ns to deploy")
1357
1358 1     async def _terminate_ng_ro(
1359         self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1360     ):
1361 0         db_nsr_update = {}
1362 0         failed_detail = []
1363 0         action_id = None
1364 0         start_deploy = time()
1365 0         try:
1366 0             target = {
1367                 "ns": {"vld": []},
1368                 "vnf": [],
1369                 "image": [],
1370                 "flavor": [],
1371                 "action_id": nslcmop_id,
1372             }
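                   # an empty target (no vlds, vnfs, images or flavors) is sent so that NG-RO
                   # undeploys everything currently deployed for this NS at the VIM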
1373 0             desc = await self.RO.deploy(nsr_id, target)
1374 0             action_id = desc["action_id"]
1375 0             db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1376 0             db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1377 0             self.logger.debug(
1378                 logging_text
1379                 + "ns terminate action at RO. action_id={}".format(action_id)
1380             )
1381
1382             # wait until done
1383 0             delete_timeout = 20 * 60  # 20 minutes
1384 0             await self._wait_ng_ro(
1385                 nsr_id,
1386                 action_id,
1387                 nslcmop_id,
1388                 start_deploy,
1389                 delete_timeout,
1390                 stage,
1391                 operation="termination",
1392             )
1393
1394 0             db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1395 0             db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1396             # delete all nsr
1397 0             await self.RO.delete(nsr_id)
1398 0         except Exception as e:
1399 0             if isinstance(e, NgRoException) and e.http_code == 404:  # not found
1400 0                 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1401 0                 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1402 0                 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1403 0                 self.logger.debug(
1404                     logging_text + "RO_action_id={} already deleted".format(action_id)
1405                 )
1406 0             elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
1407 0                 failed_detail.append("delete conflict: {}".format(e))
1408 0                 self.logger.debug(
1409                     logging_text
1410                     + "RO_action_id={} delete conflict: {}".format(action_id, e)
1411                 )
1412             else:
1413 0                 failed_detail.append("delete error: {}".format(e))
1414 0                 self.logger.error(
1415                     logging_text
1416                     + "RO_action_id={} delete error: {}".format(action_id, e)
1417                 )
1418
1419 0         if failed_detail:
1420 0             stage[2] = "Error deleting from VIM"
1421         else:
1422 0             stage[2] = "Deleted from VIM"
1423 0         db_nsr_update["detailed-status"] = " ".join(stage)
1424 0         self.update_db_2("nsrs", nsr_id, db_nsr_update)
1425 0         self._write_op_status(nslcmop_id, stage)
1426
1427 0         if failed_detail:
1428 0             raise LcmException("; ".join(failed_detail))
1429 0         return
1430
1431 1     async def instantiate_RO(
1432         self,
1433         logging_text,
1434         nsr_id,
1435         nsd,
1436         db_nsr,
1437         db_nslcmop,
1438         db_vnfrs,
1439         db_vnfds,
1440         n2vc_key_list,
1441         stage,
1442     ):
1443         """
1444         Instantiate at RO
1445         :param logging_text: prefix text to use for logging
1446         :param nsr_id: nsr identity
1447         :param nsd: database content of ns descriptor
1448         :param db_nsr: database content of ns record
1449         :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1450         :param db_vnfrs:
1451         :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1452         :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1453         :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1454         :return: None or exception
1455         """
1456 0         try:
1457 0             start_deploy = time()
1458 0             ns_params = db_nslcmop.get("operationParams")
1459 0             if ns_params and ns_params.get("timeout_ns_deploy"):
1460 0                 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1461             else:
1462 0                 timeout_ns_deploy = self.timeout.ns_deploy
1463
1464             # Check for and optionally request placement optimization. The database will be updated if placement is activated
1465 0             stage[2] = "Waiting for Placement."
1466 0             if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1467                 # in case of placement, change ns_params["vimAccountId"] if it is not present in any vnfr
1468 0                 for vnfr in db_vnfrs.values():
1469 0                     if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1470 0                         break
1471                 else:
1472 0                     ns_params["vimAccountId"] = vnfr["vim-account-id"]
1473
1474 0             return await self._instantiate_ng_ro(
1475                 logging_text,
1476                 nsr_id,
1477                 nsd,
1478                 db_nsr,
1479                 db_nslcmop,
1480                 db_vnfrs,
1481                 db_vnfds,
1482                 n2vc_key_list,
1483                 stage,
1484                 start_deploy,
1485                 timeout_ns_deploy,
1486             )
1487 0         except Exception as e:
1488 0             stage[2] = "ERROR deploying at VIM"
1489 0             self.set_vnfr_at_error(db_vnfrs, str(e))
1490 0             self.logger.error(
1491                 "Error deploying at VIM {}".format(e),
1492                 exc_info=not isinstance(
1493                     e,
1494                     (
1495                         ROclient.ROClientException,
1496                         LcmException,
1497                         DbException,
1498                         NgRoException,
1499                     ),
1500                 ),
1501             )
1502 0             raise
1503
1504 1     async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1505         """
1506         Wait for kdu to be up, get ip address
1507         :param logging_text: prefix used for logging
1508         :param nsr_id:
1509         :param vnfr_id:
1510         :param kdu_name:
1511         :return: IP address, K8s services
1512         """
1513
1514         # self.logger.debug(logging_text + "Starting wait_kdu_up")
1515 0         nb_tries = 0
1516
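               # poll the vnfr in the database every 10 seconds, up to 360 tries (roughly one hour)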
1517 0         while nb_tries < 360:
1518 0             db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1519 0             kdur = next(
1520                 (
1521                     x
1522                     for x in get_iterable(db_vnfr, "kdur")
1523                     if x.get("kdu-name") == kdu_name
1524                 ),
1525                 None,
1526             )
1527 0             if not kdur:
1528 0                 raise LcmException(
1529                     "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1530                 )
1531 0             if kdur.get("status"):
1532 0                 if kdur["status"] in ("READY", "ENABLED"):
1533 0                     return kdur.get("ip-address"), kdur.get("services")
1534                 else:
1535 0                     raise LcmException(
1536                         "target KDU={} is in error state".format(kdu_name)
1537                     )
1538
1539 0             await asyncio.sleep(10, loop=self.loop)
1540 0             nb_tries += 1
1541 0         raise LcmException("Timeout waiting for KDU={} to be instantiated".format(kdu_name))
1542
1543 1     async def wait_vm_up_insert_key_ro(
1544         self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1545     ):
1546         """
1547         Wait for the IP address at RO and, optionally, insert a public key into the virtual machine
1548         :param logging_text: prefix used for logging
1549         :param nsr_id:
1550         :param vnfr_id:
1551         :param vdu_id:
1552         :param vdu_index:
1553         :param pub_key: public ssh key to inject, None to skip
1554         :param user: user to apply the public ssh key
1555         :return: IP address
1556         """
1557
1558 0         self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1559 0         ro_nsr_id = None
1560 0         ip_address = None
1561 0         nb_tries = 0
1562 0         target_vdu_id = None
1563 0         ro_retries = 0
1564
1565         while True:
1566 0             ro_retries += 1
1567 0             if ro_retries >= 360:  # 1 hour
1568 0                 raise LcmException(
1569                     "Timeout waiting for the VM IP address for nsr_id: {}".format(nsr_id)
1570                 )
1571
1572 0             await asyncio.sleep(10, loop=self.loop)
1573
1574             # get ip address
1575 0             if not target_vdu_id:
1576 0                 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1577
1578 0                 if not vdu_id:  # for the VNF case
1579 0                     if db_vnfr.get("status") == "ERROR":
1580 0                         raise LcmException(
1581                             "Cannot inject ssh-key because target VNF is in error state"
1582                         )
1583 0                     ip_address = db_vnfr.get("ip-address")
1584 0                     if not ip_address:
1585 0                         continue
1586 0                     vdur = next(
1587                         (
1588                             x
1589                             for x in get_iterable(db_vnfr, "vdur")
1590                             if x.get("ip-address") == ip_address
1591                         ),
1592                         None,
1593                     )
1594                 else:  # VDU case
1595 0                     vdur = next(
1596                         (
1597                             x
1598                             for x in get_iterable(db_vnfr, "vdur")
1599                             if x.get("vdu-id-ref") == vdu_id
1600                             and x.get("count-index") == vdu_index
1601                         ),
1602                         None,
1603                     )
1604
1605 0                 if (
1606                     not vdur and len(db_vnfr.get("vdur", ())) == 1
1607                 ):  # If only one, this should be the target vdu
1608 0                     vdur = db_vnfr["vdur"][0]
1609 0                 if not vdur:
1610 0                     raise LcmException(
1611                         "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1612                             vnfr_id, vdu_id, vdu_index
1613                         )
1614                     )
1615                 # New generation RO stores information at "vim_info"
1616 0                 ng_ro_status = None
1617 0                 target_vim = None
1618 0                 if vdur.get("vim_info"):
1619 0                     target_vim = next(
1620                         t for t in vdur["vim_info"]
1621                     )  # there should be only one key
1622 0                     ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1623 0                 if (
1624                     vdur.get("pdu-type")
1625                     or vdur.get("status") == "ACTIVE"
1626                     or ng_ro_status == "ACTIVE"
1627                 ):
1628 0                     ip_address = vdur.get("ip-address")
1629 0                     if not ip_address:
1630 0                         continue
1631 0                     target_vdu_id = vdur["vdu-id-ref"]
1632 0                 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1633 0                     raise LcmException(
1634                         "Cannot inject ssh-key because target VM is in error state"
1635                     )
1636
1637 0             if not target_vdu_id:
1638 0                 continue
1639
1640             # inject public key into machine
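                   # with NG-RO the key is injected by deploying an "inject_ssh_key" action for the
                   # target vdur; with the classic RO an ns action with "add_public_key" is created
                   # and its per-VM vim_result is checked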
1641 0             if pub_key and user:
1642 0                 self.logger.debug(logging_text + "Inserting RO key")
1643 0                 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1644 0                 if vdur.get("pdu-type"):
1645 0                     self.logger.error(logging_text + "Cannot inject ssh-key into a PDU")
1646 0                     return ip_address
1647 0                 try:
1648 0                     ro_vm_id = "{}-{}".format(
1649                         db_vnfr["member-vnf-index-ref"], target_vdu_id
1650                     )  # TODO add vdu_index
1651 0                     if self.ro_config.ng:
1652 0                         target = {
1653                             "action": {
1654                                 "action": "inject_ssh_key",
1655                                 "key": pub_key,
1656                                 "user": user,
1657                             },
1658                             "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1659                         }
1660 0                         desc = await self.RO.deploy(nsr_id, target)
1661 0                         action_id = desc["action_id"]
1662 0                         await self._wait_ng_ro(
1663                             nsr_id, action_id, timeout=600, operation="instantiation"
1664                         )
1665 0                         break
1666                     else:
1667                         # wait until NS is deployed at RO
1668 0                         if not ro_nsr_id:
1669 0                             db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1670 0                             ro_nsr_id = deep_get(
1671                                 db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
1672                             )
1673 0                         if not ro_nsr_id:
1674 0                             continue
1675 0                         result_dict = await self.RO.create_action(
1676                             item="ns",
1677                             item_id_name=ro_nsr_id,
1678                             descriptor={
1679                                 "add_public_key": pub_key,
1680                                 "vms": [ro_vm_id],
1681                                 "user": user,
1682                             },
1683                         )
1684                         # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1685 0                         if not result_dict or not isinstance(result_dict, dict):
1686 0                             raise LcmException(
1687                                 "Unknown response from RO when injecting key"
1688                             )
1689 0                         for result in result_dict.values():
1690 0                             if result.get("vim_result") == 200:
1691 0                                 break
1692                             else:
1693 0                                 raise ROclient.ROClientException(
1694                                     "error injecting key: {}".format(
1695                                         result.get("description")
1696                                     )
1697                                 )
1698 0                         break
1699 0                 except NgRoException as e:
1700 0                     raise LcmException(
1701                         "Reaching max tries injecting key. Error: {}".format(e)
1702                     )
1703 0                 except ROclient.ROClientException as e:
1704 0                     if not nb_tries:
1705 0                         self.logger.debug(
1706                             logging_text
1707                             + "error injecting key: {}. Retrying until {} seconds".format(
1708                                 e, 20 * 10
1709                             )
1710                         )
1711 0                     nb_tries += 1
1712 0                     if nb_tries >= 20:
1713 0                         raise LcmException(
1714                             "Reaching max tries injecting key. Error: {}".format(e)
1715                         )
1716             else:
1717 0                 break
1718
1719 0         return ip_address
1720
1721 1     async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1722         """
1723         Wait until dependent VCA deployments have finished. The NS waits for VNFs and VDUs; VNFs wait for VDUs.
1724         """
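               # polls nsrs.configurationStatus every 10 seconds (up to 300 iterations); a dependent
               # VCA in BROKEN state aborts the configuration, a non-READY one keeps this VCA waiting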
1725 0         my_vca = vca_deployed_list[vca_index]
1726 0         if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1727             # vdu or kdu: no dependencies
1728 0             return
1729 0         timeout = 300
1730 0         while timeout >= 0:
1731 0             db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1732 0             vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1733 0             configuration_status_list = db_nsr["configurationStatus"]
1734 0             for index, vca_deployed in enumerate(configuration_status_list):
1735 0                 if index == vca_index:
1736                     # myself
1737 0                     continue
1738 0                 if not my_vca.get("member-vnf-index") or (
1739                     vca_deployed.get("member-vnf-index")
1740                     == my_vca.get("member-vnf-index")
1741                 ):
1742 0                     internal_status = configuration_status_list[index].get("status")
1743 0                     if internal_status == "READY":
1744 0                         continue
1745 0                     elif internal_status == "BROKEN":
1746 0                         raise LcmException(
1747                             "Configuration aborted because dependent charm/s has failed"
1748                         )
1749                     else:
1750 0                         break
1751             else:
1752                 # no dependencies, return
1753 0                 return
1754 0             await asyncio.sleep(10)
1755 0             timeout -= 1
1756
1757 0         raise LcmException("Configuration aborted because dependent charm/s timeout")
1758
1759 1     def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1760 1         vca_id = None
1761 1         if db_vnfr:
1762 1             vca_id = deep_get(db_vnfr, ("vca-id",))
1763 1         elif db_nsr:
1764 1             vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1765 1             vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1766 1         return vca_id
1767
1768 1     async def instantiate_N2VC(
1769         self,
1770         logging_text,
1771         vca_index,
1772         nsi_id,
1773         db_nsr,
1774         db_vnfr,
1775         vdu_id,
1776         kdu_name,
1777         vdu_index,
1778         config_descriptor,
1779         deploy_params,
1780         base_folder,
1781         nslcmop_id,
1782         stage,
1783         vca_type,
1784         vca_name,
1785         ee_config_descriptor,
1786     ):
1787 0         nsr_id = db_nsr["_id"]
1788 0         db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1789 0         vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1790 0         vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1791 0         osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1792 0         db_dict = {
1793             "collection": "nsrs",
1794             "filter": {"_id": nsr_id},
1795             "path": db_update_entry,
1796         }
1797 0         step = ""
1798 0         try:
1799 0             element_type = "NS"
1800 0             element_under_configuration = nsr_id
1801
1802 0             vnfr_id = None
1803 0             if db_vnfr:
1804 0                 vnfr_id = db_vnfr["_id"]
1805 0                 osm_config["osm"]["vnf_id"] = vnfr_id
1806
1807 0             namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1808
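                   # the namespace ends up as "<nsi_id>.<nsr_id>[.<vnfr_id>-<index>[.<vdu_id>-<index>]]"
                   # or "...<vnfr_id>-<index>.<kdu_name>", depending on the element being configured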
1809 0             if vca_type == "native_charm":
1810 0                 index_number = 0
1811             else:
1812 0                 index_number = vdu_index or 0
1813
1814 0             if vnfr_id:
1815 0                 element_type = "VNF"
1816 0                 element_under_configuration = vnfr_id
1817 0                 namespace += ".{}-{}".format(vnfr_id, index_number)
1818 0                 if vdu_id:
1819 0                     namespace += ".{}-{}".format(vdu_id, index_number)
1820 0                     element_type = "VDU"
1821 0                     element_under_configuration = "{}-{}".format(vdu_id, index_number)
1822 0                     osm_config["osm"]["vdu_id"] = vdu_id
1823 0                 elif kdu_name:
1824 0                     namespace += ".{}".format(kdu_name)
1825 0                     element_type = "KDU"
1826 0                     element_under_configuration = kdu_name
1827 0                     osm_config["osm"]["kdu_name"] = kdu_name
1828
1829             # Get artifact path
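                   # "<folder>/<pkg-dir>/charms|helm-charts/<vca_name>" when the package has a pkg-dir,
                   # "<folder>/Scripts/charms|helm-charts/<vca_name>" otherwise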
1830 0             if base_folder["pkg-dir"]:
1831 0                 artifact_path = "{}/{}/{}/{}".format(
1832                     base_folder["folder"],
1833                     base_folder["pkg-dir"],
1834                     "charms"
1835                     if vca_type
1836                     in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1837                     else "helm-charts",
1838                     vca_name,
1839                 )
1840             else:
1841 0                 artifact_path = "{}/Scripts/{}/{}/".format(
1842                     base_folder["folder"],
1843                     "charms"
1844                     if vca_type
1845                     in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1846                     else "helm-charts",
1847                     vca_name,
1848                 )
1849
1850 0             self.logger.debug("Artifact path > {}".format(artifact_path))
1851
1852             # get initial_config_primitive_list that applies to this element
1853 0             initial_config_primitive_list = config_descriptor.get(
1854                 "initial-config-primitive"
1855             )
1856
1857 0             self.logger.debug(
1858                 "Initial config primitive list > {}".format(
1859                     initial_config_primitive_list
1860                 )
1861             )
1862
1863             # add config if not present for NS charm
1864 0             ee_descriptor_id = ee_config_descriptor.get("id")
1865 0             self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1866 0             initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1867                 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1868             )
1869
1870 0             self.logger.debug(
1871                 "Initial config primitive list #2 > {}".format(
1872                     initial_config_primitive_list
1873                 )
1874             )
1875             # n2vc_redesign STEP 3.1
1876             # find old ee_id if exists
1877 0             ee_id = vca_deployed.get("ee_id")
1878
1879 0             vca_id = self.get_vca_id(db_vnfr, db_nsr)
1880             # create or register execution environment in VCA
1881 0             if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1882 0                 self._write_configuration_status(
1883                     nsr_id=nsr_id,
1884                     vca_index=vca_index,
1885                     status="CREATING",
1886                     element_under_configuration=element_under_configuration,
1887                     element_type=element_type,
1888                 )
1889
1890 0                 step = "create execution environment"
1891 0                 self.logger.debug(logging_text + step)
1892
1893 0                 ee_id = None
1894 0                 credentials = None
1895 0                 if vca_type == "k8s_proxy_charm":
1896 0                     ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1897                         charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1898                         namespace=namespace,
1899                         artifact_path=artifact_path,
1900                         db_dict=db_dict,
1901                         vca_id=vca_id,
1902                     )
1903 0                 elif vca_type == "helm" or vca_type == "helm-v3":
1904 0                     ee_id, credentials = await self.vca_map[
1905                         vca_type
1906                     ].create_execution_environment(
1907                         namespace=namespace,
1908                         reuse_ee_id=ee_id,
1909                         db_dict=db_dict,
1910                         config=osm_config,
1911                         artifact_path=artifact_path,
1912                         chart_model=vca_name,
1913                         vca_type=vca_type,
1914                     )
1915                 else:
1916 0                     ee_id, credentials = await self.vca_map[
1917                         vca_type
1918                     ].create_execution_environment(
1919                         namespace=namespace,
1920                         reuse_ee_id=ee_id,
1921                         db_dict=db_dict,
1922                         vca_id=vca_id,
1923                     )
1924
1925 0             elif vca_type == "native_charm":
1926 0                 step = "Waiting for VM to be up and getting IP address"
1927 0                 self.logger.debug(logging_text + step)
1928 0                 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1929                     logging_text,
1930                     nsr_id,
1931                     vnfr_id,
1932                     vdu_id,
1933                     vdu_index,
1934                     user=None,
1935                     pub_key=None,
1936                 )
1937 0                 credentials = {"hostname": rw_mgmt_ip}
1938                 # get username
1939 0                 username = deep_get(
1940                     config_descriptor, ("config-access", "ssh-access", "default-user")
1941                 )
1942                 # TODO remove this when the IM changes regarding config-access:ssh-access:default-user are
1943                 #  merged. Meanwhile, get the username from initial-config-primitive
1944 0                 if not username and initial_config_primitive_list:
1945 0                     for config_primitive in initial_config_primitive_list:
1946 0                         for param in config_primitive.get("parameter", ()):
1947 0                             if param["name"] == "ssh-username":
1948 0                                 username = param["value"]
1949 0                                 break
1950 0                 if not username:
1951 0                     raise LcmException(
1952                         "Cannot determine the username neither with 'initial-config-primitive' nor with "
1953                         "'config-access.ssh-access.default-user'"
1954                     )
1955 0                 credentials["username"] = username
1956                 # n2vc_redesign STEP 3.2
1957
1958 0                 self._write_configuration_status(
1959                     nsr_id=nsr_id,
1960                     vca_index=vca_index,
1961                     status="REGISTERING",
1962                     element_under_configuration=element_under_configuration,
1963                     element_type=element_type,
1964                 )
1965
1966 0                 step = "register execution environment {}".format(credentials)
1967 0                 self.logger.debug(logging_text + step)
1968 0                 ee_id = await self.vca_map[vca_type].register_execution_environment(
1969                     credentials=credentials,
1970                     namespace=namespace,
1971                     db_dict=db_dict,
1972                     vca_id=vca_id,
1973                 )
1974
1975             # for compatibility with MON/POL modules, they need the model and application name in the database
1976             # TODO ask MON/POL whether the "model_name.application_name" format can stop being assumed
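                   # e.g. an ee_id typically of the form "<model>.<application>[.<machine>]" (see
                   # get_ee_id_parts) yields model "<model>" and application "<application>" below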
1977 0             ee_id_parts = ee_id.split(".")
1978 0             db_nsr_update = {db_update_entry + "ee_id": ee_id}
1979 0             if len(ee_id_parts) >= 2:
1980 0                 model_name = ee_id_parts[0]
1981 0                 application_name = ee_id_parts[1]
1982 0                 db_nsr_update[db_update_entry + "model"] = model_name
1983 0                 db_nsr_update[db_update_entry + "application"] = application_name
1984
1985             # n2vc_redesign STEP 3.3
1986 0             step = "Install configuration Software"
1987
1988 0             self._write_configuration_status(
1989                 nsr_id=nsr_id,
1990                 vca_index=vca_index,
1991                 status="INSTALLING SW",
1992                 element_under_configuration=element_under_configuration,
1993                 element_type=element_type,
1994                 other_update=db_nsr_update,
1995             )
1996
1997             # TODO check if already done
1998 0             self.logger.debug(logging_text + step)
1999 0             config = None
2000 0             if vca_type == "native_charm":
2001 0                 config_primitive = next(
2002                     (p for p in initial_config_primitive_list if p["name"] == "config"),
2003                     None,
2004                 )
2005 0                 if config_primitive:
2006 0                     config = self._map_primitive_params(
2007                         config_primitive, {}, deploy_params
2008                     )
2009 0             num_units = 1
2010 0             if vca_type == "lxc_proxy_charm":
2011 0                 if element_type == "NS":
2012 0                     num_units = db_nsr.get("config-units") or 1
2013 0                 elif element_type == "VNF":
2014 0                     num_units = db_vnfr.get("config-units") or 1
2015 0                 elif element_type == "VDU":
2016 0                     for v in db_vnfr["vdur"]:
2017 0                         if vdu_id == v["vdu-id-ref"]:
2018 0                             num_units = v.get("config-units") or 1
2019 0                             break
2020 0             if vca_type != "k8s_proxy_charm":
2021 0                 await self.vca_map[vca_type].install_configuration_sw(
2022                     ee_id=ee_id,
2023                     artifact_path=artifact_path,
2024                     db_dict=db_dict,
2025                     config=config,
2026                     num_units=num_units,
2027                     vca_id=vca_id,
2028                     vca_type=vca_type,
2029                 )
2030
2031             # write flag in db indicating that the configuration sw is already installed
2032 0             self.update_db_2(
2033                 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2034             )
2035
2036             # add relations for this VCA (wait for other peers related with this VCA)
2037 0             await self._add_vca_relations(
2038                 logging_text=logging_text,
2039                 nsr_id=nsr_id,
2040                 vca_type=vca_type,
2041                 vca_index=vca_index,
2042             )
2043
2044             # if SSH access is required, then get the execution environment SSH public key
2045             # if native charm, we have already waited for the VM to be up
2046 0             if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2047 0                 pub_key = None
2048 0                 user = None
2049                 # self.logger.debug("get ssh key block")
2050 0                 if deep_get(
2051                     config_descriptor, ("config-access", "ssh-access", "required")
2052                 ):
2053                     # self.logger.debug("ssh key needed")
2054                     # Needed to inject a ssh key
2055 0                     user = deep_get(
2056                         config_descriptor,
2057                         ("config-access", "ssh-access", "default-user"),
2058                     )
2059 0                     step = "Install configuration Software, getting public ssh key"
2060 0                     pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2061                         ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2062                     )
2063
2064 0                     step = "Insert public key into VM user={} ssh_key={}".format(
2065                         user, pub_key
2066                     )
2067                 else:
2068                     # self.logger.debug("no need to get ssh key")
2069 0                     step = "Waiting for VM to be up and getting IP address"
2070 0                 self.logger.debug(logging_text + step)
2071
2072                 # default rw_mgmt_ip to None so the variable is always defined
2073 0                 rw_mgmt_ip = None
2074
2075                 # n2vc_redesign STEP 5.1
2076                 # wait for RO (ip-address) Insert pub_key into VM
2077 0                 if vnfr_id:
2078 0                     if kdu_name:
2079 0                         rw_mgmt_ip, services = await self.wait_kdu_up(
2080                             logging_text, nsr_id, vnfr_id, kdu_name
2081                         )
2082 0                         vnfd = self.db.get_one(
2083                             "vnfds_revisions",
2084                             {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2085                         )
2086 0                         kdu = get_kdu(vnfd, kdu_name)
2087 0                         kdu_services = [
2088                             service["name"] for service in get_kdu_services(kdu)
2089                         ]
2090 0                         exposed_services = []
2091 0                         for service in services:
2092 0                             if any(s in service["name"] for s in kdu_services):
2093 0                                 exposed_services.append(service)
2094 0                         await self.vca_map[vca_type].exec_primitive(
2095                             ee_id=ee_id,
2096                             primitive_name="config",
2097                             params_dict={
2098                                 "osm-config": json.dumps(
2099                                     OsmConfigBuilder(
2100                                         k8s={"services": exposed_services}
2101                                     ).build()
2102                                 )
2103                             },
2104                             vca_id=vca_id,
2105                         )
2106
2107                     # This check is needed to avoid trying to add a public key to a VM
2108                     # when the VNF is actually a KNF (in the edge case where the user creates a VCA
2109                     # for a KNF and not for its KDUs, the previous check is False and the code
2110                     # reaches this block, so it still has to verify whether the VNF is really a VNF
2111                     # or a KNF)
2112 0                     elif db_vnfr.get("vdur"):
2113 0                         rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2114                             logging_text,
2115                             nsr_id,
2116                             vnfr_id,
2117                             vdu_id,
2118                             vdu_index,
2119                             user=user,
2120                             pub_key=pub_key,
2121                         )
2122
2123 0                 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2124
2125             # store rw_mgmt_ip in deploy params for later replacement
2126 0             deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2127
2128             # n2vc_redesign STEP 6  Execute initial config primitive
2129 0             step = "execute initial config primitive"
2130
2131             # wait for dependent primitives execution (NS -> VNF -> VDU)
2132 0             if initial_config_primitive_list:
2133 0                 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2134
2135             # stage, in function of element type: vdu, kdu, vnf or ns
2136 0             my_vca = vca_deployed_list[vca_index]
2137 0             if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2138                 # VDU or KDU
2139 0                 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2140 0             elif my_vca.get("member-vnf-index"):
2141                 # VNF
2142 0                 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2143             else:
2144                 # NS
2145 0                 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2146
2147 0             self._write_configuration_status(
2148                 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2149             )
2150
2151 0             self._write_op_status(op_id=nslcmop_id, stage=stage)
2152
2153 0             check_if_terminated_needed = True
2154 0             for initial_config_primitive in initial_config_primitive_list:
2155                 # adding information on the vca_deployed if it is a NS execution environment
2156 0                 if not vca_deployed["member-vnf-index"]:
2157 0                     deploy_params["ns_config_info"] = json.dumps(
2158                         self._get_ns_config_info(nsr_id)
2159                     )
2160                 # TODO check if already done
2161 0                 primitive_params_ = self._map_primitive_params(
2162                     initial_config_primitive, {}, deploy_params
2163                 )
2164
2165 0                 step = "execute primitive '{}' params '{}'".format(
2166                     initial_config_primitive["name"], primitive_params_
2167                 )
2168 0                 self.logger.debug(logging_text + step)
2169 0                 await self.vca_map[vca_type].exec_primitive(
2170                     ee_id=ee_id,
2171                     primitive_name=initial_config_primitive["name"],
2172                     params_dict=primitive_params_,
2173                     db_dict=db_dict,
2174                     vca_id=vca_id,
2175                     vca_type=vca_type,
2176                 )
2177                 # Once some primitive has been executed, check and record in db whether terminate primitives need to be executed
2178 0                 if check_if_terminated_needed:
2179 0                     if config_descriptor.get("terminate-config-primitive"):
2180 0                         self.update_db_2(
2181                             "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2182                         )
2183 0                     check_if_terminated_needed = False
2184
2185                 # TODO register in database that primitive is done
2186
2187             # STEP 7 Configure metrics
2188 0             if vca_type == "helm" or vca_type == "helm-v3":
2189                 # TODO: review for those cases where the helm chart is a reference and
2190                 # is not part of the NF package
2191 0                 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2192                     ee_id=ee_id,
2193                     artifact_path=artifact_path,
2194                     ee_config_descriptor=ee_config_descriptor,
2195                     vnfr_id=vnfr_id,
2196                     nsr_id=nsr_id,
2197                     target_ip=rw_mgmt_ip,
2198                 )
2199 0                 if prometheus_jobs:
2200 0                     self.update_db_2(
2201                         "nsrs",
2202                         nsr_id,
2203                         {db_update_entry + "prometheus_jobs": prometheus_jobs},
2204                     )
2205
2206 0                     for job in prometheus_jobs:
2207 0                         self.db.set_one(
2208                             "prometheus_jobs",
2209                             {"job_name": job["job_name"]},
2210                             job,
2211                             upsert=True,
2212                             fail_on_empty=False,
2213                         )
2214
2215 0             step = "instantiated at VCA"
2216 0             self.logger.debug(logging_text + step)
2217
2218 0             self._write_configuration_status(
2219                 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2220             )
2221
2222 0         except Exception as e:  # TODO not use Exception but N2VC exception
2223             # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2224 0             if not isinstance(
2225                 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2226             ):
2227 0                 self.logger.error(
2228                     "Exception while {} : {}".format(step, e), exc_info=True
2229                 )
2230 0             self._write_configuration_status(
2231                 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2232             )
2233 0             raise LcmException("{} {}".format(step, e)) from e
2234
2235 1     def _write_ns_status(
2236         self,
2237         nsr_id: str,
2238         ns_state: str,
2239         current_operation: str,
2240         current_operation_id: str,
2241         error_description: str = None,
2242         error_detail: str = None,
2243         other_update: dict = None,
2244     ):
2245         """
2246         Update db_nsr fields.
2247         :param nsr_id:
2248         :param ns_state:
2249         :param current_operation:
2250         :param current_operation_id:
2251         :param error_description:
2252         :param error_detail:
2253         :param other_update: Other required changes to the database, if provided; will be cleared
2254         :return:
2255         """
2256 1         try:
2257 1             db_dict = other_update or {}
2258 1             db_dict[
2259                 "_admin.nslcmop"
2260             ] = current_operation_id  # for backward compatibility
2261 1             db_dict["_admin.current-operation"] = current_operation_id
2262 1             db_dict["_admin.operation-type"] = (
2263                 current_operation if current_operation != "IDLE" else None
2264             )
2265 1             db_dict["currentOperation"] = current_operation
2266 1             db_dict["currentOperationID"] = current_operation_id
2267 1             db_dict["errorDescription"] = error_description
2268 1             db_dict["errorDetail"] = error_detail
2269
2270 1             if ns_state:
2271 1                 db_dict["nsState"] = ns_state
2272 1             self.update_db_2("nsrs", nsr_id, db_dict)
2273 0         except DbException as e:
2274 0             self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2275
2276 1     def _write_op_status(
2277         self,
2278         op_id: str,
2279         stage: list = None,
2280         error_message: str = None,
2281         queuePosition: int = 0,
2282         operation_state: str = None,
2283         other_update: dict = None,
2284     ):
2285 1         try:
2286 1             db_dict = other_update or {}
2287 1             db_dict["queuePosition"] = queuePosition
2288 1             if isinstance(stage, list):
2289 1                 db_dict["stage"] = stage[0]
2290 1                 db_dict["detailed-status"] = " ".join(stage)
2291 1             elif stage is not None:
2292 1                 db_dict["stage"] = str(stage)
2293
2294 1             if error_message is not None:
2295 1                 db_dict["errorMessage"] = error_message
2296 1             if operation_state is not None:
2297 1                 db_dict["operationState"] = operation_state
2298 1                 db_dict["statusEnteredTime"] = time()
2299 1             self.update_db_2("nslcmops", op_id, db_dict)
2300 0         except DbException as e:
2301 0             self.logger.warn(
2302                 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2303             )
2304
2305 1     def _write_all_config_status(self, db_nsr: dict, status: str):
2306 0         try:
2307 0             nsr_id = db_nsr["_id"]
2308             # configurationStatus
2309 0             config_status = db_nsr.get("configurationStatus")
2310 0             if config_status:
2311 0                 db_nsr_update = {
2312                     "configurationStatus.{}.status".format(index): status
2313                     for index, v in enumerate(config_status)
2314                     if v
2315                 }
2316                 # update status
2317 0                 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2318
2319 0         except DbException as e:
2320 0             self.logger.warn(
2321                 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2322             )
2323
2324 1     def _write_configuration_status(
2325         self,
2326         nsr_id: str,
2327         vca_index: int,
2328         status: str = None,
2329         element_under_configuration: str = None,
2330         element_type: str = None,
2331         other_update: dict = None,
2332     ):
2333         # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2334         #                   .format(vca_index, status))
2335
2336 0         try:
2337 0             db_path = "configurationStatus.{}.".format(vca_index)
2338 0             db_dict = other_update or {}
2339 0             if status:
2340 0                 db_dict[db_path + "status"] = status
2341 0             if element_under_configuration:
2342 0                 db_dict[
2343                     db_path + "elementUnderConfiguration"
2344                 ] = element_under_configuration
2345 0             if element_type:
2346 0                 db_dict[db_path + "elementType"] = element_type
2347 0             self.update_db_2("nsrs", nsr_id, db_dict)
2348 0         except DbException as e:
2349 0             self.logger.warn(
2350                 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2351                     status, nsr_id, vca_index, e
2352                 )
2353             )
2354
2355 1     async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2356         Checks and computes the placement (vim account where to deploy). If it is decided by an external tool, it
2357         sends the request via kafka and waits until the result is written to the database (nslcmops _admin.pla).
2358         The database is used because the result can be obtained from a different LCM worker in case of HA.
2359         Database is used because the result can be obtained from a different LCM worker in case of HA.
2360         :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2361         :param db_nslcmop: database content of nslcmop
2362         :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2363         :return: True if some modification is done. Modifies the database vnfrs and the db_vnfrs parameter with the
2364             computed 'vim-account-id'
2365         """
2366 0         modified = False
2367 0         nslcmop_id = db_nslcmop["_id"]
2368 0         placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2369 0         if placement_engine == "PLA":
2370 0             self.logger.debug(
2371                 logging_text + "Invoke and wait for placement optimization"
2372             )
2373 0             await self.msg.aiowrite(
2374                 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2375             )
2376 0             db_poll_interval = 5
2377 0             wait = db_poll_interval * 10
2378 0             pla_result = None
2379 0             while not pla_result and wait >= 0:
2380 0                 await asyncio.sleep(db_poll_interval)
2381 0                 wait -= db_poll_interval
2382 0                 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2383 0                 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2384
2385 0             if not pla_result:
2386 0                 raise LcmException(
2387                     "Placement timeout for nslcmopId={}".format(nslcmop_id)
2388                 )
2389
2390 0             for pla_vnf in pla_result["vnf"]:
2391 0                 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2392 0                 if not pla_vnf.get("vimAccountId") or not vnfr:
2393 0                     continue
2394 0                 modified = True
2395 0                 self.db.set_one(
2396                     "vnfrs",
2397                     {"_id": vnfr["_id"]},
2398                     {"vim-account-id": pla_vnf["vimAccountId"]},
2399                 )
2400                 # Modifies db_vnfrs
2401 0                 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2402 0         return modified
2403
2404 1     def update_nsrs_with_pla_result(self, params):
2405 0         try:
2406 0             nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2407 0             self.update_db_2(
2408                 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2409             )
2410 0         except Exception as e:
2411 0             self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2412
2413 1     async def instantiate(self, nsr_id, nslcmop_id):
2414         """
2415
2416         :param nsr_id: ns instance to deploy
2417         :param nslcmop_id: operation to run
2418         :return:
2419         """
2420
2421         # Try to lock HA task here
2422 0         task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2423 0         if not task_is_locked_by_me:
2424 0             self.logger.debug(
2425                 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2426             )
2427 0             return
2428
2429 0         logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2430 0         self.logger.debug(logging_text + "Enter")
2431
2432         # get all needed from database
2433
2434         # database nsrs record
2435 0         db_nsr = None
2436
2437         # database nslcmops record
2438 0         db_nslcmop = None
2439
2440         # update operation on nsrs
2441 0         db_nsr_update = {}
2442         # update operation on nslcmops
2443 0         db_nslcmop_update = {}
2444
2445 0         nslcmop_operation_state = None
2446 0         db_vnfrs = {}  # vnf's info indexed by member-index
2447         # n2vc_info = {}
2448 0         tasks_dict_info = {}  # from task to info text
2449 0         exc = None
2450 0         error_list = []
2451 0         stage = [
2452             "Stage 1/5: preparation of the environment.",
2453             "Waiting for previous operations to terminate.",
2454             "",
2455         ]
2456         # ^ stage, step, VIM progress
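             # i.e. stage[0] holds the overall stage ("Stage X/5: ..."), stage[1] the current step,
             # and stage[2] the VIM progress detail; _write_op_status publishes this list into the
             # operation record.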
2457 0         try:
2458             # wait for any previous tasks in progress
2459 0             await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2460
2461             # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2462 0             stage[1] = "Reading from database."
2463             # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2464 0             db_nsr_update["detailed-status"] = "creating"
2465 0             db_nsr_update["operational-status"] = "init"
2466 0             self._write_ns_status(
2467                 nsr_id=nsr_id,
2468                 ns_state="BUILDING",
2469                 current_operation="INSTANTIATING",
2470                 current_operation_id=nslcmop_id,
2471                 other_update=db_nsr_update,
2472             )
2473 0             self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2474
2475             # read from db: operation
2476 0             stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2477 0             db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2478 0             if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2479 0                 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2480                     db_nslcmop["operationParams"]["additionalParamsForVnf"]
2481                 )
2482 0             ns_params = db_nslcmop.get("operationParams")
2483 0             if ns_params and ns_params.get("timeout_ns_deploy"):
2484 0                 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2485             else:
2486 0                 timeout_ns_deploy = self.timeout.ns_deploy
2487
2488             # read from db: ns
2489 0             stage[1] = "Getting nsr={} from db.".format(nsr_id)
2490 0             self.logger.debug(logging_text + stage[1])
2491 0             db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2492 0             stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2493 0             self.logger.debug(logging_text + stage[1])
2494 0             nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2495 0             self.fs.sync(db_nsr["nsd-id"])
2496 0             db_nsr["nsd"] = nsd
2497             # nsr_name = db_nsr["name"]   # TODO short-name??
2498
2499             # read from db: vnf's of this ns
2500 0             stage[1] = "Getting vnfrs from db."
2501 0             self.logger.debug(logging_text + stage[1])
2502 0             db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2503
2504             # read from db: vnfd's for every vnf
2505 0             db_vnfds = []  # every vnfd data
2506
2507             # for each vnf in ns, read vnfd
2508 0             for vnfr in db_vnfrs_list:
2509 0                 if vnfr.get("kdur"):
2510 0                     kdur_list = []
2511 0                     for kdur in vnfr["kdur"]:
2512 0                         if kdur.get("additionalParams"):
2513 0                             kdur["additionalParams"] = json.loads(
2514                                 kdur["additionalParams"]
2515                             )
2516 0                         kdur_list.append(kdur)
2517 0                     vnfr["kdur"] = kdur_list
2518
2519 0                 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2520 0                 vnfd_id = vnfr["vnfd-id"]
2521 0                 vnfd_ref = vnfr["vnfd-ref"]
2522 0                 self.fs.sync(vnfd_id)
2523
2524                 # if we don't have this vnfd yet, read it from db
2525 0                 if vnfd_id not in db_vnfds:
2526                     # read from db
2527 0                     stage[1] = "Getting vnfd={} id='{}' from db.".format(
2528                         vnfd_id, vnfd_ref
2529                     )
2530 0                     self.logger.debug(logging_text + stage[1])
2531 0                     vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2532
2533                     # store vnfd
2534 0                     db_vnfds.append(vnfd)
2535
2536             # Get or generates the _admin.deployed.VCA list
2537 0             vca_deployed_list = None
2538 0             if db_nsr["_admin"].get("deployed"):
2539 0                 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2540 0             if vca_deployed_list is None:
2541 0                 vca_deployed_list = []
2542 0                 configuration_status_list = []
2543 0                 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2544 0                 db_nsr_update["configurationStatus"] = configuration_status_list
2545                 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2546 0                 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2547 0             elif isinstance(vca_deployed_list, dict):
2548                 # maintain backward compatibility: convert a dict into a list in the database
2549 0                 vca_deployed_list = list(vca_deployed_list.values())
2550 0                 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2551 0                 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2552
2553 0             if not isinstance(
2554                 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2555             ):
2556 0                 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2557 0                 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2558
2559             # set state to INSTANTIATED. Once instantiated, NBI will not delete it directly
2560 0             db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2561 0             self.update_db_2("nsrs", nsr_id, db_nsr_update)
2562 0             self.db.set_list(
2563                 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2564             )
2565
2566             # n2vc_redesign STEP 2 Deploy Network Scenario
2567 0             stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2568 0             self._write_op_status(op_id=nslcmop_id, stage=stage)
2569
2570 0             stage[1] = "Deploying KDUs."
2571             # self.logger.debug(logging_text + "Before deploy_kdus")
2572             # Call deploy_kdus in case the "vdu:kdu" param exists
2573 0             await self.deploy_kdus(
2574                 logging_text=logging_text,
2575                 nsr_id=nsr_id,
2576                 nslcmop_id=nslcmop_id,
2577                 db_vnfrs=db_vnfrs,
2578                 db_vnfds=db_vnfds,
2579                 task_instantiation_info=tasks_dict_info,
2580             )
2581
2582 0             stage[1] = "Getting VCA public key."
2583             # n2vc_redesign STEP 1 Get VCA public ssh-key
2584             # feature 1429. Add n2vc public key to needed VMs
2585 0             n2vc_key = self.n2vc.get_public_key()
2586 0             n2vc_key_list = [n2vc_key]
2587 0             if self.vca_config.public_key:
2588 0                 n2vc_key_list.append(self.vca_config.public_key)
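                 # n2vc_key_list is handed to instantiate_RO below so the VCA public key(s) can be
                 # added to the VMs that need them (feature 1429 above).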
2589
2590 0             stage[1] = "Deploying NS at VIM."
2591 0             task_ro = asyncio.ensure_future(
2592                 self.instantiate_RO(
2593                     logging_text=logging_text,
2594                     nsr_id=nsr_id,
2595                     nsd=nsd,
2596                     db_nsr=db_nsr,
2597                     db_nslcmop=db_nslcmop,
2598                     db_vnfrs=db_vnfrs,
2599                     db_vnfds=db_vnfds,
2600                     n2vc_key_list=n2vc_key_list,
2601                     stage=stage,
2602                 )
2603             )
2604 0             self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2605 0             tasks_dict_info[task_ro] = "Deploying at VIM"
2606
2607             # n2vc_redesign STEP 3 to 6 Deploy N2VC
2608 0             stage[1] = "Deploying Execution Environments."
2609 0             self.logger.debug(logging_text + stage[1])
2610
2611             # create namespace and certificate if any helm based EE is present in the NS
2612 0             if check_helm_ee_in_ns(db_vnfds):
2613                 # TODO: create EE namespace
2614                 # create TLS certificates
2615 0                 await self.vca_map["helm-v3"].create_tls_certificate(
2616                     secret_name="ee-tls-{}".format(nsr_id),
2617                     dns_prefix="*",
2618                     nsr_id=nsr_id,
2619                     usage="server auth",
2620                 )
2621
2622 0             nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
2623 0             for vnf_profile in get_vnf_profiles(nsd):
2624 0                 vnfd_id = vnf_profile["vnfd-id"]
2625 0                 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2626 0                 member_vnf_index = str(vnf_profile["id"])
2627 0                 db_vnfr = db_vnfrs[member_vnf_index]
2628 0                 base_folder = vnfd["_admin"]["storage"]
2629 0                 vdu_id = None
2630 0                 vdu_index = 0
2631 0                 vdu_name = None
2632 0                 kdu_name = None
2633
2634                 # Get additional parameters
2635 0                 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2636 0                 if db_vnfr.get("additionalParamsForVnf"):
2637 0                     deploy_params.update(
2638                         parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2639                     )
2640
2641 0                 descriptor_config = get_configuration(vnfd, vnfd["id"])
2642 0                 if descriptor_config:
2643 0                     self._deploy_n2vc(
2644                         logging_text=logging_text
2645                         + "member_vnf_index={} ".format(member_vnf_index),
2646                         db_nsr=db_nsr,
2647                         db_vnfr=db_vnfr,
2648                         nslcmop_id=nslcmop_id,
2649                         nsr_id=nsr_id,
2650                         nsi_id=nsi_id,
2651                         vnfd_id=vnfd_id,
2652                         vdu_id=vdu_id,
2653                         kdu_name=kdu_name,
2654                         member_vnf_index=member_vnf_index,
2655                         vdu_index=vdu_index,
2656                         vdu_name=vdu_name,
2657                         deploy_params=deploy_params,
2658                         descriptor_config=descriptor_config,
2659                         base_folder=base_folder,
2660                         task_instantiation_info=tasks_dict_info,
2661                         stage=stage,
2662                     )
2663
2664                 # Deploy charms for each VDU that supports one.
2665 0                 for vdud in get_vdu_list(vnfd):
2666 0                     vdu_id = vdud["id"]
2667 0                     descriptor_config = get_configuration(vnfd, vdu_id)
2668 0                     vdur = find_in_list(
2669                         db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2670                     )
2671
2672 0                     if vdur.get("additionalParams"):
2673 0                         deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2674                     else:
2675 0                         deploy_params_vdu = deploy_params
2676 0                     deploy_params_vdu["OSM"] = get_osm_params(
2677                         db_vnfr, vdu_id, vdu_count_index=0
2678                     )
2679 0                     vdud_count = get_number_of_instances(vnfd, vdu_id)
2680
2681 0                     self.logger.debug("VDUD > {}".format(vdud))
2682 0                     self.logger.debug(
2683                         "Descriptor config > {}".format(descriptor_config)
2684                     )
2685 0                     if descriptor_config:
2686 0                         vdu_name = None
2687 0                         kdu_name = None
2688 0                         for vdu_index in range(vdud_count):
2689                             # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2690 0                             self._deploy_n2vc(
2691                                 logging_text=logging_text
2692                                 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2693                                     member_vnf_index, vdu_id, vdu_index
2694                                 ),
2695                                 db_nsr=db_nsr,
2696                                 db_vnfr=db_vnfr,
2697                                 nslcmop_id=nslcmop_id,
2698                                 nsr_id=nsr_id,
2699                                 nsi_id=nsi_id,
2700                                 vnfd_id=vnfd_id,
2701                                 vdu_id=vdu_id,
2702                                 kdu_name=kdu_name,
2703                                 member_vnf_index=member_vnf_index,
2704                                 vdu_index=vdu_index,
2705                                 vdu_name=vdu_name,
2706                                 deploy_params=deploy_params_vdu,
2707                                 descriptor_config=descriptor_config,
2708                                 base_folder=base_folder,
2709                                 task_instantiation_info=tasks_dict_info,
2710                                 stage=stage,
2711                             )
2712 0                 for kdud in get_kdu_list(vnfd):
2713 0                     kdu_name = kdud["name"]
2714 0                     descriptor_config = get_configuration(vnfd, kdu_name)
2715 0                     if descriptor_config:
2716 0                         vdu_id = None
2717 0                         vdu_index = 0
2718 0                         vdu_name = None
2719 0                         kdur = next(
2720                             x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
2721                         )
2722 0                         deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2723 0                         if kdur.get("additionalParams"):
2724 0                             deploy_params_kdu.update(
2725                                 parse_yaml_strings(kdur["additionalParams"].copy())
2726                             )
2727
2728 0                         self._deploy_n2vc(
2729                             logging_text=logging_text,
2730                             db_nsr=db_nsr,
2731                             db_vnfr=db_vnfr,
2732                             nslcmop_id=nslcmop_id,
2733                             nsr_id=nsr_id,
2734                             nsi_id=nsi_id,
2735                             vnfd_id=vnfd_id,
2736                             vdu_id=vdu_id,
2737                             kdu_name=kdu_name,
2738                             member_vnf_index=member_vnf_index,
2739                             vdu_index=vdu_index,
2740                             vdu_name=vdu_name,
2741                             deploy_params=deploy_params_kdu,
2742                             descriptor_config=descriptor_config,
2743                             base_folder=base_folder,
2744                             task_instantiation_info=tasks_dict_info,
2745                             stage=stage,
2746                         )
2747
2748             # Check if this NS has a charm configuration
2749 0             descriptor_config = nsd.get("ns-configuration")
2750 0             if descriptor_config and descriptor_config.get("juju"):
2751 0                 vnfd_id = None
2752 0                 db_vnfr = None
2753 0                 member_vnf_index = None
2754 0                 vdu_id = None
2755 0                 kdu_name = None
2756 0                 vdu_index = 0
2757 0                 vdu_name = None
2758
2759                 # Get additional parameters
2760 0                 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2761 0                 if db_nsr.get("additionalParamsForNs"):
2762 0                     deploy_params.update(
2763                         parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2764                     )
2765 0                 base_folder = nsd["_admin"]["storage"]
2766 0                 self._deploy_n2vc(
2767                     logging_text=logging_text,
2768                     db_nsr=db_nsr,
2769                     db_vnfr=db_vnfr,
2770                     nslcmop_id=nslcmop_id,
2771                     nsr_id=nsr_id,
2772                     nsi_id=nsi_id,
2773                     vnfd_id=vnfd_id,
2774                     vdu_id=vdu_id,
2775                     kdu_name=kdu_name,
2776                     member_vnf_index=member_vnf_index,
2777                     vdu_index=vdu_index,
2778                     vdu_name=vdu_name,
2779                     deploy_params=deploy_params,
2780                     descriptor_config=descriptor_config,
2781                     base_folder=base_folder,
2782                     task_instantiation_info=tasks_dict_info,
2783                     stage=stage,
2784                 )
2785
2786             # the rest of the work is done in the finally block
2787
2788 0         except (
2789             ROclient.ROClientException,
2790             DbException,
2791             LcmException,
2792             N2VCException,
2793         ) as e:
2794 0             self.logger.error(
2795                 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2796             )
2797 0             exc = e
2798 0         except asyncio.CancelledError:
2799 0             self.logger.error(
2800                 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2801             )
2802 0             exc = "Operation was cancelled"
2803 0         except Exception as e:
2804 0             exc = traceback.format_exc()
2805 0             self.logger.critical(
2806                 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2807                 exc_info=True,
2808             )
2809         finally:
2810 0             if exc:
2811 0                 error_list.append(str(exc))
2812 0             try:
2813                 # wait for pending tasks
2814 0                 if tasks_dict_info:
2815 0                     stage[1] = "Waiting for instantiate pending tasks."
2816 0                     self.logger.debug(logging_text + stage[1])
2817 0                     error_list += await self._wait_for_tasks(
2818                         logging_text,
2819                         tasks_dict_info,
2820                         timeout_ns_deploy,
2821                         stage,
2822                         nslcmop_id,
2823                         nsr_id=nsr_id,
2824                     )
2825 0                 stage[1] = stage[2] = ""
2826 0             except asyncio.CancelledError:
2827 0                 error_list.append("Cancelled")
2828                 # TODO cancel all tasks
2829 0             except Exception as exc:
2830 0                 error_list.append(str(exc))
2831
2832             # update operation-status
2833 0             db_nsr_update["operational-status"] = "running"
2834             # let's begin with VCA 'configured' status (later we can change it)
2835 0             db_nsr_update["config-status"] = "configured"
2836 0             for task, task_name in tasks_dict_info.items():
2837 0                 if not task.done() or task.cancelled() or task.exception():
2838 0                     if task_name.startswith(self.task_name_deploy_vca):
2839                         # An N2VC task is pending
2840 0                         db_nsr_update["config-status"] = "failed"
2841                     else:
2842                         # RO or KDU task is pending
2843 0                         db_nsr_update["operational-status"] = "failed"
2844
2845             # update status at database
2846 0             if error_list:
2847 0                 error_detail = ". ".join(error_list)
2848 0                 self.logger.error(logging_text + error_detail)
2849 0                 error_description_nslcmop = "{} Detail: {}".format(
2850                     stage[0], error_detail
2851                 )
2852 0                 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2853                     nslcmop_id, stage[0]
2854                 )
2855
2856 0                 db_nsr_update["detailed-status"] = (
2857                     error_description_nsr + " Detail: " + error_detail
2858                 )
2859 0                 db_nslcmop_update["detailed-status"] = error_detail
2860 0                 nslcmop_operation_state = "FAILED"
2861 0                 ns_state = "BROKEN"
2862             else:
2863 0                 error_detail = None
2864 0                 error_description_nsr = error_description_nslcmop = None
2865 0                 ns_state = "READY"
2866 0                 db_nsr_update["detailed-status"] = "Done"
2867 0                 db_nslcmop_update["detailed-status"] = "Done"
2868 0                 nslcmop_operation_state = "COMPLETED"
2869
2870 0             if db_nsr:
2871 0                 self._write_ns_status(
2872                     nsr_id=nsr_id,
2873                     ns_state=ns_state,
2874                     current_operation="IDLE",
2875                     current_operation_id=None,
2876                     error_description=error_description_nsr,
2877                     error_detail=error_detail,
2878                     other_update=db_nsr_update,
2879                 )
2880 0             self._write_op_status(
2881                 op_id=nslcmop_id,
2882                 stage="",
2883                 error_message=error_description_nslcmop,
2884                 operation_state=nslcmop_operation_state,
2885                 other_update=db_nslcmop_update,
2886             )
2887
2888 0             if nslcmop_operation_state:
2889 0                 try:
2890 0                     await self.msg.aiowrite(
2891                         "ns",
2892                         "instantiated",
2893                         {
2894                             "nsr_id": nsr_id,
2895                             "nslcmop_id": nslcmop_id,
2896                             "operationState": nslcmop_operation_state,
2897                         },
2898                         loop=self.loop,
2899                     )
2900 0                 except Exception as e:
2901 0                     self.logger.error(
2902                         logging_text + "kafka_write notification Exception {}".format(e)
2903                     )
2904
2905 0             self.logger.debug(logging_text + "Exit")
2906 0             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2907
2908 1     def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
2909 0         if vnfd_id not in cached_vnfds:
2910 0             cached_vnfds[vnfd_id] = self.db.get_one(
2911                 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
2912             )
2913 0         return cached_vnfds[vnfd_id]
2914
2915 1     def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2916 0         if vnf_profile_id not in cached_vnfrs:
2917 0             cached_vnfrs[vnf_profile_id] = self.db.get_one(
2918                 "vnfrs",
2919                 {
2920                     "member-vnf-index-ref": vnf_profile_id,
2921                     "nsr-id-ref": nsr_id,
2922                 },
2923             )
2924 0         return cached_vnfrs[vnf_profile_id]
2925
2926 1     def _is_deployed_vca_in_relation(
2927         self, vca: DeployedVCA, relation: Relation
2928     ) -> bool:
2929 0         found = False
2930 0         for endpoint in (relation.provider, relation.requirer):
2931 0             if endpoint["kdu-resource-profile-id"]:
2932 0                 continue
2933 0             found = (
2934                 vca.vnf_profile_id == endpoint.vnf_profile_id
2935                 and vca.vdu_profile_id == endpoint.vdu_profile_id
2936                 and vca.execution_environment_ref == endpoint.execution_environment_ref
2937             )
2938 0             if found:
2939 0                 break
2940 0         return found
2941
2942 1     def _update_ee_relation_data_with_implicit_data(
2943         self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2944     ):
2945 0         ee_relation_data = safe_get_ee_relation(
2946             nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2947         )
2948 0         ee_relation_level = EELevel.get_level(ee_relation_data)
2949 0         if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2950             "execution-environment-ref"
2951         ]:
2952 0             vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2953 0             vnfd_id = vnf_profile["vnfd-id"]
2954 0             project = nsd["_admin"]["projects_read"][0]
2955 0             db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
2956 0             entity_id = (
2957                 vnfd_id
2958                 if ee_relation_level == EELevel.VNF
2959                 else ee_relation_data["vdu-profile-id"]
2960             )
2961 0             ee = get_juju_ee_ref(db_vnfd, entity_id)
2962 0             if not ee:
2963 0                 raise Exception(
2964                     f"no execution environments found for ee_relation {ee_relation_data}"
2965                 )
2966 0             ee_relation_data["execution-environment-ref"] = ee["id"]
2967 0         return ee_relation_data
2968
2969 1     def _get_ns_relations(
2970         self,
2971         nsr_id: str,
2972         nsd: Dict[str, Any],
2973         vca: DeployedVCA,
2974         cached_vnfds: Dict[str, Any],
2975     ) -> List[Relation]:
2976 0         relations = []
2977 0         db_ns_relations = get_ns_configuration_relation_list(nsd)
2978 0         for r in db_ns_relations:
2979 0             provider_dict = None
2980 0             requirer_dict = None
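                 # A relation entry either carries explicit "provider"/"requirer" dicts or a legacy
                 # two-element "entities" list, where entities[0] acts as provider and entities[1] as
                 # requirer (the NS itself whenever the entity id matches nsd["id"]).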
2981 0             if all(key in r for key in ("provider", "requirer")):
2982 0                 provider_dict = r["provider"]
2983 0                 requirer_dict = r["requirer"]
2984 0             elif "entities" in r:
2985 0                 provider_id = r["entities"][0]["id"]
2986 0                 provider_dict = {
2987                     "nsr-id": nsr_id,
2988                     "endpoint": r["entities"][0]["endpoint"],
2989                 }
2990 0                 if provider_id != nsd["id"]:
2991 0                     provider_dict["vnf-profile-id"] = provider_id
2992 0                 requirer_id = r["entities"][1]["id"]
2993 0                 requirer_dict = {
2994                     "nsr-id": nsr_id,
2995                     "endpoint": r["entities"][1]["endpoint"],
2996                 }
2997 0                 if requirer_id != nsd["id"]:
2998 0                     requirer_dict["vnf-profile-id"] = requirer_id
2999             else:
3000 0                 raise Exception(
3001                     "provider/requirer or entities must be included in the relation."
3002                 )
3003 0             relation_provider = self._update_ee_relation_data_with_implicit_data(
3004                 nsr_id, nsd, provider_dict, cached_vnfds
3005             )
3006 0             relation_requirer = self._update_ee_relation_data_with_implicit_data(
3007                 nsr_id, nsd, requirer_dict, cached_vnfds
3008             )
3009 0             provider = EERelation(relation_provider)
3010 0             requirer = EERelation(relation_requirer)
3011 0             relation = Relation(r["name"], provider, requirer)
3012 0             vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3013 0             if vca_in_relation:
3014 0                 relations.append(relation)
3015 0         return relations
3016
3017 1     def _get_vnf_relations(
3018         self,
3019         nsr_id: str,
3020         nsd: Dict[str, Any],
3021         vca: DeployedVCA,
3022         cached_vnfds: Dict[str, Any],
3023     ) -> List[Relation]:
3024 0         relations = []
3025 0         vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3026 0         vnf_profile_id = vnf_profile["id"]
3027 0         vnfd_id = vnf_profile["vnfd-id"]
3028 0         project = nsd["_admin"]["projects_read"][0]
3029 0         db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3030 0         db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3031 0         for r in db_vnf_relations:
3032 0             provider_dict = None
3033 0             requirer_dict = None
3034 0             if all(key in r for key in ("provider", "requirer")):
3035 0                 provider_dict = r["provider"]
3036 0                 requirer_dict = r["requirer"]
3037 0             elif "entities" in r:
3038 0                 provider_id = r["entities"][0]["id"]
3039 0                 provider_dict = {
3040                     "nsr-id": nsr_id,
3041                     "vnf-profile-id": vnf_profile_id,
3042                     "endpoint": r["entities"][0]["endpoint"],
3043                 }
3044 0                 if provider_id != vnfd_id:
3045 0                     provider_dict["vdu-profile-id"] = provider_id
3046 0                 requirer_id = r["entities"][1]["id"]
3047 0                 requirer_dict = {
3048                     "nsr-id": nsr_id,
3049                     "vnf-profile-id": vnf_profile_id,
3050                     "endpoint": r["entities"][1]["endpoint"],
3051                 }
3052 0                 if requirer_id != vnfd_id:
3053 0                     requirer_dict["vdu-profile-id"] = requirer_id
3054             else:
3055 0                 raise Exception(
3056                     "provider/requirer or entities must be included in the relation."
3057                 )
3058 0             relation_provider = self._update_ee_relation_data_with_implicit_data(
3059                 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3060             )
3061 0             relation_requirer = self._update_ee_relation_data_with_implicit_data(
3062                 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3063             )
3064 0             provider = EERelation(relation_provider)
3065 0             requirer = EERelation(relation_requirer)
3066 0             relation = Relation(r["name"], provider, requirer)
3067 0             vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3068 0             if vca_in_relation:
3069 0                 relations.append(relation)
3070 0         return relations
3071
3072 1     def _get_kdu_resource_data(
3073         self,
3074         ee_relation: EERelation,
3075         db_nsr: Dict[str, Any],
3076         cached_vnfds: Dict[str, Any],
3077     ) -> DeployedK8sResource:
3078 0         nsd = get_nsd(db_nsr)
3079 0         vnf_profiles = get_vnf_profiles(nsd)
3080 0         vnfd_id = find_in_list(
3081             vnf_profiles,
3082             lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3083         )["vnfd-id"]
3084 0         project = nsd["_admin"]["projects_read"][0]
3085 0         db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3086 0         kdu_resource_profile = get_kdu_resource_profile(
3087             db_vnfd, ee_relation.kdu_resource_profile_id
3088         )
3089 0         kdu_name = kdu_resource_profile["kdu-name"]
3090 0         deployed_kdu, _ = get_deployed_kdu(
3091             db_nsr.get("_admin", ()).get("deployed", ()),
3092             kdu_name,
3093             ee_relation.vnf_profile_id,
3094         )
3095 0         deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3096 0         return deployed_kdu
3097
3098 1     def _get_deployed_component(
3099         self,
3100         ee_relation: EERelation,
3101         db_nsr: Dict[str, Any],
3102         cached_vnfds: Dict[str, Any],
3103     ) -> DeployedComponent:
3104 0         nsr_id = db_nsr["_id"]
3105 0         deployed_component = None
3106 0         ee_level = EELevel.get_level(ee_relation)
3107 0         if ee_level == EELevel.NS:
3108 0             vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3109 0             if vca:
3110 0                 deployed_component = DeployedVCA(nsr_id, vca)
3111 0         elif ee_level == EELevel.VNF:
3112 0             vca = get_deployed_vca(
3113                 db_nsr,
3114                 {
3115                     "vdu_id": None,
3116                     "member-vnf-index": ee_relation.vnf_profile_id,
3117                     "ee_descriptor_id": ee_relation.execution_environment_ref,
3118                 },
3119             )
3120 0             if vca:
3121 0                 deployed_component = DeployedVCA(nsr_id, vca)
3122 0         elif ee_level == EELevel.VDU:
3123 0             vca = get_deployed_vca(
3124                 db_nsr,
3125                 {
3126                     "vdu_id": ee_relation.vdu_profile_id,
3127                     "member-vnf-index": ee_relation.vnf_profile_id,
3128                     "ee_descriptor_id": ee_relation.execution_environment_ref,
3129                 },
3130             )
3131 0             if vca:
3132 0                 deployed_component = DeployedVCA(nsr_id, vca)
3133 0         elif ee_level == EELevel.KDU:
3134 0             kdu_resource_data = self._get_kdu_resource_data(
3135                 ee_relation, db_nsr, cached_vnfds
3136             )
3137 0             if kdu_resource_data:
3138 0                 deployed_component = DeployedK8sResource(kdu_resource_data)
3139 0         return deployed_component
3140
3141 1     async def _add_relation(
3142         self,
3143         relation: Relation,
3144         vca_type: str,
3145         db_nsr: Dict[str, Any],
3146         cached_vnfds: Dict[str, Any],
3147         cached_vnfrs: Dict[str, Any],
3148     ) -> bool:
3149 0         deployed_provider = self._get_deployed_component(
3150             relation.provider, db_nsr, cached_vnfds
3151         )
3152 0         deployed_requirer = self._get_deployed_component(
3153             relation.requirer, db_nsr, cached_vnfds
3154         )
3155 0         if (
3156             deployed_provider
3157             and deployed_requirer
3158             and deployed_provider.config_sw_installed
3159             and deployed_requirer.config_sw_installed
3160         ):
3161 0             provider_db_vnfr = (
3162                 self._get_vnfr(
3163                     relation.provider.nsr_id,
3164                     relation.provider.vnf_profile_id,
3165                     cached_vnfrs,
3166                 )
3167                 if relation.provider.vnf_profile_id
3168                 else None
3169             )
3170 0             requirer_db_vnfr = (
3171                 self._get_vnfr(
3172                     relation.requirer.nsr_id,
3173                     relation.requirer.vnf_profile_id,
3174                     cached_vnfrs,
3175                 )
3176                 if relation.requirer.vnf_profile_id
3177                 else None
3178             )
3179 0             provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3180 0             requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3181 0             provider_relation_endpoint = RelationEndpoint(
3182                 deployed_provider.ee_id,
3183                 provider_vca_id,
3184                 relation.provider.endpoint,
3185             )
3186 0             requirer_relation_endpoint = RelationEndpoint(
3187                 deployed_requirer.ee_id,
3188                 requirer_vca_id,
3189                 relation.requirer.endpoint,
3190             )
3191 0             await self.vca_map[vca_type].add_relation(
3192                 provider=provider_relation_endpoint,
3193                 requirer=requirer_relation_endpoint,
3194             )
3195             # remove entry from relations list
3196 0             return True
3197 0         return False
3198
3199 1     async def _add_vca_relations(
3200         self,
3201         logging_text,
3202         nsr_id,
3203         vca_type: str,
3204         vca_index: int,
3205         timeout: int = 3600,
3206     ) -> bool:
3207         # steps:
3208         # 1. find all relations for this VCA
3209         # 2. wait for other peers related
3210         # 3. add relations
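             # The add step is retried in a loop: every 5 seconds the nsr record is re-read and each
             # pending relation is attempted again, until all relations are added or the timeout expires.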
3211
3212 0         try:
3213             # STEP 1: find all relations for this VCA
3214
3215             # read nsr record
3216 0             db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3217 0             nsd = get_nsd(db_nsr)
3218
3219             # this VCA data
3220 0             deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3221 0             my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3222
3223 0             cached_vnfds = {}
3224 0             cached_vnfrs = {}
3225 0             relations = []
3226 0             relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3227 0             relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3228
3229             # if no relations, terminate
3230 0             if not relations:
3231 0                 self.logger.debug(logging_text + " No relations")
3232 0                 return True
3233
3234 0             self.logger.debug(logging_text + " adding relations {}".format(relations))
3235
3236             # add all relations
3237 0             start = time()
3238             while True:
3239                 # check timeout
3240 0                 now = time()
3241 0                 if now - start >= timeout:
3242 0                     self.logger.error(logging_text + " : timeout adding relations")
3243 0                     return False
3244
3245                 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3246 0                 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3247
3248                 # for each relation, find the related VCAs
3249 0                 for relation in relations.copy():
3250 0                     added = await self._add_relation(
3251                         relation,
3252                         vca_type,
3253                         db_nsr,
3254                         cached_vnfds,
3255                         cached_vnfrs,
3256                     )
3257 0                     if added:
3258 0                         relations.remove(relation)
3259
3260 0                 if not relations:
3261 0                     self.logger.debug("Relations added")
3262 0                     break
3263 0                 await asyncio.sleep(5.0)
3264
3265 0             return True
3266
3267 0         except Exception as e:
3268 0             self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3269 0             return False
3270
3271 1     async def _install_kdu(
3272         self,
3273         nsr_id: str,
3274         nsr_db_path: str,
3275         vnfr_data: dict,
3276         kdu_index: int,
3277         kdud: dict,
3278         vnfd: dict,
3279         k8s_instance_info: dict,
3280         k8params: dict = None,
3281         timeout: int = 600,
3282         vca_id: str = None,
3283     ):
3284 1         try:
3285 1             k8sclustertype = k8s_instance_info["k8scluster-type"]
3286             # Instantiate kdu
3287 1             db_dict_install = {
3288                 "collection": "nsrs",
3289                 "filter": {"_id": nsr_id},
3290                 "path": nsr_db_path,
3291             }
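                 # db_dict_install points the K8s connector at this nsr record so that installation
                 # progress can be reported under nsr_db_path while the install runs.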
3292
3293 1             if k8s_instance_info.get("kdu-deployment-name"):
3294 0                 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3295             else:
3296 1                 kdu_instance = self.k8scluster_map[
3297                     k8sclustertype
3298                 ].generate_kdu_instance_name(
3299                     db_dict=db_dict_install,
3300                     kdu_model=k8s_instance_info["kdu-model"],
3301                     kdu_name=k8s_instance_info["kdu-name"],
3302                 )
3303
3304             # Update the nsrs table with the kdu-instance value
3305 1             self.update_db_2(
3306                 item="nsrs",
3307                 _id=nsr_id,
3308                 _desc={nsr_db_path + ".kdu-instance": kdu_instance},
3309             )
3310
3311             # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3312             # `juju-bundle`. This verification is needed because there is no standard/homogeneous namespace
3313             # between the Helm Charts and Juju Bundles-based KNFs. If we find a way of having a homogeneous
3314             # namespace, this first verification could be removed, and the next step would be done for any kind
3315             # of KNF.
3316             # TODO -> find a way to have a homogeneous namespace between the Helm Charts and Juju Bundles-based
3317             # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3318 1             if k8sclustertype in ("juju", "juju-bundle"):
3319                 # First, verify whether the current namespace is present in `_admin.projects_read` (if not, it means
3320                 # that the user passed a namespace in which they want the KDU to be deployed)
3321 0                 if (
3322                     self.db.count(
3323                         table="nsrs",
3324                         q_filter={
3325                             "_id": nsr_id,
3326                             "_admin.projects_write": k8s_instance_info["namespace"],
3327                             "_admin.projects_read": k8s_instance_info["namespace"],
3328                         },
3329                     )
3330                     > 0
3331                 ):
3332 0                     self.logger.debug(
3333                         f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3334                     )
3335 0                     self.update_db_2(
3336                         item="nsrs",
3337                         _id=nsr_id,
3338                         _desc={f"{nsr_db_path}.namespace": kdu_instance},
3339                     )
3340 0                     k8s_instance_info["namespace"] = kdu_instance
3341
3342 1             await self.k8scluster_map[k8sclustertype].install(
3343                 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3344                 kdu_model=k8s_instance_info["kdu-model"],
3345                 atomic=True,
3346                 params=k8params,
3347                 db_dict=db_dict_install,
3348                 timeout=timeout,
3349                 kdu_name=k8s_instance_info["kdu-name"],
3350                 namespace=k8s_instance_info["namespace"],
3351                 kdu_instance=kdu_instance,
3352                 vca_id=vca_id,
3353             )
3354
3355             # Obtain the services in order to get the management service IP
3356 1             services = await self.k8scluster_map[k8sclustertype].get_services(
3357                 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3358                 kdu_instance=kdu_instance,
3359                 namespace=k8s_instance_info["namespace"],
3360             )
3361
3362             # Obtain management service info (if exists)
3363 1             vnfr_update_dict = {}
3364 1             kdu_config = get_configuration(vnfd, kdud["name"])
3365 1             if kdu_config:
3366 0                 target_ee_list = kdu_config.get("execution-environment-list", [])
3367             else:
3368 1                 target_ee_list = []
3369
3370 1             if services:
3371 0                 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
3372 0                 mgmt_services = [
3373                     service
3374                     for service in kdud.get("service", [])
3375                     if service.get("mgmt-service")
3376                 ]
3377 0                 for mgmt_service in mgmt_services:
3378 0                     for service in services:
3379 0                         if service["name"].startswith(mgmt_service["name"]):
3380                             # Mgmt service found, obtain the service IP
3381 0                             ip = service.get("external_ip", service.get("cluster_ip"))
3382 0                             if isinstance(ip, list) and len(ip) == 1:
3383 0                                 ip = ip[0]
3384
3385 0                             vnfr_update_dict[
3386                                 "kdur.{}.ip-address".format(kdu_index)
3387                             ] = ip
3388
3389                             # Check whether the mgmt IP at the VNF must also be updated
3390 0                             service_external_cp = mgmt_service.get(
3391                                 "external-connection-point-ref"
3392                             )
3393 0                             if service_external_cp:
3394 0                                 if (
3395                                     deep_get(vnfd, ("mgmt-interface", "cp"))
3396                                     == service_external_cp
3397                                 ):
3398 0                                     vnfr_update_dict["ip-address"] = ip
3399
3400 0                                 if find_in_list(
3401                                     target_ee_list,
3402                                     lambda ee: ee.get(
3403                                         "external-connection-point-ref", ""
3404                                     )
3405                                     == service_external_cp,
3406                                 ):
3407 0                                     vnfr_update_dict[
3408                                         "kdur.{}.ip-address".format(kdu_index)
3409                                     ] = ip
3410 0                             break
3411                     else:
3412 0                         self.logger.warn(
3413                             "Mgmt service name: {} not found".format(
3414                                 mgmt_service["name"]
3415                             )
3416                         )
3417
3418 1             vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3419 1             self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
3420
3421 1             kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
3422 1             if (
3423                 kdu_config
3424                 and kdu_config.get("initial-config-primitive")
3425                 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3426             ):
3427 0                 initial_config_primitive_list = kdu_config.get(
3428                     "initial-config-primitive"
3429                 )
3430 0                 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3431
3432 0                 for initial_config_primitive in initial_config_primitive_list:
3433 0                     primitive_params_ = self._map_primitive_params(
3434                         initial_config_primitive, {}, {}
3435                     )
3436
3437 0                     await asyncio.wait_for(
3438                         self.k8scluster_map[k8sclustertype].exec_primitive(
3439                             cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3440                             kdu_instance=kdu_instance,
3441                             primitive_name=initial_config_primitive["name"],
3442                             params=primitive_params_,
3443                             db_dict=db_dict_install,
3444                             vca_id=vca_id,
3445                         ),
3446                         timeout=timeout,
3447                     )
3448
3449 0         except Exception as e:
3450             # Update the db with the error, then re-raise the original exception
3451 0             try:
3452 0                 self.update_db_2(
3453                     "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3454                 )
3455 0                 self.update_db_2(
3456                     "vnfrs",
3457                     vnfr_data.get("_id"),
3458                     {"kdur.{}.status".format(kdu_index): "ERROR"},
3459                 )
3460 0             except Exception:
3461                 # ignore to keep original exception
3462 0                 pass
3463             # reraise original error
3464 0             raise
3465
3466 1         return kdu_instance
3467
3468 1     async def deploy_kdus(
3469         self,
3470         logging_text,
3471         nsr_id,
3472         nslcmop_id,
3473         db_vnfrs,
3474         db_vnfds,
3475         task_instantiation_info,
3476     ):
3477         # Launch kdus if present in the descriptor
3478
3479 1         k8scluster_id_2_uuic = {
3480             "helm-chart-v3": {},
3481             "helm-chart": {},
3482             "juju-bundle": {},
3483         }
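             # Per-cluster-type cache mapping k8s cluster id -> cluster uuid; _get_cluster_id below
             # fills it lazily so each cluster is resolved (and, if needed, initialized) only once.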
3484
3485 1         async def _get_cluster_id(cluster_id, cluster_type):
3486             nonlocal k8scluster_id_2_uuic
3487 1             if cluster_id in k8scluster_id_2_uuic[cluster_type]:
3488 1                 return k8scluster_id_2_uuic[cluster_type][cluster_id]
3489
3490             # check if the K8s cluster is being created and wait for any related previous tasks to finish
3491 1             task_name, task_dependency = self.lcm_tasks.lookfor_related(
3492                 "k8scluster", cluster_id
3493             )
3494 1             if task_dependency:
3495 0                 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3496                     task_name, cluster_id
3497                 )
3498 0                 self.logger.debug(logging_text + text)
3499 0                 await asyncio.wait(task_dependency, timeout=3600)
3500
3501 1             db_k8scluster = self.db.get_one(
3502                 "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
3503             )
3504 1             if not db_k8scluster:
3505 0                 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
3506
3507 1             k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
3508 1             if not k8s_id:
3509 0                 if cluster_type == "helm-chart-v3":
3510 0                     try:
3511                         # backward compatibility for existing clusters that have not been initialized for helm v3
3512 0                         k8s_credentials = yaml.safe_dump(
3513                             db_k8scluster.get("credentials")
3514                         )
3515 0                         k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
3516                             k8s_credentials, reuse_cluster_uuid=cluster_id
3517                         )
3518 0                         db_k8scluster_update = {}
3519 0                         db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
3520 0                         db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
3521 0                         db_k8scluster_update[
3522                             "_admin.helm-chart-v3.created"
3523                         ] = uninstall_sw
3524 0                         db_k8scluster_update[
3525                             "_admin.helm-chart-v3.operationalState"
3526                         ] = "ENABLED"
3527 0                         self.update_db_2(
3528                             "k8sclusters", cluster_id, db_k8scluster_update
3529                         )
3530 0                     except Exception as e:
3531 0                         self.logger.error(
3532                             logging_text
3533                             + "error initializing helm-v3 cluster: {}".format(str(e))
3534                         )
3535 0                         raise LcmException(
3536                             "K8s cluster '{}' has not been initialized for '{}'".format(
3537                                 cluster_id, cluster_type
3538                             )
3539                         )
3540                 else:
3541 0                     raise LcmException(
3542                         "K8s cluster '{}' has not been initialized for '{}'".format(
3543                             cluster_id, cluster_type
3544                         )
3545                     )
3546 1             k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
3547 1             return k8s_id
3548
3549 1         logging_text += "Deploy kdus: "
3550 1         step = ""
3551 1         try:
3552 1             db_nsr_update = {"_admin.deployed.K8s": []}
3553 1             self.update_db_2("nsrs", nsr_id, db_nsr_update)
3554
3555 1             index = 0
3556 1             updated_cluster_list = []
3557 1             updated_v3_cluster_list = []
3558
3559 1             for vnfr_data in db_vnfrs.values():
3560 1                 vca_id = self.get_vca_id(vnfr_data, {})
3561 1                 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
3562                     # Step 0: Prepare and set parameters
3563 1                     desc_params = parse_yaml_strings(kdur.get("additionalParams"))
3564 1                     vnfd_id = vnfr_data.get("vnfd-id")
3565 1                     vnfd_with_id = find_in_list(
3566                         db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3567                     )
3568 1                     kdud = next(
3569                         kdud
3570                         for kdud in vnfd_with_id["kdu"]
3571                         if kdud["name"] == kdur["kdu-name"]
3572                     )
3573 1                     namespace = kdur.get("k8s-namespace")
3574 1                     kdu_deployment_name = kdur.get("kdu-deployment-name")
3575 1                     if kdur.get("helm-chart"):
3576 1                         kdumodel = kdur["helm-chart"]
3577                         # Default version is helm3; if helm-version is v2, assign v2
3578 1                         k8sclustertype = "helm-chart-v3"
3579 1                         self.logger.debug("kdur: {}".format(kdur))
3580 1                         if (
3581                             kdur.get("helm-version")
3582                             and kdur.get("helm-version") == "v2"
3583                         ):
3584 0                             k8sclustertype = "helm-chart"
3585 0                     elif kdur.get("juju-bundle"):
3586 0                         kdumodel = kdur["juju-bundle"]
3587 0                         k8sclustertype = "juju-bundle"
3588                     else:
3589 0                         raise LcmException(
3590                             "kdu type for kdu='{}.{}' is neither helm-chart nor "
3591                             "juju-bundle. Maybe an old NBI version is running".format(
3592                                 vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
3593                             )
3594                         )
3595                     # check if kdumodel is a file and exists
3596 1                     try:
3597 1                         vnfd_with_id = find_in_list(
3598                             db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3599                         )
3600 1                         storage = deep_get(vnfd_with_id, ("_admin", "storage"))
3601 1                         if storage:  # may not be present if the vnfd has no artifacts
3602                             # path format: /vnfd-id/pkg-dir/helm-charts|juju-bundles/kdumodel
3603 1                             if storage["pkg-dir"]:
3604 1                                 filename = "{}/{}/{}s/{}".format(
3605                                     storage["folder"],
3606                                     storage["pkg-dir"],
3607                                     k8sclustertype,
3608                                     kdumodel,
3609                                 )
3610                             else:
3611 0                                 filename = "{}/Scripts/{}s/{}".format(
3612                                     storage["folder"],
3613                                     k8sclustertype,
3614                                     kdumodel,
3615                                 )
3616 1                             if self.fs.file_exists(
3617                                 filename, mode="file"
3618                             ) or self.fs.file_exists(filename, mode="dir"):
3619 1                                 kdumodel = self.fs.path + filename
3620 1                     except (asyncio.TimeoutError, asyncio.CancelledError):
3621 0                         raise
3622 1                     except Exception:  # it is not a file
3623 1                         pass
3624
3625 1                     k8s_cluster_id = kdur["k8s-cluster"]["id"]
3626 1                     step = "Synchronize repos for k8s cluster '{}'".format(
3627                         k8s_cluster_id
3628                     )
3629 1                     cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
3630
3631                     # Synchronize repos
3632 1                     if (
3633                         k8sclustertype == "helm-chart"
3634                         and cluster_uuid not in updated_cluster_list
3635                     ) or (
3636                         k8sclustertype == "helm-chart-v3"
3637                         and cluster_uuid not in updated_v3_cluster_list
3638                     ):
3639 1                         del_repo_list, added_repo_dict = await asyncio.ensure_future(
3640                             self.k8scluster_map[k8sclustertype].synchronize_repos(
3641                                 cluster_uuid=cluster_uuid
3642                             )
3643                         )
3644 1                         if del_repo_list or added_repo_dict:
3645 0                             if k8sclustertype == "helm-chart":
3646 0                                 unset = {
3647                                     "_admin.helm_charts_added." + item: None
3648                                     for item in del_repo_list
3649                                 }
3650 0                                 updated = {
3651                                     "_admin.helm_charts_added." + item: name
3652                                     for item, name in added_repo_dict.items()
3653                                 }
3654 0                                 updated_cluster_list.append(cluster_uuid)
3655 0                             elif k8sclustertype == "helm-chart-v3":
3656 0                                 unset = {
3657                                     "_admin.helm_charts_v3_added." + item: None
3658                                     for item in del_repo_list
3659                                 }
3660 0                                 updated = {
3661                                     "_admin.helm_charts_v3_added." + item: name
3662                                     for item, name in added_repo_dict.items()
3663                                 }
3664 0                                 updated_v3_cluster_list.append(cluster_uuid)
3665 0                             self.logger.debug(
3666                                 logging_text + "repos synchronized on k8s cluster "
3667                                 "'{}' to_delete: {}, to_add: {}".format(
3668                                     k8s_cluster_id, del_repo_list, added_repo_dict
3669                                 )
3670                             )
3671 0                             self.db.set_one(
3672                                 "k8sclusters",
3673                                 {"_id": k8s_cluster_id},
3674                                 updated,
3675                                 unset=unset,
3676                             )
3677
3678                     # Instantiate kdu
3679 1                     step = "Instantiating KDU {}.{} in k8s cluster {}".format(
3680                         vnfr_data["member-vnf-index-ref"],
3681                         kdur["kdu-name"],
3682                         k8s_cluster_id,
3683                     )
3684 1                     k8s_instance_info = {
3685                         "kdu-instance": None,
3686                         "k8scluster-uuid": cluster_uuid,
3687                         "k8scluster-type": k8sclustertype,
3688                         "member-vnf-index": vnfr_data["member-vnf-index-ref"],
3689                         "kdu-name": kdur["kdu-name"],
3690                         "kdu-model": kdumodel,
3691                         "namespace": namespace,
3692                         "kdu-deployment-name": kdu_deployment_name,
3693                     }
3694 1                     db_path = "_admin.deployed.K8s.{}".format(index)
3695 1                     db_nsr_update[db_path] = k8s_instance_info
3696 1                     self.update_db_2("nsrs", nsr_id, db_nsr_update)
3697 1                     vnfd_with_id = find_in_list(
3698                         db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
3699                     )
3700 1                     task = asyncio.ensure_future(
3701                         self._install_kdu(
3702                             nsr_id,
3703                             db_path,
3704                             vnfr_data,
3705                             kdu_index,
3706                             kdud,
3707                             vnfd_with_id,
3708                             k8s_instance_info,
3709                             k8params=desc_params,
3710                             timeout=1800,
3711                             vca_id=vca_id,
3712                         )
3713                     )
3714 1                     self.lcm_tasks.register(
3715                         "ns",
3716                         nsr_id,
3717                         nslcmop_id,
3718                         "instantiate_KDU-{}".format(index),
3719                         task,
3720                     )
3721 1                     task_instantiation_info[task] = "Deploying KDU {}".format(
3722                         kdur["kdu-name"]
3723                     )
3724
3725 1                     index += 1
3726
3727 0         except (LcmException, asyncio.CancelledError):
3728 0             raise
3729 0         except Exception as e:
3730 0             msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
3731 0             if isinstance(e, (N2VCException, DbException)):
3732 0                 self.logger.error(logging_text + msg)
3733             else:
3734 0                 self.logger.critical(logging_text + msg, exc_info=True)
3735 0             raise LcmException(msg)
3736         finally:
3737 1             if db_nsr_update:
3738 0                 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3739
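# A minimal, self-contained sketch of the artifact-path composition performed in the KDU loop
# above when a KDU model is packaged inside the VNF package. The storage values below are
# hypothetical; the real ones come from vnfd["_admin"]["storage"]:

def _kdu_artifact_path_sketch():
    storage = {"folder": "a1b2c3-vnfd-folder", "pkg-dir": "my_knf"}  # hypothetical values
    k8sclustertype = "helm-chart-v3"
    kdumodel = "mychart"
    if storage["pkg-dir"]:
        # path format: <folder>/<pkg-dir>/<k8sclustertype>s/<kdumodel>
        filename = "{}/{}/{}s/{}".format(
            storage["folder"], storage["pkg-dir"], k8sclustertype, kdumodel
        )
    else:
        filename = "{}/Scripts/{}s/{}".format(
            storage["folder"], k8sclustertype, kdumodel
        )
    return filename  # -> "a1b2c3-vnfd-folder/my_knf/helm-chart-v3s/mychart"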
3740 1     def _deploy_n2vc(
3741         self,
3742         logging_text,
3743         db_nsr,
3744         db_vnfr,
3745         nslcmop_id,
3746         nsr_id,
3747         nsi_id,
3748         vnfd_id,
3749         vdu_id,
3750         kdu_name,
3751         member_vnf_index,
3752         vdu_index,
3753         vdu_name,
3754         deploy_params,
3755         descriptor_config,
3756         base_folder,
3757         task_instantiation_info,
3758         stage,
3759     ):
3760         # launch instantiate_N2VC in an asyncio task and register the task object
3761         # Look up where the information of this charm is stored in the database <nsrs>._admin.deployed.VCA
3762         # if not found, create a new entry and update the database
3763         # fill db_nsr._admin.deployed.VCA.<index>
3764
3765 0         self.logger.debug(
3766             logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
3767         )
3768
3769 0         charm_name = ""
3770 0         get_charm_name = False
3771 0         if "execution-environment-list" in descriptor_config:
3772 0             ee_list = descriptor_config.get("execution-environment-list", [])
3773 0         elif "juju" in descriptor_config:
3774 0             ee_list = [descriptor_config]  # ns charms
3775 0             if "execution-environment-list" not in descriptor_config:
3776                 # charm name is only required for ns charms
3777 0                 get_charm_name = True
3778         else:  # other types, such as script, are not supported
3779 0             ee_list = []
3780
3781 0         for ee_item in ee_list:
3782 0             self.logger.debug(
3783                 logging_text
3784                 + "_deploy_n2vc ee_item juju={}, helm={}".format(
3785                     ee_item.get("juju"), ee_item.get("helm-chart")
3786                 )
3787             )
3788 0             ee_descriptor_id = ee_item.get("id")
3789 0             if ee_item.get("juju"):
3790 0                 vca_name = ee_item["juju"].get("charm")
3791 0                 if get_charm_name:
3792 0                     charm_name = self.find_charm_name(db_nsr, str(vca_name))
3793 0                 vca_type = (
3794                     "lxc_proxy_charm"
3795                     if ee_item["juju"].get("charm") is not None
3796                     else "native_charm"
3797                 )
3798 0                 if ee_item["juju"].get("cloud") == "k8s":
3799 0                     vca_type = "k8s_proxy_charm"
3800 0                 elif ee_item["juju"].get("proxy") is False:
3801 0                     vca_type = "native_charm"
3802 0             elif ee_item.get("helm-chart"):
3803 0                 vca_name = ee_item["helm-chart"]
3804 0                 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
3805 0                     vca_type = "helm"
3806                 else:
3807 0                     vca_type = "helm-v3"
3808             else:
3809 0                 self.logger.debug(
3810                     logging_text + "skipping: configuration is neither juju nor helm-chart"
3811                 )
3812 0                 continue
3813
3814 0             vca_index = -1
3815 0             for vca_index, vca_deployed in enumerate(
3816                 db_nsr["_admin"]["deployed"]["VCA"]
3817             ):
3818 0                 if not vca_deployed:
3819 0                     continue
3820 0                 if (
3821                     vca_deployed.get("member-vnf-index") == member_vnf_index
3822                     and vca_deployed.get("vdu_id") == vdu_id
3823                     and vca_deployed.get("kdu_name") == kdu_name
3824                     and vca_deployed.get("vdu_count_index", 0) == vdu_index
3825                     and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
3826                 ):
3827 0                     break
3828             else:
3829                 # not found, create one.
3830 0                 target = (
3831                     "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
3832                 )
3833 0                 if vdu_id:
3834 0                     target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
3835 0                 elif kdu_name:
3836 0                     target += "/kdu/{}".format(kdu_name)
3837 0                 vca_deployed = {
3838                     "target_element": target,
3839                     # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
3840                     "member-vnf-index": member_vnf_index,
3841                     "vdu_id": vdu_id,
3842                     "kdu_name": kdu_name,
3843                     "vdu_count_index": vdu_index,
3844                     "operational-status": "init",  # TODO revise
3845                     "detailed-status": "",  # TODO revise
3846                     "step": "initial-deploy",  # TODO revise
3847                     "vnfd_id": vnfd_id,
3848                     "vdu_name": vdu_name,
3849                     "type": vca_type,
3850                     "ee_descriptor_id": ee_descriptor_id,
3851                     "charm_name": charm_name,
3852                 }
3853 0                 vca_index += 1
3854
3855                 # create VCA and configurationStatus in db
3856 0                 db_dict = {
3857                     "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
3858                     "configurationStatus.{}".format(vca_index): dict(),
3859                 }
3860 0                 self.update_db_2("nsrs", nsr_id, db_dict)
3861
3862 0                 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
3863
3864 0             self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
3865 0             self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
3866 0             self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
3867
3868             # Launch task
3869 0             task_n2vc = asyncio.ensure_future(
3870                 self.instantiate_N2VC(
3871                     logging_text=logging_text,
3872                     vca_index=vca_index,
3873                     nsi_id=nsi_id,
3874                     db_nsr=db_nsr,
3875                     db_vnfr=db_vnfr,
3876                     vdu_id=vdu_id,
3877                     kdu_name=kdu_name,
3878                     vdu_index=vdu_index,
3879                     deploy_params=deploy_params,
3880                     config_descriptor=descriptor_config,
3881                     base_folder=base_folder,
3882                     nslcmop_id=nslcmop_id,
3883                     stage=stage,
3884                     vca_type=vca_type,
3885                     vca_name=vca_name,
3886                     ee_config_descriptor=ee_item,
3887                 )
3888             )
3889 0             self.lcm_tasks.register(
3890                 "ns",
3891                 nsr_id,
3892                 nslcmop_id,
3893                 "instantiate_N2VC-{}".format(vca_index),
3894                 task_n2vc,
3895             )
3896 0             task_instantiation_info[
3897                 task_n2vc
3898             ] = self.task_name_deploy_vca + " {}.{}".format(
3899                 member_vnf_index or "", vdu_id or ""
3900             )
3901
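# A hedged, standalone sketch of how _deploy_n2vc above maps an execution-environment item to a
# vca_type. The ee_item dictionaries used in the asserts are hypothetical descriptor fragments:

def _vca_type_sketch(ee_item):
    if ee_item.get("juju"):
        vca_type = (
            "lxc_proxy_charm" if ee_item["juju"].get("charm") is not None else "native_charm"
        )
        if ee_item["juju"].get("cloud") == "k8s":
            vca_type = "k8s_proxy_charm"
        elif ee_item["juju"].get("proxy") is False:
            vca_type = "native_charm"
        return vca_type
    if ee_item.get("helm-chart"):
        return "helm" if ee_item.get("helm-version") == "v2" else "helm-v3"
    return None  # other types (e.g. scripts) are skipped


assert _vca_type_sketch({"juju": {"charm": "mycharm", "cloud": "k8s"}}) == "k8s_proxy_charm"
assert _vca_type_sketch({"helm-chart": "mychart"}) == "helm-v3"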
3902 1     @staticmethod
3903 1     def _create_nslcmop(nsr_id, operation, params):
3904         """
3905         Creates an ns-lcm-op content to be stored in the database.
3906         :param nsr_id: internal id of the instance
3907         :param operation: instantiate, terminate, scale, action, ...
3908         :param params: user parameters for the operation
3909         :return: dictionary following SOL005 format
3910         """
3911         # Raise exception if invalid arguments
3912 0         if not (nsr_id and operation and params):
3913 0             raise LcmException(
3914                 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3915             )
3916 0         now = time()
3917 0         _id = str(uuid4())
3918 0         nslcmop = {
3919             "id": _id,
3920             "_id": _id,
3921             # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3922             "operationState": "PROCESSING",
3923             "statusEnteredTime": now,
3924             "nsInstanceId": nsr_id,
3925             "lcmOperationType": operation,
3926             "startTime": now,
3927             "isAutomaticInvocation": False,
3928             "operationParams": params,
3929             "isCancelPending": False,
3930             "links": {
3931                 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3932                 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3933             },
3934         }
3935 0         return nslcmop
3936
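# A hedged usage sketch of the static helper above: it only builds the SOL005-style record and
# the caller is expected to persist it. The ids and operation parameters are hypothetical, and
# it assumes the enclosing class of this module is NsLcm:

def _create_nslcmop_usage_sketch():
    nslcmop = NsLcm._create_nslcmop(
        nsr_id="3f40cd97-hypothetical-id",
        operation="scale",
        params={"scaleType": "SCALE_VNF"},  # hypothetical user parameters
    )
    assert nslcmop["operationState"] == "PROCESSING"
    assert nslcmop["links"]["nsInstance"].endswith("3f40cd97-hypothetical-id")
    return nslcmop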
3937 1     def _format_additional_params(self, params):
3938 1         params = params or {}
3939 1         for key, value in params.items():
3940 0             if str(value).startswith("!!yaml "):
3941 0                 params[key] = yaml.safe_load(value[7:])
3942 1         return params
3943
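# A hedged sketch of the "!!yaml " convention handled by _format_additional_params above:
# values prefixed with "!!yaml " are parsed as YAML, everything else is left untouched.
# The parameter names and values below are hypothetical:

import yaml  # already imported at the top of this module


def _format_additional_params_sketch(params):
    params = params or {}
    for key, value in params.items():
        if str(value).startswith("!!yaml "):
            params[key] = yaml.safe_load(value[7:])
    return params


example = _format_additional_params_sketch(
    {"replicas": "3", "resources": "!!yaml {limits: {cpu: '500m'}}"}
)
# example["resources"] == {"limits": {"cpu": "500m"}}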
3944 1     def _get_terminate_primitive_params(self, seq, vnf_index):
3945 0         primitive = seq.get("name")
3946 0         primitive_params = {}
3947 0         params = {
3948             "member_vnf_index": vnf_index,
3949             "primitive": primitive,
3950             "primitive_params": primitive_params,
3951         }
3952 0         desc_params = {}
3953 0         return self._map_primitive_params(seq, params, desc_params)
3954
3955     # sub-operations
3956
3957 1     def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3958 1         op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3959 1         if op.get("operationState") == "COMPLETED":
3960             # b. Skip sub-operation
3961             # _ns_execute_primitive() or RO.create_action() will NOT be executed
3962 1             return self.SUBOPERATION_STATUS_SKIP
3963         else:
3964             # c. retry executing sub-operation
3965             # The sub-operation exists, and operationState != 'COMPLETED'
3966             # Update operationState = 'PROCESSING' to indicate a retry.
3967 1             operationState = "PROCESSING"
3968 1             detailed_status = "In progress"
3969 1             self._update_suboperation_status(
3970                 db_nslcmop, op_index, operationState, detailed_status
3971             )
3972             # Return the sub-operation index
3973             # _ns_execute_primitive() or RO.create_action() will be called from scale()
3974             # with arguments extracted from the sub-operation
3975 1             return op_index
3976
3977     # Find a sub-operation where all keys in a matching dictionary must match
3978     # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3979 1     def _find_suboperation(self, db_nslcmop, match):
3980 1         if db_nslcmop and match:
3981 1             op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3982 1             for i, op in enumerate(op_list):
3983 1                 if all(op.get(k) == match[k] for k in match):
3984 1                     return i
3985 1         return self.SUBOPERATION_STATUS_NOT_FOUND
3986
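# A hedged, standalone sketch of the matching rule used by _find_suboperation above: the first
# sub-operation whose entries contain every key/value pair of the match dictionary wins. The
# db_nslcmop content below is hypothetical:

def _find_suboperation_sketch(db_nslcmop, match):
    if db_nslcmop and match:
        op_list = db_nslcmop.get("_admin", {}).get("operations", [])
        for i, op in enumerate(op_list):
            if all(op.get(k) == match[k] for k in match):
                return i
    return -1  # stands in for SUBOPERATION_STATUS_NOT_FOUND


ops = {"_admin": {"operations": [
    {"member_vnf_index": "1", "lcmOperationType": "PRE-SCALE"},
    {"member_vnf_index": "1", "lcmOperationType": "POST-SCALE"},
]}}
assert _find_suboperation_sketch(ops, {"member_vnf_index": "1", "lcmOperationType": "POST-SCALE"}) == 1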
3987     # Update status for a sub-operation given its index
3988 1     def _update_suboperation_status(
3989         self, db_nslcmop, op_index, operationState, detailed_status
3990     ):
3991         # Update DB for HA tasks
3992 1         q_filter = {"_id": db_nslcmop["_id"]}
3993 1         update_dict = {
3994             "_admin.operations.{}.operationState".format(op_index): operationState,
3995             "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3996         }
3997 1         self.db.set_one(
3998             "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3999         )
4000
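# A hedged sketch of the dotted-path update built by _update_suboperation_status above for a
# given sub-operation index; op_index and the status strings here are hypothetical:

op_index = 1
update_dict = {
    "_admin.operations.{}.operationState".format(op_index): "PROCESSING",
    "_admin.operations.{}.detailed-status".format(op_index): "In progress",
}
# update_dict == {"_admin.operations.1.operationState": "PROCESSING",
#                 "_admin.operations.1.detailed-status": "In progress"}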
4001     # Add sub-operation, return the index of the added sub-operation
4002     # Optionally, set operationState, detailed-status, and operationType
4003     # Status and type are currently set for 'scale' sub-operations:
4004     # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4005     # 'detailed-status' : status message
4006     # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4007     # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4008 1     def _add_suboperation(
4009         self,
4010         db_nslcmop,
4011         vnf_index,
4012         vdu_id,
4013         vdu_count_index,
4014         vdu_name,
4015         primitive,
4016         mapped_primitive_params,
4017         operationState=None,
4018         detailed_status=None,
4019         operationType=None,
4020         RO_nsr_id=None,
4021         RO_scaling_info=None,
4022     ):
4023 1         if not db_nslcmop:
4024 1             return self.SUBOPERATION_STATUS_NOT_FOUND
4025         # Get the "_admin.operations" list, if it exists
4026 1         db_nslcmop_admin = db_nslcmop.get("_admin", {})
4027 1         op_list = db_nslcmop_admin.get("operations")
4028         # Create or append to the "_admin.operations" list
4029 1         new_op = {
4030             "member_vnf_index": vnf_index,
4031             "vdu_id": vdu_id,
4032             "vdu_count_index": vdu_count_index,
4033             "primitive": primitive,
4034             "primitive_params": mapped_primitive_params,
4035         }
4036 1         if operationState:
4037 1             new_op["operationState"] = operationState
4038 1         if detailed_status:
4039 1             new_op["detailed-status"] = detailed_status
4040 1         if operationType:
4041 1             new_op["lcmOperationType"] = operationType
4042 1         if RO_nsr_id:
4043 1             new_op["RO_nsr_id"] = RO_nsr_id
4044 1         if RO_scaling_info:
4045 1             new_op["RO_scaling_info"] = RO_scaling_info
4046 1         if not op_list:
4047             # No existing operations, create key 'operations' with current operation as first list element
4048 1             db_nslcmop_admin.update({"operations": [new_op]})
4049 1             op_list = db_nslcmop_admin.get("operations")
4050         else:
4051             # Existing operations, append operation to list
4052 1             op_list.append(new_op)
4053
4054 1         db_nslcmop_update = {"_admin.operations": op_list}
4055 1         self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4056 1         op_index = len(op_list) - 1
4057 1         return op_index
4058
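# A hedged sketch of the record appended to _admin.operations by _add_suboperation above; the
# values are hypothetical, and the optional keys appear only when the corresponding arguments
# are provided:

new_op_example = {
    "member_vnf_index": "1",
    "vdu_id": None,
    "vdu_count_index": None,
    "primitive": "touch",                         # hypothetical primitive name
    "primitive_params": {"filename": "/tmp/f"},   # hypothetical mapped parameters
    "operationState": "PROCESSING",               # optional
    "detailed-status": "In progress",             # optional
    "lcmOperationType": "PRE-SCALE",              # optional
}
# the returned op_index is len(op_list) - 1 after the append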
4059     # Helper methods for scale() sub-operations
4060
4061     # pre-scale/post-scale:
4062     # Check for 3 different cases:
4063     # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4064     # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4065     # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4066 1     def _check_or_add_scale_suboperation(
4067         self,
4068         db_nslcmop,
4069         vnf_index,
4070         vnf_config_primitive,
4071         primitive_params,
4072         operationType,
4073         RO_nsr_id=None,
4074         RO_scaling_info=None,
4075     ):
4076         # Find this sub-operation
4077 1         if RO_nsr_id and RO_scaling_info:
4078 1             operationType = "SCALE-RO"
4079 1             match = {
4080                 "member_vnf_index": vnf_index,
4081                 "RO_nsr_id": RO_nsr_id,
4082                 "RO_scaling_info": RO_scaling_info,
4083             }
4084         else:
4085 1             match = {
4086                 "member_vnf_index": vnf_index,
4087                 "primitive": vnf_config_primitive,
4088                 "primitive_params": primitive_params,
4089                 "lcmOperationType": operationType,
4090             }
4091 1         op_index = self._find_suboperation(db_nslcmop, match)
4092 1         if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4093             # a. New sub-operation
4094             # The sub-operation does not exist, add it.
4095             # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4096             # The following parameters are set to None for all kinds of scaling:
4097 1             vdu_id = None
4098 1             vdu_count_index = None
4099 1             vdu_name = None
4100 1             if RO_nsr_id and RO_scaling_info:
4101 1                 vnf_config_primitive = None
4102 1                 primitive_params = None
4103             else:
4104 1                 RO_nsr_id = None
4105 1                 RO_scaling_info = None
4106             # Initial status for sub-operation
4107 1             operationState = "PROCESSING"
4108 1             detailed_status = "In progress"
4109             # Add sub-operation for pre/post-scaling (zero or more operations)
4110 1             self._add_suboperation(
4111                 db_nslcmop,
4112                 vnf_index,
4113                 vdu_id,
4114                 vdu_count_index,
4115                 vdu_name,
4116                 vnf_config_primitive,
4117                 primitive_params,
4118                 operationState,
4119                 detailed_status,
4120                 operationType,
4121                 RO_nsr_id,
4122                 RO_scaling_info,
4123             )
4124 1             return self.SUBOPERATION_STATUS_NEW
4125         else:
4126             # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4127             # or op_index (operationState != 'COMPLETED')
4128 1             return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4129
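# A hedged summary sketch of the three outcomes documented above for
# _check_or_add_scale_suboperation; the sentinel values below are placeholders for this sketch
# only (the real ones are class constants), and the decision table itself is illustrative:

def _scale_suboperation_outcome_sketch(op_index_found, operation_state):
    # op_index_found: index of the existing sub-operation, or None when there is no match
    SUBOPERATION_STATUS_NEW = -1   # hypothetical sentinels, only for this sketch
    SUBOPERATION_STATUS_SKIP = -2
    if op_index_found is None:
        return SUBOPERATION_STATUS_NEW    # a. first execution: add the sub-operation and run it
    if operation_state == "COMPLETED":
        return SUBOPERATION_STATUS_SKIP   # b. already completed: skip re-execution
    return op_index_found                 # c. retry, re-executing with the stored arguments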
4130     # Function to return execution_environment id
4131
4132 1     def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4133         # TODO vdu_index_count
4134 0         for vca in vca_deployed_list:
4135 0             if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4136 0                 return vca["ee_id"]
4137
4138 1     async def destroy_N2VC(
4139         self,
4140         logging_text,
4141         db_nslcmop,
4142         vca_deployed,
4143         config_descriptor,
4144         vca_index,
4145         destroy_ee=True,
4146         exec_primitives=True,
4147         scaling_in=False,
4148         vca_id: str = None,
4149     ):
4150         """
4151         Execute the terminate primitives and destroy the execution environment (only if destroy_ee is True).
4152         :param logging_text:
4153         :param db_nslcmop:
4154         :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
4155         :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
4156         :param vca_index: index in the database _admin.deployed.VCA
4157         :param destroy_ee: False to skip destroying the execution environment, because all of them will be destroyed at once
4158         :param exec_primitives: False to skip executing terminate primitives, because the configuration was not completed or
4159                             did not execute properly
4160         :param scaling_in: True destroys the application, False destroys the model
4161         :return: None or exception
4162         """
4163
4164 0         self.logger.debug(
4165             logging_text
4166             + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
4167                 vca_index, vca_deployed, config_descriptor, destroy_ee
4168             )
4169         )
4170
4171 0         vca_type = vca_deployed.get("type", "lxc_proxy_charm")
4172
4173         # execute terminate_primitives
4174 0         if exec_primitives:
4175 0             terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
4176                 config_descriptor.get("terminate-config-primitive"),
4177                 vca_deployed.get("ee_descriptor_id"),
4178             )
4179 0             vdu_id = vca_deployed.get("vdu_id")
4180 0             vdu_count_index = vca_deployed.get("vdu_count_index")
4181 0             vdu_name = vca_deployed.get("vdu_name")
4182 0             vnf_index = vca_deployed.get("member-vnf-index")
4183 0             if terminate_primitives and vca_deployed.get("needed_terminate"):
4184 0                 for seq in terminate_primitives:
4185                     # For each sequence in list, get primitive and call _ns_execute_primitive()
4186 0                     step = "Calling terminate action for vnf_member_index={} primitive={}".format(
4187                         vnf_index, seq.get("name")
4188                     )
4189 0                     self.logger.debug(logging_text + step)
4190                     # Create the primitive for each sequence, i.e. "primitive": "touch"
4191 0                     primitive = seq.get("name")
4192 0                     mapped_primitive_params = self._get_terminate_primitive_params(
4193                         seq, vnf_index
4194                     )
4195
4196                     # Add sub-operation
4197 0                     self._add_suboperation(
4198                         db_nslcmop,
4199                         vnf_index,
4200                         vdu_id,
4201                         vdu_count_index,
4202                         vdu_name,
4203                         primitive,
4204                         mapped_primitive_params,
4205                     )
4206                     # Sub-operations: Call _ns_execute_primitive() instead of action()
4207 0                     try:
4208 0                         result, result_detail = await self._ns_execute_primitive(
4209                             vca_deployed["ee_id"],
4210                             primitive,
4211                             mapped_primitive_params,
4212                             vca_type=vca_type,
4213                             vca_id=vca_id,
4214                         )
4215 0                     except LcmException:
4216                         # this happens when the VCA is not deployed. In this case there is nothing to terminate
4217 0                         continue
4218 0                     result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
4219 0                     if result not in result_ok:
4220 0                         raise LcmException(
4221                             "terminate_primitive {}  for vnf_member_index={} fails with "
4222                             "error {}".format(seq.get("name"), vnf_index, result_detail)
4223                         )
4224                 # mark that this VCA does not need to be terminated
4225 0                 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
4226                     vca_index
4227                 )
4228 0                 self.update_db_2(
4229                     "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
4230                 )
4231
4232         # Delete Prometheus Jobs if any
4233         # This uses NSR_ID, so it will destroy any jobs under this index
4234 0         self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})
4235
4236 0         if destroy_ee:
4237 0             await self.vca_map[vca_type].delete_execution_environment(
4238                 vca_deployed["ee_id"],
4239                 scaling_in=scaling_in,
4240                 vca_type=vca_type,
4241                 vca_id=vca_id,
4242             )
4243
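# A hedged sketch of the destroy_ee decision applied when destroy_N2VC above is called from
# terminate(): helm-based and native-charm EEs are destroyed individually, while proxy charms
# are left for the later bulk namespace deletion. The vca_type values mirror this module:

def _destroy_ee_decision_sketch(vca_type):
    return vca_type in ("helm", "helm-v3", "native_charm")


assert _destroy_ee_decision_sketch("native_charm") is True
assert _destroy_ee_decision_sketch("lxc_proxy_charm") is False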
4244 1     async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4245 0         self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4246 0         namespace = "." + db_nsr["_id"]
4247 0         try:
4248 0             await self.n2vc.delete_namespace(
4249                 namespace=namespace,
4250                 total_timeout=self.timeout.charm_delete,
4251                 vca_id=vca_id,
4252             )
4253 0         except N2VCNotFound:  # already deleted. Skip
4254 0             pass
4255 0         self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4256
4257 1     async def _terminate_RO(
4258         self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4259     ):
4260         """
4261         Terminates a deployment from RO
4262         :param logging_text:
4263         :param nsr_deployed: db_nsr._admin.deployed
4264         :param nsr_id:
4265         :param nslcmop_id:
4266         :param stage: list of strings with the content to write to db_nslcmop.detailed-status.
4267             this method updates only index 2, but it writes the concatenated content of the list to the database
4268         :return:
4269         """
4270 0         db_nsr_update = {}
4271 0         failed_detail = []
4272 0         ro_nsr_id = ro_delete_action = None
4273 0         if nsr_deployed and nsr_deployed.get("RO"):
4274 0             ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
4275 0             ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
4276 0         try:
4277 0             if ro_nsr_id:
4278 0                 stage[2] = "Deleting ns from VIM."
4279 0                 db_nsr_update["detailed-status"] = " ".join(stage)
4280 0                 self._write_op_status(nslcmop_id, stage)
4281 0                 self.logger.debug(logging_text + stage[2])
4282 0                 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4283 0                 self._write_op_status(nslcmop_id, stage)
4284 0                 desc = await self.RO.delete("ns", ro_nsr_id)
4285 0                 ro_delete_action = desc["action_id"]
4286 0                 db_nsr_update[
4287                     "_admin.deployed.RO.nsr_delete_action_id"
4288                 ] = ro_delete_action
4289 0                 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
4290 0                 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4291 0             if ro_delete_action:
4292                 # wait until NS is deleted from VIM
4293 0                 stage[2] = "Waiting ns deleted from VIM."
4294 0                 detailed_status_old = None
4295 0                 self.logger.debug(
4296                     logging_text
4297                     + stage[2]
4298                     + " RO_id={} ro_delete_action={}".format(
4299                         ro_nsr_id, ro_delete_action
4300                     )
4301                 )
4302 0                 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4303 0                 self._write_op_status(nslcmop_id, stage)
4304
4305 0                 delete_timeout = 20 * 60  # 20 minutes
4306 0                 while delete_timeout > 0:
4307 0                     desc = await self.RO.show(
4308                         "ns",
4309                         item_id_name=ro_nsr_id,
4310                         extra_item="action",
4311                         extra_item_id=ro_delete_action,
4312                     )
4313
4314                     # deploymentStatus
4315 0                     self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4316
4317 0                     ns_status, ns_status_info = self.RO.check_action_status(desc)
4318 0                     if ns_status == "ERROR":
4319 0                         raise ROclient.ROClientException(ns_status_info)
4320 0                     elif ns_status == "BUILD":
4321 0                         stage[2] = "Deleting from VIM {}".format(ns_status_info)
4322 0                     elif ns_status == "ACTIVE":
4323 0                         db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
4324 0                         db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4325 0                         break
4326                     else:
4327 0                         assert (
4328                             False
4329                         ), "ROclient.check_action_status returns unknown {}".format(
4330                             ns_status
4331                         )
4332 0                     if stage[2] != detailed_status_old:
4333 0                         detailed_status_old = stage[2]
4334 0                         db_nsr_update["detailed-status"] = " ".join(stage)
4335 0                         self._write_op_status(nslcmop_id, stage)
4336 0                         self.update_db_2("nsrs", nsr_id, db_nsr_update)
4337 0                     await asyncio.sleep(5, loop=self.loop)
4338 0                     delete_timeout -= 5
4339                 else:  # delete_timeout <= 0:
4340 0                     raise ROclient.ROClientException(
4341                         "Timeout waiting ns deleted from VIM"
4342                     )
4343
4344 0         except Exception as e:
4345 0             self.update_db_2("nsrs", nsr_id, db_nsr_update)
4346 0             if (
4347                 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4348             ):  # not found
4349 0                 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
4350 0                 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4351 0                 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
4352 0                 self.logger.debug(
4353                     logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
4354                 )
4355 0             elif (
4356                 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4357             ):  # conflict
4358 0                 failed_detail.append("delete conflict: {}".format(e))
4359 0                 self.logger.debug(
4360                     logging_text
4361                     + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
4362                 )
4363             else:
4364 0                 failed_detail.append("delete error: {}".format(e))
4365 0                 self.logger.error(
4366                     logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
4367                 )
4368
4369         # Delete nsd
4370 0         if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
4371 0             ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
4372 0             try:
4373 0                 stage[2] = "Deleting nsd from RO."
4374 0                 db_nsr_update["detailed-status"] = " ".join(stage)
4375 0                 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4376 0                 self._write_op_status(nslcmop_id, stage)
4377 0                 await self.RO.delete("nsd", ro_nsd_id)
4378 0                 self.logger.debug(
4379                     logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
4380                 )
4381 0                 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
4382 0             except Exception as e:
4383 0                 if (
4384                     isinstance(e, ROclient.ROClientException) and e.http_code == 404
4385                 ):  # not found
4386 0                     db_nsr_update["_admin.deployed.RO.nsd_id"] = None
4387 0                     self.logger.debug(
4388                         logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
4389                     )
4390 0                 elif (
4391                     isinstance(e, ROclient.ROClientException) and e.http_code == 409
4392                 ):  # conflict
4393 0                     failed_detail.append(
4394                         "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
4395                     )
4396 0                     self.logger.debug(logging_text + failed_detail[-1])
4397                 else:
4398 0                     failed_detail.append(
4399                         "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
4400                     )
4401 0                     self.logger.error(logging_text + failed_detail[-1])
4402
4403 0         if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
4404 0             for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
4405 0                 if not vnf_deployed or not vnf_deployed["id"]:
4406 0                     continue
4407 0                 try:
4408 0                     ro_vnfd_id = vnf_deployed["id"]
4409 0                     stage[
4410                         2
4411                     ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
4412                         vnf_deployed["member-vnf-index"], ro_vnfd_id
4413                     )
4414 0                     db_nsr_update["detailed-status"] = " ".join(stage)
4415 0                     self.update_db_2("nsrs", nsr_id, db_nsr_update)
4416 0                     self._write_op_status(nslcmop_id, stage)
4417 0                     await self.RO.delete("vnfd", ro_vnfd_id)
4418 0                     self.logger.debug(
4419                         logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
4420                     )
4421 0                     db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
4422 0                 except Exception as e:
4423 0                     if (
4424                         isinstance(e, ROclient.ROClientException) and e.http_code == 404
4425                     ):  # not found
4426 0                         db_nsr_update[
4427                             "_admin.deployed.RO.vnfd.{}.id".format(index)
4428                         ] = None
4429 0                         self.logger.debug(
4430                             logging_text
4431                             + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
4432                         )
4433 0                     elif (
4434                         isinstance(e, ROclient.ROClientException) and e.http_code == 409
4435                     ):  # conflict
4436 0                         failed_detail.append(
4437                             "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
4438                         )
4439 0                         self.logger.debug(logging_text + failed_detail[-1])
4440                     else:
4441 0                         failed_detail.append(
4442                             "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
4443                         )
4444 0                         self.logger.error(logging_text + failed_detail[-1])
4445
4446 0         if failed_detail:
4447 0             stage[2] = "Error deleting from VIM"
4448         else:
4449 0             stage[2] = "Deleted from VIM"
4450 0         db_nsr_update["detailed-status"] = " ".join(stage)
4451 0         self.update_db_2("nsrs", nsr_id, db_nsr_update)
4452 0         self._write_op_status(nslcmop_id, stage)
4453
4454 0         if failed_detail:
4455 0             raise LcmException("; ".join(failed_detail))
4456
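# A hedged sketch of the stage bookkeeping used by _terminate_RO above: the caller passes a
# three-element list [stage, step, VIM-status]; this method only rewrites index 2, and the
# whole list is flattened into detailed-status with a space join. The values are hypothetical:

stage = ["Stage 3/3 delete all.", "Deleting ns from VIM.", ""]
stage[2] = "Deleting from VIM my-vim-status-info"  # hypothetical VIM status text
detailed_status = " ".join(stage)
# detailed_status == "Stage 3/3 delete all. Deleting ns from VIM. Deleting from VIM my-vim-status-info"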
4457 1     async def terminate(self, nsr_id, nslcmop_id):
4458         # Try to lock HA task here
4459 0         task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4460 0         if not task_is_locked_by_me:
4461 0             return
4462
4463 0         logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
4464 0         self.logger.debug(logging_text + "Enter")
4465 0         timeout_ns_terminate = self.timeout.ns_terminate
4466 0         db_nsr = None
4467 0         db_nslcmop = None
4468 0         operation_params = None
4469 0         exc = None
4470 0         error_list = []  # accumulates all failure error messages
4471 0         db_nslcmop_update = {}
4472 0         autoremove = False  # autoremove after terminated
4473 0         tasks_dict_info = {}
4474 0         db_nsr_update = {}
4475 0         stage = [
4476             "Stage 1/3: Preparing task.",
4477             "Waiting for previous operations to terminate.",
4478             "",
4479         ]
4480         # ^ contains [stage, step, VIM-status]
4481 0         try:
4482             # wait for any previous tasks in progress
4483 0             await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4484
4485 0             stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
4486 0             db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4487 0             operation_params = db_nslcmop.get("operationParams") or {}
4488 0             if operation_params.get("timeout_ns_terminate"):
4489 0                 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
4490 0             stage[1] = "Getting nsr={} from db.".format(nsr_id)
4491 0             db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4492
4493 0             db_nsr_update["operational-status"] = "terminating"
4494 0             db_nsr_update["config-status"] = "terminating"
4495 0             self._write_ns_status(
4496                 nsr_id=nsr_id,
4497                 ns_state="TERMINATING",
4498                 current_operation="TERMINATING",
4499                 current_operation_id=nslcmop_id,
4500                 other_update=db_nsr_update,
4501             )
4502 0             self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
4503 0             nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
4504 0             if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
4505 0                 return
4506
4507 0             stage[1] = "Getting vnf descriptors from db."
4508 0             db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
4509 0             db_vnfrs_dict = {
4510                 db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
4511             }
4512 0             db_vnfds_from_id = {}
4513 0             db_vnfds_from_member_index = {}
4514             # Loop over VNFRs
4515 0             for vnfr in db_vnfrs_list:
4516 0                 vnfd_id = vnfr["vnfd-id"]
4517 0                 if vnfd_id not in db_vnfds_from_id:
4518 0                     vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
4519 0                     db_vnfds_from_id[vnfd_id] = vnfd
4520 0                 db_vnfds_from_member_index[
4521                     vnfr["member-vnf-index-ref"]
4522                 ] = db_vnfds_from_id[vnfd_id]
4523
4524             # Destroy individual execution environments when there are terminating primitives.
4525             # The rest of the EEs will be deleted at once
4526             # TODO - check before calling _destroy_N2VC
4527             # if not operation_params.get("skip_terminate_primitives"):#
4528             # or not vca.get("needed_terminate"):
4529 0             stage[0] = "Stage 2/3 execute terminating primitives."
4530 0             self.logger.debug(logging_text + stage[0])
4531 0             stage[1] = "Looking execution environment that needs terminate."
4532 0             self.logger.debug(logging_text + stage[1])
4533
4534 0             for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
4535 0                 config_descriptor = None
4536 0                 vca_member_vnf_index = vca.get("member-vnf-index")
4537 0                 vca_id = self.get_vca_id(
4538                     db_vnfrs_dict.get(vca_member_vnf_index)
4539                     if vca_member_vnf_index
4540                     else None,
4541                     db_nsr,
4542                 )
4543 0                 if not vca or not vca.get("ee_id"):
4544 0                     continue
4545 0                 if not vca.get("member-vnf-index"):
4546                     # ns
4547 0                     config_descriptor = db_nsr.get("ns-configuration")
4548 0                 elif vca.get("vdu_id"):
4549 0                     db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4550 0                     config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
4551 0                 elif vca.get("kdu_name"):
4552 0                     db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4553 0                     config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
4554                 else:
4555 0                     db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4556 0                     config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
4557 0                 vca_type = vca.get("type")
4558 0                 exec_terminate_primitives = not operation_params.get(
4559                     "skip_terminate_primitives"
4560                 ) and vca.get("needed_terminate")
4561                 # For helm we must destroy the EE. Also for native_charm, as the juju model cannot be deleted if there are
4562                 # pending native charms
4563 0                 destroy_ee = (
4564                     True if vca_type in ("helm", "helm-v3", "native_charm") else False
4565                 )
4566                 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4567                 #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4568 0                 task = asyncio.ensure_future(
4569                     self.destroy_N2VC(
4570                         logging_text,
4571                         db_nslcmop,
4572                         vca,
4573                         config_descriptor,
4574                         vca_index,
4575                         destroy_ee,
4576                         exec_terminate_primitives,
4577                         vca_id=vca_id,
4578                     )
4579                 )
4580 0                 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
4581
4582             # wait for pending tasks of terminate primitives
4583 0             if tasks_dict_info:
4584 0                 self.logger.debug(
4585                     logging_text
4586                     + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
4587                 )
4588 0                 error_list = await self._wait_for_tasks(
4589                     logging_text,
4590                     tasks_dict_info,
4591                     min(self.timeout.charm_delete, timeout_ns_terminate),
4592                     stage,
4593                     nslcmop_id,
4594                 )
4595 0                 tasks_dict_info.clear()
4596 0                 if error_list:
4597 0                     return  # raise LcmException("; ".join(error_list))
4598
4599             # remove all execution environments at once
4600 0             stage[0] = "Stage 3/3 delete all."
4601
4602 0             if nsr_deployed.get("VCA"):
4603 0                 stage[1] = "Deleting all execution environments."
4604 0                 self.logger.debug(logging_text + stage[1])
4605 0                 vca_id = self.get_vca_id({}, db_nsr)
4606 0                 task_delete_ee = asyncio.ensure_future(
4607                     asyncio.wait_for(
4608                         self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
4609                         timeout=self.timeout.charm_delete,
4610                     )
4611                 )
4612                 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
4613 0                 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
4614
4615             # Delete Namespace and Certificates if necessary
4616 0             if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
4617 0                 await self.vca_map["helm-v3"].delete_tls_certificate(
4618                     certificate_name=db_nslcmop["nsInstanceId"],
4619                 )
4620                 # TODO: Delete namespace
4621
4622             # Delete from k8scluster
4623 0             stage[1] = "Deleting KDUs."
4624 0             self.logger.debug(logging_text + stage[1])
4625             # print(nsr_deployed)
4626 0             for kdu in get_iterable(nsr_deployed, "K8s"):
4627 0                 if not kdu or not kdu.get("kdu-instance"):
4628 0                     continue
4629 0                 kdu_instance = kdu.get("kdu-instance")
4630 0                 if kdu.get("k8scluster-type") in self.k8scluster_map:
4631                     # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4632 0                     vca_id = self.get_vca_id({}, db_nsr)
4633 0                     task_delete_kdu_instance = asyncio.ensure_future(
4634                         self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
4635                             cluster_uuid=kdu.get("k8scluster-uuid"),
4636                             kdu_instance=kdu_instance,
4637                             vca_id=vca_id,
4638                             namespace=kdu.get("namespace"),
4639                         )
4640                     )
4641                 else:
4642 0                     self.logger.error(
4643                         logging_text
4644                         + "Unknown k8s deployment type {}".format(
4645                             kdu.get("k8scluster-type")
4646                         )
4647                     )
4648 0                     continue
4649 0                 tasks_dict_info[
4650                     task_delete_kdu_instance
4651                 ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
4652
4653             # remove from RO
4654 0             stage[1] = "Deleting ns from VIM."
4655 0             if self.ro_config.ng:
4656 0                 task_delete_ro = asyncio.ensure_future(
4657                     self._terminate_ng_ro(
4658                         logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4659                     )
4660                 )
4661             else:
4662 0                 task_delete_ro = asyncio.ensure_future(
4663                     self._terminate_RO(
4664                         logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4665                     )
4666                 )
4667 0             tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
4668
4669             # the rest of the work will be done in the finally block
4670
4671 0         except (
4672             ROclient.ROClientException,
4673             DbException,
4674             LcmException,
4675             N2VCException,
4676         ) as e:
4677 0             self.logger.error(logging_text + "Exit Exception {}".format(e))
4678 0             exc = e
4679 0         except asyncio.CancelledError:
4680 0             self.logger.error(
4681                 logging_text + "Cancelled Exception while '{}'".format(stage[1])
4682             )
4683 0             exc = "Operation was cancelled"
4684 0         except Exception as e:
4685 0             exc = traceback.format_exc()
4686 0             self.logger.critical(
4687                 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
4688                 exc_info=True,
4689             )
4690         finally:
4691 0             if exc:
4692 0                 error_list.append(str(exc))
4693 0             try:
4694                 # wait for pending tasks
4695 0                 if tasks_dict_info:
4696 0                     stage[1] = "Waiting for terminate pending tasks."
4697 0                     self.logger.debug(logging_text + stage[1])
4698 0                     error_list += await self._wait_for_tasks(
4699                         logging_text,
4700                         tasks_dict_info,
4701                         timeout_ns_terminate,
4702                         stage,
4703                         nslcmop_id,
4704                     )
4705 0                 stage[1] = stage[2] = ""
4706 0             except asyncio.CancelledError:
4707 0                 error_list.append("Cancelled")
4708                 # TODO cancel all tasks
4709 0             except Exception as exc:
4710 0                 error_list.append(str(exc))
4711             # update status in the database
4712 0             if error_list:
4713 0                 error_detail = "; ".join(error_list)
4714                 # self.logger.error(logging_text + error_detail)
4715 0                 error_description_nslcmop = "{} Detail: {}".format(
4716                     stage[0], error_detail
4717                 )
4718 0                 error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
4719                     nslcmop_id, stage[0]
4720                 )
4721
4722 0                 db_nsr_update["operational-status"] = "failed"
4723 0                 db_nsr_update["detailed-status"] = (
4724                     error_description_nsr + " Detail: " + error_detail
4725                 )
4726 0                 db_nslcmop_update["detailed-status"] = error_detail
4727 0                 nslcmop_operation_state = "FAILED"
4728 0                 ns_state = "BROKEN"
4729             else:
4730 0                 error_detail = None
4731 0                 error_description_nsr = error_description_nslcmop = None
4732 0                 ns_state = "NOT_INSTANTIATED"
4733 0                 db_nsr_update["operational-status"] = "terminated"
4734 0                 db_nsr_update["detailed-status"] = "Done"
4735 0                 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
4736 0                 db_nslcmop_update["detailed-status"] = "Done"
4737 0                 nslcmop_operation_state = "COMPLETED"
4738
4739 0             if db_nsr:
4740 0                 self._write_ns_status(
4741                     nsr_id=nsr_id,
4742                     ns_state=ns_state,
4743                     current_operation="IDLE",
4744                     current_operation_id=None,
4745                     error_description=error_description_nsr,
4746                     error_detail=error_detail,
4747                     other_update=db_nsr_update,
4748                 )
4749 0             self._write_op_status(
4750                 op_id=nslcmop_id,
4751                 stage="",
4752                 error_message=error_description_nslcmop,
4753                 operation_state=nslcmop_operation_state,
4754                 other_update=db_nslcmop_update,
4755             )
4756 0             if ns_state == "NOT_INSTANTIATED":
4757 0                 try:
4758 0                     self.db.set_list(
4759                         "vnfrs",
4760                         {"nsr-id-ref": nsr_id},
4761                         {"_admin.nsState": "NOT_INSTANTIATED"},
4762                     )
4763 0                 except DbException as e:
4764 0                     self.logger.warn(
4765                         logging_text
4766                         + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4767                             nsr_id, e
4768                         )
4769                     )
4770 0             if operation_params:
4771 0                 autoremove = operation_params.get("autoremove", False)
4772 0             if nslcmop_operation_state:
4773 0                 try:
4774 0                     await self.msg.aiowrite(
4775                         "ns",
4776                         "terminated",
4777                         {
4778                             "nsr_id": nsr_id,
4779                             "nslcmop_id": nslcmop_id,
4780                             "operationState": nslcmop_operation_state,
4781                             "autoremove": autoremove,
4782                         },
4783                         loop=self.loop,
4784                     )
4785 0                 except Exception as e:
4786 0                     self.logger.error(
4787                         logging_text + "kafka_write notification Exception {}".format(e)
4788                     )
4789
4790 0             self.logger.debug(logging_text + "Exit")
4791 0             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4792
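# Illustrative sketch (not part of ns.py): shape of the Kafka message written to the "ns"
# topic when termination finishes, as built in the finally block above. The identifier
# values below are hypothetical placeholders.
terminated_notification = {
    "nsr_id": "example-nsr-id",
    "nslcmop_id": "example-nslcmop-id",
    "operationState": "COMPLETED",  # "FAILED" when error_list is not empty
    "autoremove": False,  # taken from operationParams.get("autoremove", False)
}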
4793 1     async def _wait_for_tasks(
4794         self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
4795     ):
4796 0         time_start = time()
4797 0         error_detail_list = []
4798 0         error_list = []
4799 0         pending_tasks = list(created_tasks_info.keys())
4800 0         num_tasks = len(pending_tasks)
4801 0         num_done = 0
4802 0         stage[1] = "{}/{}.".format(num_done, num_tasks)
4803 0         self._write_op_status(nslcmop_id, stage)
4804 0         while pending_tasks:
4805 0             new_error = None
4806 0             _timeout = timeout + time_start - time()
4807 0             done, pending_tasks = await asyncio.wait(
4808                 pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
4809             )
4810 0             num_done += len(done)
4811 0             if not done:  # Timeout
4812 0                 for task in pending_tasks:
4813 0                     new_error = created_tasks_info[task] + ": Timeout"
4814 0                     error_detail_list.append(new_error)
4815 0                     error_list.append(new_error)
4816 0                 break
4817 0             for task in done:
4818 0                 if task.cancelled():
4819 0                     exc = "Cancelled"
4820                 else:
4821 0                     exc = task.exception()
4822 0                 if exc:
4823 0                     if isinstance(exc, asyncio.TimeoutError):
4824 0                         exc = "Timeout"
4825 0                     new_error = created_tasks_info[task] + ": {}".format(exc)
4826 0                     error_list.append(created_tasks_info[task])
4827 0                     error_detail_list.append(new_error)
4828 0                     if isinstance(
4829                         exc,
4830                         (
4831                             str,
4832                             DbException,
4833                             N2VCException,
4834                             ROclient.ROClientException,
4835                             LcmException,
4836                             K8sException,
4837                             NgRoException,
4838                         ),
4839                     ):
4840 0                         self.logger.error(logging_text + new_error)
4841                     else:
4842 0                         exc_traceback = "".join(
4843                             traceback.format_exception(None, exc, exc.__traceback__)
4844                         )
4845 0                         self.logger.error(
4846                             logging_text
4847                             + created_tasks_info[task]
4848                             + " "
4849                             + exc_traceback
4850                         )
4851                 else:
4852 0                     self.logger.debug(
4853                         logging_text + created_tasks_info[task] + ": Done"
4854                     )
4855 0             stage[1] = "{}/{}.".format(num_done, num_tasks)
4856 0             if new_error:
4857 0                 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
4858 0                 if nsr_id:  # update also nsr
4859 0                     self.update_db_2(
4860                         "nsrs",
4861                         nsr_id,
4862                         {
4863                             "errorDescription": "Error at: " + ", ".join(error_list),
4864                             "errorDetail": ". ".join(error_detail_list),
4865                         },
4866                     )
4867 0             self._write_op_status(nslcmop_id, stage)
4868 0         return error_detail_list
4869
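# Illustrative sketch (not part of ns.py): a minimal standalone version of the waiting
# pattern used in _wait_for_tasks above, where pending tasks share one overall deadline
# and are collected with FIRST_COMPLETED so per-task errors can be reported as they
# finish. The task labels and sleep coroutines below are hypothetical.
import asyncio
from time import time

async def wait_all(tasks_info, timeout):
    start, errors = time(), []
    pending = list(tasks_info.keys())
    while pending:
        remaining = timeout + start - time()
        done, pending = await asyncio.wait(
            pending, timeout=remaining, return_when=asyncio.FIRST_COMPLETED
        )
        if not done:  # overall timeout reached
            errors += [tasks_info[t] + ": Timeout" for t in pending]
            break
        for t in done:
            exc = "Cancelled" if t.cancelled() else t.exception()
            if exc:
                errors.append("{}: {}".format(tasks_info[t], exc))
    return errors

async def main():
    t1 = asyncio.ensure_future(asyncio.sleep(0.1))
    t2 = asyncio.ensure_future(asyncio.sleep(0.2))
    print(await wait_all({t1: "task-1", t2: "task-2"}, timeout=1))  # -> []

asyncio.run(main())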
4870 1     @staticmethod
4871 1     def _map_primitive_params(primitive_desc, params, instantiation_params):
4872         """
4873         Generates the params to be provided to the charm before executing a primitive. If the user does not provide a
4874         parameter, the default-value is used. If the value is enclosed in < >, it is looked up in instantiation_params
4875         :param primitive_desc: portion of VNFD/NSD that describes primitive
4876         :param params: Params provided by user
4877         :param instantiation_params: Instantiation params provided by user
4878         :return: a dictionary with the calculated params
4879         """
4880 1         calculated_params = {}
4881 1         for parameter in primitive_desc.get("parameter", ()):
4882 1             param_name = parameter["name"]
4883 1             if param_name in params:
4884 0                 calculated_params[param_name] = params[param_name]
4885 1             elif "default-value" in parameter or "value" in parameter:
4886 1                 if "value" in parameter:
4887 1                     calculated_params[param_name] = parameter["value"]
4888                 else:
4889 0                     calculated_params[param_name] = parameter["default-value"]
4890 1                 if (
4891                     isinstance(calculated_params[param_name], str)
4892                     and calculated_params[param_name].startswith("<")
4893                     and calculated_params[param_name].endswith(">")
4894                 ):
4895 0                     if calculated_params[param_name][1:-1] in instantiation_params:
4896 0                         calculated_params[param_name] = instantiation_params[
4897                             calculated_params[param_name][1:-1]
4898                         ]
4899                     else:
4900 0                         raise LcmException(
4901                             "Parameter {} needed to execute primitive {} not provided".format(
4902                                 calculated_params[param_name], primitive_desc["name"]
4903                             )
4904                         )
4905             else:
4906 0                 raise LcmException(
4907                     "Parameter {} needed to execute primitive {} not provided".format(
4908                         param_name, primitive_desc["name"]
4909                     )
4910                 )
4911
4912 1             if isinstance(calculated_params[param_name], (dict, list, tuple)):
4913 0                 calculated_params[param_name] = yaml.safe_dump(
4914                     calculated_params[param_name], default_flow_style=True, width=256
4915                 )
4916 1             elif isinstance(calculated_params[param_name], str) and calculated_params[
4917                 param_name
4918             ].startswith("!!yaml "):
4919 0                 calculated_params[param_name] = calculated_params[param_name][7:]
4920 1             if parameter.get("data-type") == "INTEGER":
4921 0                 try:
4922 0                     calculated_params[param_name] = int(calculated_params[param_name])
4923 0                 except ValueError:  # error converting string to int
4924 0                     raise LcmException(
4925                         "Parameter {} of primitive {} must be integer".format(
4926                             param_name, primitive_desc["name"]
4927                         )
4928                     )
4929 1             elif parameter.get("data-type") == "BOOLEAN":
4930 0                 calculated_params[param_name] = not (
4931                     (str(calculated_params[param_name])).lower() == "false"
4932                 )
4933
4934         # always add ns_config_info if the primitive name is config
4935 1         if primitive_desc["name"] == "config":
4936 0             if "ns_config_info" in instantiation_params:
4937 0                 calculated_params["ns_config_info"] = instantiation_params[
4938                     "ns_config_info"
4939                 ]
4940 1         return calculated_params
4941
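# Illustrative sketch (not part of ns.py): a simplified, standalone version of the
# parameter-mapping rules implemented above, showing how a "<name>" default is resolved
# from instantiation params and how INTEGER/BOOLEAN values are coerced. The descriptor
# and parameter names below are hypothetical.
def map_primitive_params_sketch(primitive_desc, params, instantiation_params):
    calculated = {}
    for parameter in primitive_desc.get("parameter", ()):
        name = parameter["name"]
        if name in params:
            calculated[name] = params[name]
        elif "value" in parameter or "default-value" in parameter:
            value = parameter.get("value", parameter.get("default-value"))
            if isinstance(value, str) and value.startswith("<") and value.endswith(">"):
                value = instantiation_params[value[1:-1]]  # KeyError if not provided
            calculated[name] = value
        else:
            raise ValueError("parameter {} not provided".format(name))
        if parameter.get("data-type") == "INTEGER":
            calculated[name] = int(calculated[name])
        elif parameter.get("data-type") == "BOOLEAN":
            calculated[name] = str(calculated[name]).lower() != "false"
    return calculated

# Example: "<vim_password>" is taken from instantiation params; "replicas" becomes an int.
desc = {
    "name": "touch",
    "parameter": [
        {"name": "password", "default-value": "<vim_password>"},
        {"name": "replicas", "value": "3", "data-type": "INTEGER"},
    ],
}
print(map_primitive_params_sketch(desc, {}, {"vim_password": "s3cret"}))
# -> {'password': 's3cret', 'replicas': 3}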
4942 1     def _look_for_deployed_vca(
4943         self,
4944         deployed_vca,
4945         member_vnf_index,
4946         vdu_id,
4947         vdu_count_index,
4948         kdu_name=None,
4949         ee_descriptor_id=None,
4950     ):
4951         # find the vca_deployed record for this action. Raise LcmException if it is not found or has no ee_id.
4952 0         for vca in deployed_vca:
4953 0             if not vca:
4954 0                 continue
4955 0             if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4956 0                 continue
4957 0             if (
4958                 vdu_count_index is not None
4959                 and vdu_count_index != vca["vdu_count_index"]
4960             ):
4961 0                 continue
4962 0             if kdu_name and kdu_name != vca["kdu_name"]:
4963 0                 continue
4964 0             if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4965 0                 continue
4966 0             break
4967         else:
4968             # vca_deployed not found
4969 0             raise LcmException(
4970                 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4971                 " is not deployed".format(
4972                     member_vnf_index,
4973                     vdu_id,
4974                     vdu_count_index,
4975                     kdu_name,
4976                     ee_descriptor_id,
4977                 )
4978             )
4979         # get ee_id
4980 0         ee_id = vca.get("ee_id")
4981 0         vca_type = vca.get(
4982             "type", "lxc_proxy_charm"
4983         )  # default value for backward compatibility - proxy charm
4984 0         if not ee_id:
4985 0             raise LcmException(
4986                 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4987                 "execution environment".format(
4988                     member_vnf_index, vdu_id, kdu_name, vdu_count_index
4989                 )
4990             )
4991 0         return ee_id, vca_type
4992
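# Illustrative sketch (not part of ns.py): the for/else idiom used in _look_for_deployed_vca
# above. The else branch runs only when the loop finishes without hitting "break", i.e. when
# no matching record was found. The records below are hypothetical.
records = [{"id": "a"}, {"id": "b"}]
for record in records:
    if record["id"] == "b":
        break
else:
    raise LookupError("record not found")
print(record)  # -> {'id': 'b'}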
4993 1     async def _ns_execute_primitive(
4994         self,
4995         ee_id,
4996         primitive,
4997         primitive_params,
4998         retries=0,
4999         retries_interval=30,
5000         timeout=None,
5001         vca_type=None,
5002         db_dict=None,
5003         vca_id: str = None,
5004     ) -> (str, str):
5005 0         try:
5006 0             if primitive == "config":
5007 0                 primitive_params = {"params": primitive_params}
5008
5009 0             vca_type = vca_type or "lxc_proxy_charm"
5010
5011 0             while retries >= 0:
5012 0                 try:
5013 0                     output = await asyncio.wait_for(
5014                         self.vca_map[vca_type].exec_primitive(
5015                             ee_id=ee_id,
5016                             primitive_name=primitive,
5017                             params_dict=primitive_params,
5018                             progress_timeout=self.timeout.progress_primitive,
5019                             total_timeout=self.timeout.primitive,
5020                             db_dict=db_dict,
5021                             vca_id=vca_id,
5022                             vca_type=vca_type,
5023                         ),
5024                         timeout=timeout or self.timeout.primitive,
5025                     )
5026                     # execution was OK
5027 0                     break
5028 0                 except asyncio.CancelledError:
5029 0                     raise
5030 0                 except Exception as e:
5031 0                     retries -= 1
5032 0                     if retries >= 0:
5033 0                         self.logger.debug(
5034                             "Error executing action {} on {} -> {}".format(
5035                                 primitive, ee_id, e
5036                             )
5037                         )
5038                         # wait and retry
5039 0                         await asyncio.sleep(retries_interval, loop=self.loop)
5040                     else:
5041 0                         if isinstance(e, asyncio.TimeoutError):
5042 0                             e = N2VCException(
5043                                 message="Timed out waiting for action to complete"
5044                             )
5045 0                         return "FAILED", getattr(e, "message", repr(e))
5046
5047 0             return "COMPLETED", output
5048
5049 0         except (LcmException, asyncio.CancelledError):
5050 0             raise
5051 0         except Exception as e:
5052 0             return "FAIL", "Error executing action {}: {}".format(primitive, e)
5053
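# Illustrative sketch (not part of ns.py): the retry pattern used in _ns_execute_primitive
# above, where each attempt is bounded by asyncio.wait_for and failed attempts are retried
# after a pause. The flaky() coroutine and the retry figures are hypothetical.
import asyncio

async def exec_with_retries(make_coro, retries=2, retries_interval=0.1, timeout=5):
    while retries >= 0:
        try:
            return "COMPLETED", await asyncio.wait_for(make_coro(), timeout=timeout)
        except asyncio.CancelledError:
            raise
        except Exception as e:
            retries -= 1
            if retries < 0:
                return "FAILED", repr(e)
            await asyncio.sleep(retries_interval)  # wait and retry

attempts = {"count": 0}

async def flaky():
    attempts["count"] += 1
    if attempts["count"] < 2:
        raise RuntimeError("transient failure")
    return "ok"

print(asyncio.run(exec_with_retries(flaky)))  # -> ('COMPLETED', 'ok')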
5054 1     async def vca_status_refresh(self, nsr_id, nslcmop_id):
5055         """
5056         Updates the vca_status in the nsrs record with the latest Juju information
5057         :param: nsr_id: Id of the nsr
5058         :param: nslcmop_id: Id of the nslcmop
5059         :return: None
5060         """
5061
5062 1         self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5063 1         db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5064 1         vca_id = self.get_vca_id({}, db_nsr)
5065 1         if db_nsr["_admin"]["deployed"]["K8s"]:
5066 0             for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5067 0                 cluster_uuid, kdu_instance, cluster_type = (
5068                     k8s["k8scluster-uuid"],
5069                     k8s["kdu-instance"],
5070                     k8s["k8scluster-type"],
5071                 )
5072 0                 await self._on_update_k8s_db(
5073                     cluster_uuid=cluster_uuid,
5074                     kdu_instance=kdu_instance,
5075                     filter={"_id": nsr_id},
5076                     vca_id=vca_id,
5077                     cluster_type=cluster_type,
5078                 )
5079         else:
5080 1             for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5081 1                 table, filter = "nsrs", {"_id": nsr_id}
5082 1                 path = "_admin.deployed.VCA.{}.".format(vca_index)
5083 1                 await self._on_update_n2vc_db(table, filter, path, {})
5084
5085 1         self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5086 1         self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5087
5088 1     async def action(self, nsr_id, nslcmop_id):
5089         # Try to lock HA task here
5090 0         task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5091 0         if not task_is_locked_by_me:
5092 0             return
5093
5094 0         logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
5095 0         self.logger.debug(logging_text + "Enter")
5096         # get all needed data from the database
5097 0         db_nsr = None
5098 0         db_nslcmop = None
5099 0         db_nsr_update = {}
5100 0         db_nslcmop_update = {}
5101 0         nslcmop_operation_state = None
5102 0         error_description_nslcmop = None
5103 0         exc = None
5104 0         try:
5105             # wait for any previous tasks in progress
5106 0             step = "Waiting for previous operations to terminate"
5107 0             await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5108
5109 0             self._write_ns_status(
5110                 nsr_id=nsr_id,
5111                 ns_state=None,
5112                 current_operation="RUNNING ACTION",
5113                 current_operation_id=nslcmop_id,
5114             )
5115
5116 0             step = "Getting information from database"
5117 0             db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5118 0             db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5119 0             if db_nslcmop["operationParams"].get("primitive_params"):
5120 0                 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
5121                     db_nslcmop["operationParams"]["primitive_params"]
5122                 )
5123
5124 0             nsr_deployed = db_nsr["_admin"].get("deployed")
5125 0             vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
5126 0             vdu_id = db_nslcmop["operationParams"].get("vdu_id")
5127 0             kdu_name = db_nslcmop["operationParams"].get("kdu_name")
5128 0             vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
5129 0             primitive = db_nslcmop["operationParams"]["primitive"]
5130 0             primitive_params = db_nslcmop["operationParams"]["primitive_params"]
5131 0             timeout_ns_action = db_nslcmop["operationParams"].get(
5132                 "timeout_ns_action", self.timeout.primitive
5133             )
5134
5135 0             if vnf_index:
5136 0                 step = "Getting vnfr from database"
5137 0                 db_vnfr = self.db.get_one(
5138                     "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5139                 )
5140 0                 if db_vnfr.get("kdur"):
5141 0                     kdur_list = []
5142 0                     for kdur in db_vnfr["kdur"]:
5143 0                         if kdur.get("additionalParams"):
5144 0                             kdur["additionalParams"] = json.loads(
5145                                 kdur["additionalParams"]
5146                             )
5147 0                         kdur_list.append(kdur)
5148 0                     db_vnfr["kdur"] = kdur_list
5149 0                 step = "Getting vnfd from database"
5150 0                 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
5151
5152                 # Sync filesystem before running a primitive
5153 0                 self.fs.sync(db_vnfr["vnfd-id"])
5154             else:
5155 0                 step = "Getting nsd from database"
5156 0                 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
5157
5158 0             vca_id = self.get_vca_id(db_vnfr, db_nsr)
5159             # for backward compatibility
5160 0             if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5161 0                 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5162 0                 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5163 0                 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5164
5165             # look for primitive
5166 0             config_primitive_desc = descriptor_configuration = None
5167 0             if vdu_id:
5168 0                 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
5169 0             elif kdu_name:
5170 0                 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
5171 0             elif vnf_index:
5172 0                 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
5173             else:
5174 0                 descriptor_configuration = db_nsd.get("ns-configuration")
5175
5176 0             if descriptor_configuration and descriptor_configuration.get(
5177                 "config-primitive"
5178             ):
5179 0                 for config_primitive in descriptor_configuration["config-primitive"]:
5180 0                     if config_primitive["name"] == primitive:
5181 0                         config_primitive_desc = config_primitive
5182 0                         break
5183
5184 0             if not config_primitive_desc:
5185 0                 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
5186 0                     raise LcmException(
5187                         "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5188                             primitive
5189                         )
5190                     )
5191 0                 primitive_name = primitive
5192 0                 ee_descriptor_id = None
5193             else:
5194 0                 primitive_name = config_primitive_desc.get(
5195                     "execution-environment-primitive", primitive
5196                 )
5197 0                 ee_descriptor_id = config_primitive_desc.get(
5198                     "execution-environment-ref"
5199                 )
5200
5201 0             if vnf_index:
5202 0                 if vdu_id:
5203 0                     vdur = next(
5204                         (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
5205                     )
5206 0                     desc_params = parse_yaml_strings(vdur.get("additionalParams"))
5207 0                 elif kdu_name:
5208 0                     kdur = next(
5209                         (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
5210                     )
5211 0                     desc_params = parse_yaml_strings(kdur.get("additionalParams"))
5212                 else:
5213 0                     desc_params = parse_yaml_strings(
5214                         db_vnfr.get("additionalParamsForVnf")
5215                     )
5216             else:
5217 0                 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
5218 0             if kdu_name and get_configuration(db_vnfd, kdu_name):
5219 0                 kdu_configuration = get_configuration(db_vnfd, kdu_name)
5220 0                 actions = set()
5221 0                 for primitive in kdu_configuration.get("initial-config-primitive", []):
5222 0                     actions.add(primitive["name"])
5223 0                 for primitive in kdu_configuration.get("config-primitive", []):
5224 0                     actions.add(primitive["name"])
5225 0                 kdu = find_in_list(
5226                     nsr_deployed["K8s"],
5227                     lambda kdu: kdu_name == kdu["kdu-name"]
5228                     and kdu["member-vnf-index"] == vnf_index,
5229                 )
5230 0                 kdu_action = (
5231                     True
5232                     if primitive_name in actions
5233                     and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
5234                     else False
5235                 )
5236
5237             # TODO check if ns is in a proper status
5238 0             if kdu_name and (
5239                 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5240             ):
5241                 # kdur and desc_params already set from before
5242 0                 if primitive_params:
5243 0                     desc_params.update(primitive_params)
5244                 # TODO Check if we will need something at vnf level
5245 0                 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
5246 0                     if (
5247                         kdu_name == kdu["kdu-name"]
5248                         and kdu["member-vnf-index"] == vnf_index
5249                     ):
5250 0                         break
5251                 else:
5252 0                     raise LcmException(
5253                         "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5254                     )
5255
5256 0                 if kdu.get("k8scluster-type") not in self.k8scluster_map:
5257 0                     msg = "unknown k8scluster-type '{}'".format(
5258                         kdu.get("k8scluster-type")
5259                     )
5260 0                     raise LcmException(msg)
5261
5262 0                 db_dict = {
5263                     "collection": "nsrs",
5264                     "filter": {"_id": nsr_id},
5265                     "path": "_admin.deployed.K8s.{}".format(index),
5266                 }
5267 0                 self.logger.debug(
5268                     logging_text
5269                     + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5270                 )
5271 0                 step = "Executing kdu {}".format(primitive_name)
5272 0                 if primitive_name == "upgrade":
5273 0                     if desc_params.get("kdu_model"):
5274 0                         kdu_model = desc_params.get("kdu_model")
5275 0                         del desc_params["kdu_model"]
5276                     else:
5277 0                         kdu_model = kdu.get("kdu-model")
5278 0                         parts = kdu_model.split(sep=":")
5279 0                         if len(parts) == 2:
5280 0                             kdu_model = parts[0]
5281 0                     if desc_params.get("kdu_atomic_upgrade"):
5282 0                         atomic_upgrade = desc_params.get(
5283                             "kdu_atomic_upgrade"
5284                         ).lower() in ("yes", "true", "1")
5285 0                         del desc_params["kdu_atomic_upgrade"]
5286                     else:
5287 0                         atomic_upgrade = True
5288
5289 0                     detailed_status = await asyncio.wait_for(
5290                         self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5291                             cluster_uuid=kdu.get("k8scluster-uuid"),
5292                             kdu_instance=kdu.get("kdu-instance"),
5293                             atomic=atomic_upgrade,
5294                             kdu_model=kdu_model,
5295                             params=desc_params,
5296                             db_dict=db_dict,
5297                             timeout=timeout_ns_action,
5298                         ),
5299                         timeout=timeout_ns_action + 10,
5300                     )
5301 0                     self.logger.debug(
5302                         logging_text + " Upgrade of kdu {} done".format(detailed_status)
5303                     )
5304 0                 elif primitive_name == "rollback":
5305 0                     detailed_status = await asyncio.wait_for(
5306                         self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5307                             cluster_uuid=kdu.get("k8scluster-uuid"),
5308                             kdu_instance=kdu.get("kdu-instance"),
5309                             db_dict=db_dict,
5310                         ),
5311                         timeout=timeout_ns_action,
5312                     )
5313 0                 elif primitive_name == "status":
5314 0                     detailed_status = await asyncio.wait_for(
5315                         self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5316                             cluster_uuid=kdu.get("k8scluster-uuid"),
5317                             kdu_instance=kdu.get("kdu-instance"),
5318                             vca_id=vca_id,
5319                         ),
5320                         timeout=timeout_ns_action,
5321                     )
5322                 else:
5323 0                     kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5324                         kdu["kdu-name"], nsr_id
5325                     )
5326 0                     params = self._map_primitive_params(
5327                         config_primitive_desc, primitive_params, desc_params
5328                     )
5329
5330 0                     detailed_status = await asyncio.wait_for(
5331                         self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5332                             cluster_uuid=kdu.get("k8scluster-uuid"),
5333                             kdu_instance=kdu_instance,
5334                             primitive_name=primitive_name,
5335                             params=params,
5336                             db_dict=db_dict,
5337                             timeout=timeout_ns_action,
5338                             vca_id=vca_id,
5339                         ),
5340                         timeout=timeout_ns_action,
5341                     )
5342
5343 0                 if detailed_status:
5344 0                     nslcmop_operation_state = "COMPLETED"
5345                 else:
5346 0                     detailed_status = ""
5347 0                     nslcmop_operation_state = "FAILED"
5348             else:
5349 0                 ee_id, vca_type = self._look_for_deployed_vca(
5350                     nsr_deployed["VCA"],
5351                     member_vnf_index=vnf_index,
5352                     vdu_id=vdu_id,
5353                     vdu_count_index=vdu_count_index,
5354                     ee_descriptor_id=ee_descriptor_id,
5355                 )
5356 0                 for vca_index, vca_deployed in enumerate(
5357                     db_nsr["_admin"]["deployed"]["VCA"]
5358                 ):
5359 0                     if vca_deployed.get("member-vnf-index") == vnf_index:
5360 0                         db_dict = {
5361                             "collection": "nsrs",
5362                             "filter": {"_id": nsr_id},
5363                             "path": "_admin.deployed.VCA.{}.".format(vca_index),
5364                         }
5365 0                         break
5366 0                 (
5367                     nslcmop_operation_state,
5368                     detailed_status,
5369                 ) = await self._ns_execute_primitive(
5370                     ee_id,
5371                     primitive=primitive_name,
5372                     primitive_params=self._map_primitive_params(
5373                         config_primitive_desc, primitive_params, desc_params
5374                     ),
5375                     timeout=timeout_ns_action,
5376                     vca_type=vca_type,
5377                     db_dict=db_dict,
5378                     vca_id=vca_id,
5379                 )
5380
5381 0             db_nslcmop_update["detailed-status"] = detailed_status
5382 0             error_description_nslcmop = (
5383                 detailed_status if nslcmop_operation_state == "FAILED" else ""
5384             )
5385 0             self.logger.debug(
5386                 logging_text
5387                 + "Done with result {} {}".format(
5388                     nslcmop_operation_state, detailed_status
5389                 )
5390             )
5391 0             return  # database update is called inside finally
5392
5393 0         except (DbException, LcmException, N2VCException, K8sException) as e:
5394 0             self.logger.error(logging_text + "Exit Exception {}".format(e))
5395 0             exc = e
5396 0         except asyncio.CancelledError:
5397 0             self.logger.error(
5398                 logging_text + "Cancelled Exception while '{}'".format(step)
5399             )
5400 0             exc = "Operation was cancelled"
5401 0         except asyncio.TimeoutError:
5402 0             self.logger.error(logging_text + "Timeout while '{}'".format(step))
5403 0             exc = "Timeout"
5404 0         except Exception as e:
5405 0             exc = traceback.format_exc()
5406 0             self.logger.critical(
5407                 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5408                 exc_info=True,
5409             )
5410         finally:
5411 0             if exc:
5412 0                 db_nslcmop_update[
5413                     "detailed-status"
5414                 ] = (
5415                     detailed_status
5416                 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5417 0                 nslcmop_operation_state = "FAILED"
5418 0             if db_nsr:
5419 0                 self._write_ns_status(
5420                     nsr_id=nsr_id,
5421                     ns_state=db_nsr[
5422                         "nsState"
5423                     ],  # TODO check if degraded. For the moment use previous status
5424                     current_operation="IDLE",
5425                     current_operation_id=None,
5426                     # error_description=error_description_nsr,
5427                     # error_detail=error_detail,
5428                     other_update=db_nsr_update,
5429                 )
5430
5431 0             self._write_op_status(
5432                 op_id=nslcmop_id,
5433                 stage="",
5434                 error_message=error_description_nslcmop,
5435                 operation_state=nslcmop_operation_state,
5436                 other_update=db_nslcmop_update,
5437             )
5438
5439 0             if nslcmop_operation_state:
5440 0                 try:
5441 0                     await self.msg.aiowrite(
5442                         "ns",
5443                         "actioned",
5444                         {
5445                             "nsr_id": nsr_id,
5446                             "nslcmop_id": nslcmop_id,
5447                             "operationState": nslcmop_operation_state,
5448                         },
5449                         loop=self.loop,
5450                     )
5451 0                 except Exception as e:
5452 0                     self.logger.error(
5453                         logging_text + "kafka_write notification Exception {}".format(e)
5454                     )
5455 0             self.logger.debug(logging_text + "Exit")
5456 0             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5457 0             return nslcmop_operation_state, detailed_status
5458
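# Illustrative sketch (not part of ns.py): example operationParams consumed by action()
# above when executing a day-2 primitive on a VNF member. All values are hypothetical;
# note that primitive_params is stored JSON-encoded and decoded with json.loads above.
action_operation_params = {
    "member_vnf_index": "1",
    "vdu_id": None,
    "kdu_name": None,
    "vdu_count_index": None,
    "primitive": "touch",
    "primitive_params": '{"filename": "/home/ubuntu/touched"}',
    "timeout_ns_action": 3600,
}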
5459 1     async def terminate_vdus(
5460         self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5461     ):
5462         """This method terminates VDUs
5463
5464         Args:
5465             db_vnfr: VNF instance record
5466             member_vnf_index: VNF index to identify the VDUs to be removed
5467             db_nsr: NS instance record
5468             update_db_nslcmops: Nslcmop update record
5469         """
5470 1         vca_scaling_info = []
5471 1         scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5472 1         scaling_info["scaling_direction"] = "IN"
5473 1         scaling_info["vdu-delete"] = {}
5474 1         scaling_info["kdu-delete"] = {}
5475 1         db_vdur = db_vnfr.get("vdur")
5476 1         vdur_list = copy(db_vdur)
5477 1         count_index = 0
5478 1         for index, vdu in enumerate(vdur_list):
5479 1             vca_scaling_info.append(
5480                 {
5481                     "osm_vdu_id": vdu["vdu-id-ref"],
5482                     "member-vnf-index": member_vnf_index,
5483                     "type": "delete",
5484                     "vdu_index": count_index,
5485                 }
5486             )
5487 1             scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5488 1             scaling_info["vdu"].append(
5489                 {
5490                     "name": vdu.get("name") or vdu.get("vdu-name"),
5491                     "vdu_id": vdu["vdu-id-ref"],
5492                     "interface": [],
5493                 }
5494             )
5495 1             for interface in vdu["interfaces"]:
5496 1                 scaling_info["vdu"][index]["interface"].append(
5497                     {
5498                         "name": interface["name"],
5499                         "ip_address": interface["ip-address"],
5500                         "mac_address": interface.get("mac-address"),
5501                     }
5502                 )
5503 1             self.logger.info("NS update scaling info{}".format(scaling_info))
5504 1             stage[2] = "Terminating VDUs"
5505 1             if scaling_info.get("vdu-delete"):
5506                 # scale_process = "RO"
5507 1                 if self.ro_config.ng:
5508 1                     await self._scale_ng_ro(
5509                         logging_text,
5510                         db_nsr,
5511                         update_db_nslcmops,
5512                         db_vnfr,
5513                         scaling_info,
5514                         stage,
5515                     )
5516
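# Illustrative sketch (not part of ns.py): the shape of the scaling_info structure built by
# terminate_vdus above for a VNF with one VDU, as handed to the NG-RO scaling call.
# All names, addresses and indexes below are hypothetical.
scaling_info_example = {
    "scaling_group_name": "vdu_autoscale",
    "scaling_direction": "IN",
    "vdu-delete": {"mgmt-vdu": 0},  # vdu-id-ref -> count index
    "kdu-delete": {},
    "kdu": [],
    "vdu": [
        {
            "name": "vnf1-mgmt-vdu-0",
            "vdu_id": "mgmt-vdu",
            "interface": [
                {
                    "name": "eth0",
                    "ip_address": "10.0.0.5",
                    "mac_address": "fa:16:3e:00:00:01",
                }
            ],
        }
    ],
}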
5517 1     async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5518         """This method is to Remove VNF instances from NS.
5519
5520         Args:
5521             nsr_id: NS instance id
5522             nslcmop_id: nslcmop id of update
5523             vnf_instance_id: id of the VNF instance to be removed
5524
5525         Returns:
5526             result: (str, str) COMPLETED/FAILED, details
5527         """
5528 1         try:
5529 1             db_nsr_update = {}
5530 1             logging_text = "Task ns={} update ".format(nsr_id)
5531 1             check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5532 1             self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5533 1             if check_vnfr_count > 1:
5534 1                 stage = ["", "", ""]
5535 1                 step = "Getting nslcmop from database"
5536 1                 self.logger.debug(
5537                     step + " after having waited for previous tasks to be completed"
5538                 )
5539                 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5540 1                 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5541 1                 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5542 1                 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5543                 """ db_vnfr = self.db.get_one(
5544                     "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5545
5546 1                 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5547 1                 await self.terminate_vdus(
5548                     db_vnfr,
5549                     member_vnf_index,
5550                     db_nsr,
5551                     update_db_nslcmops,
5552                     stage,
5553                     logging_text,
5554                 )
5555
5556 1                 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5557 1                 constituent_vnfr.remove(db_vnfr.get("_id"))
5558 1                 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5559                     "constituent-vnfr-ref"
5560                 )
5561 1                 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5562 1                 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5563 1                 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5564 1                 return "COMPLETED", "Done"
5565             else:
5566 0                 step = "Terminate VNF Failed with"
5567 0                 raise LcmException(
5568                     "{} Cannot terminate the last VNF in this NS.".format(
5569                         vnf_instance_id
5570                     )
5571                 )
5572 0         except (LcmException, asyncio.CancelledError):
5573 0             raise
5574 0         except Exception as e:
5575 0             self.logger.debug("Error removing VNF {}".format(e))
5576 0             return "FAILED", "Error removing VNF {}".format(e)
5577
5578 1     async def _ns_redeploy_vnf(
5579         self,
5580         nsr_id,
5581         nslcmop_id,
5582         db_vnfd,
5583         db_vnfr,
5584         db_nsr,
5585     ):
5586         """This method updates and redeploys VNF instances
5587
5588         Args:
5589             nsr_id: NS instance id
5590             nslcmop_id:   nslcmop id
5591             db_vnfd: VNF descriptor
5592             db_vnfr: VNF instance record
5593             db_nsr: NS instance record
5594
5595         Returns:
5596             result: (str, str) COMPLETED/FAILED, details
5597         """
5598 0         try:
5599 0             count_index = 0
5600 0             stage = ["", "", ""]
5601 0             logging_text = "Task ns={} update ".format(nsr_id)
5602 0             latest_vnfd_revision = db_vnfd["_admin"].get("revision")
5603 0             member_vnf_index = db_vnfr["member-vnf-index-ref"]
5604
5605             # Terminate old VNF resources
5606 0             update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5607 0             await self.terminate_vdus(
5608                 db_vnfr,
5609                 member_vnf_index,
5610                 db_nsr,
5611                 update_db_nslcmops,
5612                 stage,
5613                 logging_text,
5614             )
5615
5616             # old_vnfd_id = db_vnfr["vnfd-id"]
5617             # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
5618 0             new_db_vnfd = db_vnfd
5619             # new_vnfd_ref = new_db_vnfd["id"]
5620             # new_vnfd_id = vnfd_id
5621
5622             # Create VDUR
5623 0             new_vnfr_cp = []
5624 0             for cp in new_db_vnfd.get("ext-cpd", ()):
5625 0                 vnf_cp = {
5626                     "name": cp.get("id"),
5627                     "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
5628                     "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
5629                     "id": cp.get("id"),
5630                 }
5631 0                 new_vnfr_cp.append(vnf_cp)
5632 0             new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
5633             # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
5634             # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
5635 0             new_vnfr_update = {
5636                 "revision": latest_vnfd_revision,
5637                 "connection-point": new_vnfr_cp,
5638                 "vdur": new_vdur,
5639                 "ip-address": "",
5640             }
5641 0             self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
5642 0             updated_db_vnfr = self.db.get_one(
5643                 "vnfrs",
5644                 {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
5645             )
5646
5647             # Instantiate new VNF resources
5648             # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5649 0             vca_scaling_info = []
5650 0             scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5651 0             scaling_info["scaling_direction"] = "OUT"
5652 0             scaling_info["vdu-create"] = {}
5653 0             scaling_info["kdu-create"] = {}
5654 0             vdud_instantiate_list = db_vnfd["vdu"]
5655 0             for index, vdud in enumerate(vdud_instantiate_list):
5656 0                 cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
5657 0                 if cloud_init_text:
5658 0                     additional_params = (
5659                         self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
5660                         or {}
5661                     )
5662 0                 cloud_init_list = []
5663 0                 if cloud_init_text:
5664                     # TODO Information of its own ip is not available because db_vnfr is not updated.
5665 0                     additional_params["OSM"] = get_osm_params(
5666                         updated_db_vnfr, vdud["id"], 1
5667                     )
5668 0                     cloud_init_list.append(
5669                         self._parse_cloud_init(
5670                             cloud_init_text,
5671                             additional_params,
5672                             db_vnfd["id"],
5673                             vdud["id"],
5674                         )
5675                     )
5676 0                     vca_scaling_info.append(
5677                         {
5678                             "osm_vdu_id": vdud["id"],
5679                             "member-vnf-index": member_vnf_index,
5680                             "type": "create",
5681                             "vdu_index": count_index,
5682                         }
5683                     )
5684 0                 scaling_info["vdu-create"][vdud["id"]] = count_index
5685 0             if self.ro_config.ng:
5686 0                 self.logger.debug(
5687                     "New Resources to be deployed: {}".format(scaling_info)
5688                 )
5689 0                 await self._scale_ng_ro(
5690                     logging_text,
5691                     db_nsr,
5692                     update_db_nslcmops,
5693                     updated_db_vnfr,
5694                     scaling_info,
5695                     stage,
5696                 )
5697 0                 return "COMPLETED", "Done"
5698 0         except (LcmException, asyncio.CancelledError):
5699 0             raise
5700 0         except Exception as e:
5701 0             self.logger.debug("Error updating VNF {}".format(e))
5702 0             return "FAILED", "Error updating VNF {}".format(e)
5703
5704 1     async def _ns_charm_upgrade(
5705         self,
5706         ee_id,
5707         charm_id,
5708         charm_type,
5709         path,
5710         timeout: float = None,
5711     ) -> (str, str):
5712         """This method upgrade charms in VNF instances
5713
5714         Args:
5715             ee_id:  Execution environment id
5716             path:   Local path to the charm
5717             charm_id: charm-id
5718             charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5719             timeout: (Float)    Timeout for the ns update operation
5720
5721         Returns:
5722             result: (str, str) COMPLETED/FAILED, details
5723         """
5724 0         try:
5725 0             charm_type = charm_type or "lxc_proxy_charm"
5726 0             output = await self.vca_map[charm_type].upgrade_charm(
5727                 ee_id=ee_id,
5728                 path=path,
5729                 charm_id=charm_id,
5730                 charm_type=charm_type,
5731                 timeout=timeout or self.timeout.ns_update,
5732             )
5733
5734 0             if output:
5735 0                 return "COMPLETED", output
5736
5737 0         except (LcmException, asyncio.CancelledError):
5738 0             raise
5739
5740 0         except Exception as e:
5741 0             self.logger.debug("Error upgrading charm {}".format(path))
5742
5743 0             return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5744
5745 1     async def update(self, nsr_id, nslcmop_id):
5746         """Update NS according to different update types
5747
5748         This method performs an upgrade of the VNF instances and then updates the revision
5749         number in the VNF record
5750
5751         Args:
5752             nsr_id: Id of the network service to be updated
5753             nslcmop_id: ns lcm operation id
5754
5755         Raises:
5756              DbException, LcmException, N2VCException, K8sException
5757
5758         """
5759         # Try to lock HA task here
5760 1         task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5761 1         if not task_is_locked_by_me:
5762 0             return
5763
5764 1         logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5765 1         self.logger.debug(logging_text + "Enter")
5766
5767         # Set the required variables to be filled up later
5768 1         db_nsr = None
5769 1         db_nslcmop_update = {}
5770 1         vnfr_update = {}
5771 1         nslcmop_operation_state = None
5772 1         db_nsr_update = {}
5773 1         error_description_nslcmop = ""
5774 1         exc = None
5775 1         change_type = "updated"
5776 1         detailed_status = ""
5777
5778 1         try:
5779             # wait for any previous tasks in progress
5780 1             step = "Waiting for previous operations to terminate"
5781 1             await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5782 1             self._write_ns_status(
5783                 nsr_id=nsr_id,
5784                 ns_state=None,
5785                 current_operation="UPDATING",
5786                 current_operation_id=nslcmop_id,
5787             )
5788
5789 1             step = "Getting nslcmop from database"
5790 1             db_nslcmop = self.db.get_one(
5791                 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5792             )
5793 1             update_type = db_nslcmop["operationParams"]["updateType"]
5794
5795 1             step = "Getting nsr from database"
5796 1             db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5797 1             old_operational_status = db_nsr["operational-status"]
5798 1             db_nsr_update["operational-status"] = "updating"
5799 1             self.update_db_2("nsrs", nsr_id, db_nsr_update)
5800 1             nsr_deployed = db_nsr["_admin"].get("deployed")
5801
5802 1             if update_type == "CHANGE_VNFPKG":
5803                 # Get the input parameters given through update request
5804 1                 vnf_instance_id = db_nslcmop["operationParams"][
5805                     "changeVnfPackageData"
5806                 ].get("vnfInstanceId")
5807
5808 1                 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5809                     "vnfdId"
5810                 )
5811 1                 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5812
5813 1                 step = "Getting vnfr from database"
5814 1                 db_vnfr = self.db.get_one(
5815                     "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5816                 )
5817
5818 1                 step = "Getting vnfds from database"
5819                 # Latest VNFD
5820 1                 latest_vnfd = self.db.get_one(
5821                     "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5822                 )
5823 1                 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5824
5825                 # Current VNFD
5826 1                 current_vnf_revision = db_vnfr.get("revision", 1)
5827 1                 current_vnfd = self.db.get_one(
5828                     "vnfds_revisions",
5829                     {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5830                     fail_on_empty=False,
5831                 )
5832                 # Charm artifact paths will be filled up later
5833 1                 (
5834                     current_charm_artifact_path,
5835                     target_charm_artifact_path,
5836                     charm_artifact_paths,
5837                     helm_artifacts,
5838                 ) = ([], [], [], [])
5839
5840 1                 step = "Checking if revision has changed in VNFD"
5841 1                 if current_vnf_revision != latest_vnfd_revision:
5842 1                     change_type = "policy_updated"
5843
5844                     # There is new revision of VNFD, update operation is required
5845 1                     current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5846 1                     latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5847
5848 1                     step = "Removing the VNFD packages if they exist in the local path"
5849 1                     shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5850 1                     shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5851
5852 1                     step = "Get the VNFD packages from FSMongo"
5853 1                     self.fs.sync(from_path=latest_vnfd_path)
5854 1                     self.fs.sync(from_path=current_vnfd_path)
5855
5856 1                     step = (
5857                         "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5858                     )
5859 1                     current_base_folder = current_vnfd["_admin"]["storage"]
5860 1                     latest_base_folder = latest_vnfd["_admin"]["storage"]
5861
5862 1                     for vca_index, vca_deployed in enumerate(
5863                         get_iterable(nsr_deployed, "VCA")
5864                     ):
5865 1                         vnf_index = db_vnfr.get("member-vnf-index-ref")
5866
5867                         # Getting charm-id and charm-type
5868 1                         if vca_deployed.get("member-vnf-index") == vnf_index:
5869 1                             vca_id = self.get_vca_id(db_vnfr, db_nsr)
5870 1                             vca_type = vca_deployed.get("type")
5871 1                             vdu_count_index = vca_deployed.get("vdu_count_index")
5872
5873                             # Getting ee-id
5874 1                             ee_id = vca_deployed.get("ee_id")
5875
5876 1                             step = "Getting descriptor config"
5877 1                             if current_vnfd.get("kdu"):
5878 1                                 search_key = "kdu_name"
5879                             else:
5880 1                                 search_key = "vnfd_id"
5881
5882 1                             entity_id = vca_deployed.get(search_key)
5883
5884 1                             descriptor_config = get_configuration(
5885                                 current_vnfd, entity_id
5886                             )
5887
5888 1                             if "execution-environment-list" in descriptor_config:
5889 1                                 ee_list = descriptor_config.get(
5890                                     "execution-environment-list", []
5891                                 )
5892                             else:
5893 0                                 ee_list = []
5894
5895                             # There could be several charms used in the same VNF
5896 1                             for ee_item in ee_list:
5897 1                                 if ee_item.get("juju"):
5898 1                                     step = "Getting charm name"
5899 1                                     charm_name = ee_item["juju"].get("charm")
5900
5901 1                                     step = "Setting Charm artifact paths"
5902 1                                     current_charm_artifact_path.append(
5903                                         get_charm_artifact_path(
5904                                             current_base_folder,
5905                                             charm_name,
5906                                             vca_type,
5907                                             current_vnf_revision,
5908                                         )
5909                                     )
5910 1                                     target_charm_artifact_path.append(
5911                                         get_charm_artifact_path(
5912                                             latest_base_folder,
5913                                             charm_name,
5914                                             vca_type,
5915                                             latest_vnfd_revision,
5916                                         )
5917                                     )
5918 0                                 elif ee_item.get("helm-chart"):
5919                                     # add the chart and all of its parameters to the list
5920 0                                     step = "Getting helm chart name"
5921 0                                     chart_name = ee_item.get("helm-chart")
5922 0                                     if (
5923                                         ee_item.get("helm-version")
5924                                         and ee_item.get("helm-version") == "v2"
5925                                     ):
5926 0                                         vca_type = "helm"
5927                                     else:
5928 0                                         vca_type = "helm-v3"
5929 0                                     step = "Setting Helm chart artifact paths"
5930
5931 0                                     helm_artifacts.append(
5932                                         {
5933                                             "current_artifact_path": get_charm_artifact_path(
5934                                                 current_base_folder,
5935                                                 chart_name,
5936                                                 vca_type,
5937                                                 current_vnf_revision,
5938                                             ),
5939                                             "target_artifact_path": get_charm_artifact_path(
5940                                                 latest_base_folder,
5941                                                 chart_name,
5942                                                 vca_type,
5943                                                 latest_vnfd_revision,
5944                                             ),
5945                                             "ee_id": ee_id,
5946                                             "vca_index": vca_index,
5947                                             "vdu_index": vdu_count_index,
5948                                         }
5949                                     )
5950
5951 1                             charm_artifact_paths = zip(
5952                                 current_charm_artifact_path, target_charm_artifact_path
5953                             )
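                                 # note: zip() returns a single-use iterator; it is materialized
                                 # with list() below before being iterated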
5954
5955 1                     step = "Checking if software version has changed in VNFD"
5956 1                     if find_software_version(current_vnfd) != find_software_version(
5957                         latest_vnfd
5958                     ):
5959 1                         step = "Checking if existing VNF has charm"
5960 1                         for current_charm_path, target_charm_path in list(
5961                             charm_artifact_paths
5962                         ):
5963 1                             if current_charm_path:
5964 1                                 raise LcmException(
5965                                     "Software version change is not supported as VNF instance {} has charm.".format(
5966                                         vnf_instance_id
5967                                     )
5968                                 )
5969
5970                         # There is no change in the charm package, so redeploy the VNF
5971                         # based on the new descriptor
5972 0                         step = "Redeploying VNF"
5973 0                         member_vnf_index = db_vnfr["member-vnf-index-ref"]
5974 0                         (result, detailed_status) = await self._ns_redeploy_vnf(
5975                             nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5976                         )
5977 0                         if result == "FAILED":
5978 0                             nslcmop_operation_state = result
5979 0                             error_description_nslcmop = detailed_status
5980 0                         db_nslcmop_update["detailed-status"] = detailed_status
5981 0                         self.logger.debug(
5982                             logging_text
5983                             + " step {} Done with result {} {}".format(
5984                                 step, nslcmop_operation_state, detailed_status
5985                             )
5986                         )
5987
5988                     else:
5989 1                         step = "Checking if any charm package has changed or not"
5990 1                         for current_charm_path, target_charm_path in list(
5991                             charm_artifact_paths
5992                         ):
5993 1                             if (
5994                                 current_charm_path
5995                                 and target_charm_path
5996                                 and self.check_charm_hash_changed(
5997                                     current_charm_path, target_charm_path
5998                                 )
5999                             ):
6000 1                                 step = "Checking whether VNF uses juju bundle"
6001 1                                 if check_juju_bundle_existence(current_vnfd):
6002 0                                     raise LcmException(
6003                                         "Charm upgrade is not supported for the instance which"
6004                                         " uses juju-bundle: {}".format(
6005                                             check_juju_bundle_existence(current_vnfd)
6006                                         )
6007                                     )
6008
6009 1                                 step = "Upgrading Charm"
6010 1                                 (
6011                                     result,
6012                                     detailed_status,
6013                                 ) = await self._ns_charm_upgrade(
6014                                     ee_id=ee_id,
6015                                     charm_id=vca_id,
6016                                     charm_type=vca_type,
6017                                     path=self.fs.path + target_charm_path,
6018                                     timeout=timeout_seconds,
6019                                 )
6020
6021 1                                 if result == "FAILED":
6022 1                                     nslcmop_operation_state = result
6023 1                                     error_description_nslcmop = detailed_status
6024
6025 1                                 db_nslcmop_update["detailed-status"] = detailed_status
6026 1                                 self.logger.debug(
6027                                     logging_text
6028                                     + " step {} Done with result {} {}".format(
6029                                         step, nslcmop_operation_state, detailed_status
6030                                     )
6031                                 )
6032
6033 1                         step = "Updating policies"
6034 1                         member_vnf_index = db_vnfr["member-vnf-index-ref"]
6035 1                         result = "COMPLETED"
6036 1                         detailed_status = "Done"
6037 1                         db_nslcmop_update["detailed-status"] = "Done"
6038
6039                     # Helm-based execution environments
6040 1                     for item in helm_artifacts:
6041 0                         if not (
6042                             item["current_artifact_path"]
6043                             and item["target_artifact_path"]
6044                             and self.check_charm_hash_changed(
6045                                 item["current_artifact_path"],
6046                                 item["target_artifact_path"],
6047                             )
6048                         ):
6049 0                             continue
6050 0                         db_update_entry = "_admin.deployed.VCA.{}.".format(
6051                             item["vca_index"]
6052                         )
6053 0                         vnfr_id = db_vnfr["_id"]
6054 0                         osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6055 0                         db_dict = {
6056                             "collection": "nsrs",
6057                             "filter": {"_id": nsr_id},
6058                             "path": db_update_entry,
6059                         }
6060 0                         vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
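                             # split the stored ee_id into its parts to address the execution
                             # environment that is being upgraded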
6061 0                         await self.vca_map[vca_type].upgrade_execution_environment(
6062                             namespace=namespace,
6063                             helm_id=helm_id,
6064                             db_dict=db_dict,
6065                             config=osm_config,
6066                             artifact_path=item["target_artifact_path"],
6067                             vca_type=vca_type,
6068                         )
6069 0                         vnf_id = db_vnfr.get("vnfd-ref")
6070 0                         config_descriptor = get_configuration(latest_vnfd, vnf_id)
6071 0                         self.logger.debug("get ssh key block")
6072 0                         rw_mgmt_ip = None
6073 0                         if deep_get(
6074                             config_descriptor,
6075                             ("config-access", "ssh-access", "required"),
6076                         ):
6077                             # Needed to inject an ssh key
6078 0                             user = deep_get(
6079                                 config_descriptor,
6080                                 ("config-access", "ssh-access", "default-user"),
6081                             )
6082 0                             step = (
6083                                 "Install configuration Software, getting public ssh key"
6084                             )
6085 0                             pub_key = await self.vca_map[
6086                                 vca_type
6087                             ].get_ee_ssh_public__key(
6088                                 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6089                             )
6090
6091 0                             step = (
6092                                 "Insert public key into VM user={} ssh_key={}".format(
6093                                     user, pub_key
6094                                 )
6095                             )
6096 0                             self.logger.debug(logging_text + step)
6097
6098                             # wait for RO to report the VM ip-address, then insert pub_key into the VM
6099 0                             rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6100                                 logging_text,
6101                                 nsr_id,
6102                                 vnfr_id,
6103                                 None,
6104                                 item["vdu_index"],
6105                                 user=user,
6106                                 pub_key=pub_key,
6107                             )
6108
6109 0                         initial_config_primitive_list = config_descriptor.get(
6110                             "initial-config-primitive"
6111                         )
6112 0                         config_primitive = next(
6113                             (
6114                                 p
6115                                 for p in initial_config_primitive_list
6116                                 if p["name"] == "config"
6117                             ),
6118                             None,
6119                         )
6120 0                         if not config_primitive:
6121 0                             continue
6122
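                             # re-run the 'config' initial primitive against the upgraded execution environment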
6123 0                         deploy_params = {"OSM": get_osm_params(db_vnfr)}
6124 0                         if rw_mgmt_ip:
6125 0                             deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6126 0                         if db_vnfr.get("additionalParamsForVnf"):
6127 0                             deploy_params.update(
6128                                 parse_yaml_strings(
6129                                     db_vnfr["additionalParamsForVnf"].copy()
6130                                 )
6131                             )
6132 0                         primitive_params_ = self._map_primitive_params(
6133                             config_primitive, {}, deploy_params
6134                         )
6135
6136 0                         step = "execute primitive '{}' params '{}'".format(
6137                             config_primitive["name"], primitive_params_
6138                         )
6139 0                         self.logger.debug(logging_text + step)
6140 0                         await self.vca_map[vca_type].exec_primitive(
6141                             ee_id=ee_id,
6142                             primitive_name=config_primitive["name"],
6143                             params_dict=primitive_params_,
6144                             db_dict=db_dict,
6145                             vca_id=vca_id,
6146                             vca_type=vca_type,
6147                         )
6148
6149 0                         step = "Updating policies"
6150 0                         member_vnf_index = db_vnfr["member-vnf-index-ref"]
6151 0                         detailed_status = "Done"
6152 0                         db_nslcmop_update["detailed-status"] = "Done"
6153
6154                     #  If nslcmop_operation_state is None, no operation has failed.
6155 1                     if not nslcmop_operation_state:
6156 1                         nslcmop_operation_state = "COMPLETED"
6157
6158                         # If the CHANGE_VNFPKG update operation is successful,
6159                         # the vnf revision needs to be updated
6160 1                         vnfr_update["revision"] = latest_vnfd_revision
6161 1                         self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6162
6163 1                     self.logger.debug(
6164                         logging_text
6165                         + " task Done with result {} {}".format(
6166                             nslcmop_operation_state, detailed_status
6167                         )
6168                     )
6169 1             elif update_type == "REMOVE_VNF":
6170                 # This part is included in https://osm.etsi.org/gerrit/11876
6171 1                 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6172 1                 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6173 1                 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6174 1                 step = "Removing VNF"
6175 1                 (result, detailed_status) = await self.remove_vnf(
6176                     nsr_id, nslcmop_id, vnf_instance_id
6177                 )
6178 1                 if result == "FAILED":
6179 0                     nslcmop_operation_state = result
6180 0                     error_description_nslcmop = detailed_status
6181 1                 db_nslcmop_update["detailed-status"] = detailed_status
6182 1                 change_type = "vnf_terminated"
6183 1                 if not nslcmop_operation_state:
6184 1                     nslcmop_operation_state = "COMPLETED"
6185 1                 self.logger.debug(
6186                     logging_text
6187                     + " task Done with result {} {}".format(
6188                         nslcmop_operation_state, detailed_status
6189                     )
6190                 )
6191
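                 # OPERATE_VNF: change the state of a VNF instance according to
                 # operationParams.operateVnfData via rebuild_start_stop()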
6192 0             elif update_type == "OPERATE_VNF":
6193 0                 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6194                     "vnfInstanceId"
6195                 ]
6196 0                 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6197                     "changeStateTo"
6198                 ]
6199 0                 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6200                     "additionalParam"
6201                 ]
6202 0                 (result, detailed_status) = await self.rebuild_start_stop(
6203                     nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6204                 )
6205 0                 if result == "FAILED":
6206 0                     nslcmop_operation_state = result
6207 0                     error_description_nslcmop = detailed_status
6208 0                 db_nslcmop_update["detailed-status"] = detailed_status
6209 0                 if not nslcmop_operation_state:
6210 0                     nslcmop_operation_state = "COMPLETED"
6211 0                 self.logger.debug(
6212                     logging_text
6213                     + " task Done with result {} {}".format(
6214                         nslcmop_operation_state, detailed_status
6215                     )
6216                 )
6217
6218             #  If nslcmop_operation_state is None, no operation has failed
6219             #  and all operations completed successfully.
6220 1             if not nslcmop_operation_state:
6221 1                 nslcmop_operation_state = "COMPLETED"
6222 1             db_nsr_update["operational-status"] = old_operational_status
6223
6224 1         except (DbException, LcmException, N2VCException, K8sException) as e:
6225 1             self.logger.error(logging_text + "Exit Exception {}".format(e))
6226 1             exc = e
6227 1         except asyncio.CancelledError:
6228 0             self.logger.error(
6229                 logging_text + "Cancelled Exception while '{}'".format(step)
6230             )
6231 0             exc = "Operation was cancelled"
6232 1         except asyncio.TimeoutError:
6233 0             self.logger.error(logging_text + "Timeout while '{}'".format(step))
6234 0             exc = "Timeout"
6235 1         except Exception as e:
6236 1             exc = traceback.format_exc()
6237 1             self.logger.critical(
6238                 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6239                 exc_info=True,
6240             )
6241         finally:
6242 1             if exc:
6243 1                 db_nslcmop_update[
6244                     "detailed-status"
6245                 ] = (
6246                     detailed_status
6247                 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6248 1                 nslcmop_operation_state = "FAILED"
6249 1                 db_nsr_update["operational-status"] = old_operational_status
6250 1             if db_nsr:
6251 1                 self._write_ns_status(
6252                     nsr_id=nsr_id,
6253                     ns_state=db_nsr["nsState"],
6254                     current_operation="IDLE",
6255                     current_operation_id=None,
6256                     other_update=db_nsr_update,
6257                 )
6258
6259 1             self._write_op_status(
6260                 op_id=nslcmop_id,
6261                 stage="",
6262                 error_message=error_description_nslcmop,
6263                 operation_state=nslcmop_operation_state,
6264                 other_update=db_nslcmop_update,
6265             )
6266
6267 1             if nslcmop_operation_state:
6268 1                 try:
6269 1                     msg = {
6270                         "nsr_id": nsr_id,
6271                         "nslcmop_id": nslcmop_id,
6272                         "operationState": nslcmop_operation_state,
6273                     }
6274 1                     if change_type in ("vnf_terminated", "policy_updated"):
6275 1                         msg.update({"vnf_member_index": member_vnf_index})
6276 1                     await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6277 1                 except Exception as e:
6278 1                     self.logger.error(
6279                         logging_text + "kafka_write notification Exception {}".format(e)
6280                     )
6281 1             self.logger.debug(logging_text + "Exit")
6282 1             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6283 1             return nslcmop_operation_state, detailed_status
6284
6285 1     async def scale(self, nsr_id, nslcmop_id):
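             """Scale a VNF within this NS (SCALE_OUT or SCALE_IN of its VDUs/KDUs).

             :param nsr_id: _id of the nsrs record to scale
             :param nslcmop_id: _id of the nslcmops record describing the scale request
             """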
6286         # Try to lock HA task here
6287 1         task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6288 1         if not task_is_locked_by_me:
6289 0             return
6290
6291 1         logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6292 1         stage = ["", "", ""]
6293 1         tasks_dict_info = {}
6294         # stage holds three progress strings: stage, step, VIM progress
6295 1         self.logger.debug(logging_text + "Enter")
6296         # get all needed from database
6297 1         db_nsr = None
6298 1         db_nslcmop_update = {}
6299 1         db_nsr_update = {}
6300 1         exc = None
6301         # in case of error, indicates which part of the scale operation failed, so the nsr can be set to error status
6302 1         scale_process = None
6303 1         old_operational_status = ""
6304 1         old_config_status = ""
6305 1         nsi_id = None
6306 1         try:
6307             # wait for any previous tasks in progress
6308 1             step = "Waiting for previous operations to terminate"
6309 1             await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6310 1             self._write_ns_status(
6311                 nsr_id=nsr_id,
6312                 ns_state=None,
6313                 current_operation="SCALING",
6314                 current_operation_id=nslcmop_id,
6315             )
6316
6317 1             step = "Getting nslcmop from database"
6318 1             self.logger.debug(
6319                 step + " after having waited for previous tasks to be completed"
6320             )
6321 1             db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6322
6323 1             step = "Getting nsr from database"
6324 1             db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6325 1             old_operational_status = db_nsr["operational-status"]
6326 1             old_config_status = db_nsr["config-status"]
6327
6328 1             step = "Parsing scaling parameters"
6329 1             db_nsr_update["operational-status"] = "scaling"
6330 1             self.update_db_2("nsrs", nsr_id, db_nsr_update)
6331 1             nsr_deployed = db_nsr["_admin"].get("deployed")
6332
6333 1             vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6334                 "scaleByStepData"
6335             ]["member-vnf-index"]
6336 1             scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6337                 "scaleByStepData"
6338             ]["scaling-group-descriptor"]
6339 1             scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6340             # for backward compatibility
6341 1             if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6342 0                 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6343 0                 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6344 0                 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6345
6346 1             step = "Getting vnfr from database"
6347 1             db_vnfr = self.db.get_one(
6348                 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6349             )
6350
6351 1             vca_id = self.get_vca_id(db_vnfr, db_nsr)
6352
6353 1             step = "Getting vnfd from database"
6354 1             db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6355
6356 1             base_folder = db_vnfd["_admin"]["storage"]
6357
6358 1             step = "Getting scaling-group-descriptor"
6359 1             scaling_descriptor = find_in_list(
6360                 get_scaling_aspect(db_vnfd),
6361                 lambda scale_desc: scale_desc["name"] == scaling_group,
6362             )
6363 1             if not scaling_descriptor:
6364 0                 raise LcmException(
6365                     "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6366                     "at vnfd:scaling-group-descriptor".format(scaling_group)
6367                 )
6368
6369 1             step = "Sending scale order to VIM"
6370             # TODO check if ns is in a proper status
6371 1             nb_scale_op = 0
6372 1             if not db_nsr["_admin"].get("scaling-group"):
6373 1                 self.update_db_2(
6374                     "nsrs",
6375                     nsr_id,
6376                     {
6377                         "_admin.scaling-group": [
6378                             {"name": scaling_group, "nb-scale-op": 0}
6379                         ]
6380                     },
6381                 )
6382 1                 admin_scale_index = 0
6383             else:
6384 1                 for admin_scale_index, admin_scale_info in enumerate(
6385                     db_nsr["_admin"]["scaling-group"]
6386                 ):
6387 1                     if admin_scale_info["name"] == scaling_group:
6388 0                         nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6389 0                         break
6390                 else:  # not found: set the index to one past the last element and add a new entry with that name
6391 1                     admin_scale_index += 1
6392 1                     db_nsr_update[
6393                         "_admin.scaling-group.{}.name".format(admin_scale_index)
6394                     ] = scaling_group
6395
6396 1             vca_scaling_info = []
6397 1             scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
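                 # scaling_info accumulates the scaling plan; illustrative shape:
                 #   {"scaling_group_name": ..., "vdu": [], "kdu": [],
                 #    "scaling_direction": "OUT", "vdu-create": {vdu_id: count},
                 #    "kdu-create": {kdu_name: [{"scale": replica_count, ...}]}}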
6398 1             if scaling_type == "SCALE_OUT":
6399 1                 if "aspect-delta-details" not in scaling_descriptor:
6400 0                     raise LcmException(
6401                         "Aspect delta details not found in scaling descriptor {}".format(
6402                             scaling_descriptor["name"]
6403                         )
6404                     )
6405                 # check whether max-instance-count is reached
6406 1                 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6407
6408 1                 scaling_info["scaling_direction"] = "OUT"
6409 1                 scaling_info["vdu-create"] = {}
6410 1                 scaling_info["kdu-create"] = {}
6411 1                 for delta in deltas:
6412 1                     for vdu_delta in delta.get("vdu-delta", {}):
6413 0                         vdud = get_vdu(db_vnfd, vdu_delta["id"])
6414                         # vdu_index also provides the number of instances of the targeted vdu
6415 0                         vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6416 0                         cloud_init_text = self._get_vdu_cloud_init_content(
6417                             vdud, db_vnfd
6418                         )
6419 0                         if cloud_init_text:
6420 0                             additional_params = (
6421                                 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6422                                 or {}
6423                             )
6424 0                         cloud_init_list = []
6425
6426 0                         vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6427 0                         max_instance_count = 10
6428 0                         if vdu_profile and "max-number-of-instances" in vdu_profile:
6429 0                             max_instance_count = vdu_profile.get(
6430                                 "max-number-of-instances", 10
6431                             )
6432
6433 0                         default_instance_num = get_number_of_instances(
6434                             db_vnfd, vdud["id"]
6435                         )
6436 0                         instances_number = vdu_delta.get("number-of-instances", 1)
6437 0                         nb_scale_op += instances_number
6438
6439 0                         new_instance_count = nb_scale_op + default_instance_num
6440                         # Check whether the new count exceeds the max while the vdu count is below it.
6441                         # In that case, adjust the number of instances to create
6442 0                         if new_instance_count > max_instance_count > vdu_count:
6443 0                             instances_number = new_instance_count - max_instance_count
6444                         else:
6445 0                             instances_number = instances_number
6446
6447 0                         if new_instance_count > max_instance_count:
6448 0                             raise LcmException(
6449                                 "reached the limit of {} (max-instance-count) "
6450                                 "scaling-out operations for the "
6451                                 "scaling-group-descriptor '{}'".format(
6452                                     nb_scale_op, scaling_group
6453                                 )
6454                             )
6455 0                         for x in range(vdu_delta.get("number-of-instances", 1)):
6456 0                             if cloud_init_text:
6457                                 # TODO: the VDU's own ip information is not available because db_vnfr has not been updated yet.
6458 0                                 additional_params["OSM"] = get_osm_params(
6459                                     db_vnfr, vdu_delta["id"], vdu_index + x
6460                                 )
6461 0                                 cloud_init_list.append(
6462                                     self._parse_cloud_init(
6463                                         cloud_init_text,
6464                                         additional_params,
6465                                         db_vnfd["id"],
6466                                         vdud["id"],
6467                                     )
6468                                 )
6469 0                                 vca_scaling_info.append(
6470                                     {
6471                                         "osm_vdu_id": vdu_delta["id"],
6472                                         "member-vnf-index": vnf_index,
6473                                         "type": "create",
6474                                         "vdu_index": vdu_index + x,
6475                                     }
6476                                 )
6477 0                         scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
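                         # kdu-resource-delta: translate each delta into a new replica count
                         # for the affected KDU resource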
6478 1                     for kdu_delta in delta.get("kdu-resource-delta", {}):
6479 1                         kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6480 1                         kdu_name = kdu_profile["kdu-name"]
6481 1                         resource_name = kdu_profile.get("resource-name", "")
6482
6483                         # There might be different kdus in the same delta,
6484                         # so keep a separate list for each kdu
6485 1                         if not scaling_info["kdu-create"].get(kdu_name, None):
6486 1                             scaling_info["kdu-create"][kdu_name] = []
6487
6488 1                         kdur = get_kdur(db_vnfr, kdu_name)
6489 1                         if kdur.get("helm-chart"):
6490 0                             k8s_cluster_type = "helm-chart-v3"
6491 0                             self.logger.debug("kdur: {}".format(kdur))
6492 0                             if (
6493                                 kdur.get("helm-version")
6494                                 and kdur.get("helm-version") == "v2"
6495                             ):
6496 0                                 k8s_cluster_type = "helm-chart"
6497 1                         elif kdur.get("juju-bundle"):
6498 1                             k8s_cluster_type = "juju-bundle"
6499                         else:
6500 0                             raise LcmException(
6501                                 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6502                                 "juju-bundle. Maybe an old NBI version is running".format(
6503                                     db_vnfr["member-vnf-index-ref"], kdu_name
6504                                 )
6505                             )
6506
6507 1                         max_instance_count = 10
6508 1                         if kdu_profile and "max-number-of-instances" in kdu_profile:
6509 1                             max_instance_count = kdu_profile.get(
6510                                 "max-number-of-instances", 10
6511                             )
6512
6513 1                         nb_scale_op += kdu_delta.get("number-of-instances", 1)
6514 1                         deployed_kdu, _ = get_deployed_kdu(
6515                             nsr_deployed, kdu_name, vnf_index
6516                         )
6517 1                         if deployed_kdu is None:
6518 0                             raise LcmException(
6519                                 "KDU '{}' for vnf '{}' not deployed".format(
6520                                     kdu_name, vnf_index
6521                                 )
6522                             )
6523 1                         kdu_instance = deployed_kdu.get("kdu-instance")
6524 1                         instance_num = await self.k8scluster_map[
6525                             k8s_cluster_type
6526                         ].get_scale_count(
6527                             resource_name,
6528                             kdu_instance,
6529                             vca_id=vca_id,
6530                             cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6531                             kdu_model=deployed_kdu.get("kdu-model"),
6532                         )
6533 1                         kdu_replica_count = instance_num + kdu_delta.get(
6534                             "number-of-instances", 1
6535                         )
6536
6537                         # Check whether the new count exceeds the max while instance_num is below it.
6538                         # In that case, cap the kdu replica count at the max instance number
6539 1                         if kdu_replica_count > max_instance_count > instance_num:
6540 0                             kdu_replica_count = max_instance_count
6541 1                         if kdu_replica_count > max_instance_count:
6542 0                             raise LcmException(
6543                                 "reached the limit of {} (max-instance-count) "
6544                                 "scaling-out operations for the "
6545                                 "scaling-group-descriptor '{}'".format(
6546                                     instance_num, scaling_group
6547                                 )
6548                             )
6549
6550 1                         for x in range(kdu_delta.get("number-of-instances", 1)):
6551 1                             vca_scaling_info.append(
6552                                 {
6553                                     "osm_kdu_id": kdu_name,
6554                                     "member-vnf-index": vnf_index,
6555                                     "type": "create",
6556                                     "kdu_index": instance_num + x - 1,
6557                                 }
6558                             )
6559 1                         scaling_info["kdu-create"][kdu_name].append(
6560                             {
6561                                 "member-vnf-index": vnf_index,
6562                                 "type": "create",
6563                                 "k8s-cluster-type": k8s_cluster_type,
6564                                 "resource-name": resource_name,
6565                                 "scale": kdu_replica_count,
6566                             }
6567                         )
6568 0             elif scaling_type == "SCALE_IN":
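                     # SCALE_IN mirrors SCALE_OUT: compute how many VDU/KDU instances to remove
                     # while enforcing min-number-of-instances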
6569 0                 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6570
6571 0                 scaling_info["scaling_direction"] = "IN"
6572 0                 scaling_info["vdu-delete"] = {}
6573 0                 scaling_info["kdu-delete"] = {}
6574
6575 0                 for delta in deltas:
6576 0                     for vdu_delta in delta.get("vdu-delta", {}):
6577 0                         vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6578 0                         min_instance_count = 0
6579 0                         vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6580 0                         if vdu_profile and "min-number-of-instances" in vdu_profile:
6581 0                             min_instance_count = vdu_profile["min-number-of-instances"]
6582
6583 0                         default_instance_num = get_number_of_instances(
6584                             db_vnfd, vdu_delta["id"]
6585                         )
6586 0                         instance_num = vdu_delta.get("number-of-instances", 1)
6587 0                         nb_scale_op -= instance_num
6588
6589 0                         new_instance_count = nb_scale_op + default_instance_num
6590
6591 0                         if new_instance_count < min_instance_count < vdu_count:
6592 0                             instances_number = min_instance_count - new_instance_count
6593                         else:
6594 0                             instances_number = instance_num
6595
6596 0                         if new_instance_count < min_instance_count:
6597 0                             raise LcmException(
6598                                 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6599                                 "scaling-group-descriptor '{}'".format(
6600                                     nb_scale_op, scaling_group
6601                                 )
6602                             )
6603 0                         for x in range(vdu_delta.get("number-of-instances", 1)):
6604 0                             vca_scaling_info.append(
6605                                 {
6606                                     "osm_vdu_id": vdu_delta["id"],
6607                                     "member-vnf-index": vnf_index,
6608                                     "type": "delete",
6609                                     "vdu_index": vdu_index - 1 - x,
6610                                 }
6611                             )
6612 0                         scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6613 0                     for kdu_delta in delta.get("kdu-resource-delta", {}):
6614 0                         kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6615 0                         kdu_name = kdu_profile["kdu-name"]
6616 0                         resource_name = kdu_profile.get("resource-name", "")
6617
6618 0                         if not scaling_info["kdu-delete"].get(kdu_name, None):
6619 0                             scaling_info["kdu-delete"][kdu_name] = []
6620
6621 0                         kdur = get_kdur(db_vnfr, kdu_name)
6622 0                         if kdur.get("helm-chart"):
6623 0                             k8s_cluster_type = "helm-chart-v3"
6624 0                             self.logger.debug("kdur: {}".format(kdur))
6625 0                             if (
6626                                 kdur.get("helm-version")
6627                                 and kdur.get("helm-version") == "v2"
6628                             ):
6629 0                                 k8s_cluster_type = "helm-chart"
6630 0                         elif kdur.get("juju-bundle"):
6631 0                             k8s_cluster_type = "juju-bundle"
6632                         else:
6633 0                             raise LcmException(
6634                                 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6635                                 "juju-bundle. Maybe an old NBI version is running".format(
6636                                     db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6637                                 )
6638                             )
6639
6640 0                         min_instance_count = 0
6641 0                         if kdu_profile and "min-number-of-instances" in kdu_profile:
6642 0                             min_instance_count = kdu_profile["min-number-of-instances"]
6643
6644 0                         nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6645 0                         deployed_kdu, _ = get_deployed_kdu(
6646                             nsr_deployed, kdu_name, vnf_index
6647                         )
6648 0                         if deployed_kdu is None:
6649 0                             raise LcmException(
6650                                 "KDU '{}' for vnf '{}' not deployed".format(
6651                                     kdu_name, vnf_index
6652                                 )
6653                             )
6654 0                         kdu_instance = deployed_kdu.get("kdu-instance")
6655 0                         instance_num = await self.k8scluster_map[
6656                             k8s_cluster_type
6657                         ].get_scale_count(
6658                             resource_name,
6659                             kdu_instance,
6660                             vca_id=vca_id,
6661                             cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6662                             kdu_model=deployed_kdu.get("kdu-model"),
6663                         )
6664 0                         kdu_replica_count = instance_num - kdu_delta.get(
6665                             "number-of-instances", 1
6666                         )
6667
6668 0                         if kdu_replica_count < min_instance_count < instance_num:
6669 0                             kdu_replica_count = min_instance_count
6670 0                         if kdu_replica_count < min_instance_count:
6671 0                             raise LcmException(
6672                                 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6673                                 "scaling-group-descriptor '{}'".format(
6674                                     instance_num, scaling_group
6675                                 )
6676                             )
6677
6678 0                         for x in range(kdu_delta.get("number-of-instances", 1)):
6679 0                             vca_scaling_info.append(
6680                                 {
6681                                     "osm_kdu_id": kdu_name,
6682                                     "member-vnf-index": vnf_index,
6683                                     "type": "delete",
6684                                     "kdu_index": instance_num - x - 1,
6685                                 }
6686                             )
6687 0                         scaling_info["kdu-delete"][kdu_name].append(
6688                             {
6689                                 "member-vnf-index": vnf_index,
6690                                 "type": "delete",
6691                                 "k8s-cluster-type": k8s_cluster_type,
6692                                 "resource-name": resource_name,
6693                                 "scale": kdu_replica_count,
6694                             }
6695                         )
6696
6697             # update VDU_SCALING_INFO with the ip addresses of the VDUs to be deleted
6698 1             vdu_delete = copy(scaling_info.get("vdu-delete"))
6699 1             if scaling_info["scaling_direction"] == "IN":
6700 0                 for vdur in reversed(db_vnfr["vdur"]):
6701 0                     if vdu_delete.get(vdur["vdu-id-ref"]):
6702 0                         vdu_delete[vdur["vdu-id-ref"]] -= 1
6703 0                         scaling_info["vdu"].append(
6704                             {
6705                                 "name": vdur.get("name") or vdur.get("vdu-name"),
6706                                 "vdu_id": vdur["vdu-id-ref"],
6707                                 "interface": [],
6708                             }
6709                         )
6710 0                         for interface in vdur["interfaces"]:
6711 0                             scaling_info["vdu"][-1]["interface"].append(
6712                                 {
6713                                     "name": interface["name"],
6714                                     "ip_address": interface["ip-address"],
6715                                     "mac_address": interface.get("mac-address"),
6716                                 }
6717                             )
6718                 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6719
6720             # PRE-SCALE BEGIN
6721 1             step = "Executing pre-scale vnf-config-primitive"
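                 # run any scaling-config-action whose trigger matches the scaling direction
                 # (pre-scale-in / pre-scale-out) before the actual scaling takes place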
6722 1             if scaling_descriptor.get("scaling-config-action"):
6723 0                 for scaling_config_action in scaling_descriptor[
6724                     "scaling-config-action"
6725                 ]:
6726 0                     if (
6727                         scaling_config_action.get("trigger") == "pre-scale-in"
6728                         and scaling_type == "SCALE_IN"
6729                     ) or (
6730                         scaling_config_action.get("trigger") == "pre-scale-out"
6731                         and scaling_type == "SCALE_OUT"
6732                     ):
6733 0                         vnf_config_primitive = scaling_config_action[
6734                             "vnf-config-primitive-name-ref"
6735                         ]
6736 0                         step = db_nslcmop_update[
6737                             "detailed-status"
6738                         ] = "executing pre-scale scaling-config-action '{}'".format(
6739                             vnf_config_primitive
6740                         )
6741
6742                         # look for primitive
6743 0                         for config_primitive in (
6744                             get_configuration(db_vnfd, db_vnfd["id"]) or {}
6745                         ).get("config-primitive", ()):
6746 0                             if config_primitive["name"] == vnf_config_primitive:
6747 0                                 break
6748                         else:
6749 0                             raise LcmException(
6750                                 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6751                                 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6752                                 "primitive".format(scaling_group, vnf_config_primitive)
6753                             )
6754
6755 0                         vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6756 0                         if db_vnfr.get("additionalParamsForVnf"):
6757 0                             vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6758
6759 0                         scale_process = "VCA"
6760 0                         db_nsr_update["config-status"] = "configuring pre-scaling"
6761 0                         primitive_params = self._map_primitive_params(
6762                             config_primitive, {}, vnfr_params
6763                         )
6764
6765                         # Pre-scale retry check: Check if this sub-operation has been executed before
6766 0                         op_index = self._check_or_add_scale_suboperation(
6767                             db_nslcmop,
6768                             vnf_index,
6769                             vnf_config_primitive,
6770                             primitive_params,
6771                             "PRE-SCALE",
6772                         )
6773 0                         if op_index == self.SUBOPERATION_STATUS_SKIP:
6774                             # Skip sub-operation
6775 0                             result = "COMPLETED"
6776 0                             result_detail = "Done"
6777 0                             self.logger.debug(
6778                                 logging_text
6779                                 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6780                                     vnf_config_primitive, result, result_detail
6781                                 )
6782                             )
6783                         else:
6784 0                             if op_index == self.SUBOPERATION_STATUS_NEW:
6785                                 # New sub-operation: Get index of this sub-operation
6786 0                                 op_index = (
6787                                     len(db_nslcmop.get("_admin", {}).get("operations"))
6788                                     - 1
6789                                 )
6790 0                                 self.logger.debug(
6791                                     logging_text
6792                                     + "vnf_config_primitive={} New sub-operation".format(
6793                                         vnf_config_primitive
6794                                     )
6795                                 )
6796                             else:
6797                                 # retry:  Get registered params for this existing sub-operation
6798 0                                 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6799                                     op_index
6800                                 ]
6801 0                                 vnf_index = op.get("member_vnf_index")
6802 0                                 vnf_config_primitive = op.get("primitive")
6803 0                                 primitive_params = op.get("primitive_params")
6804 0                                 self.logger.debug(
6805                                     logging_text
6806                                     + "vnf_config_primitive={} Sub-operation retry".format(
6807                                         vnf_config_primitive
6808                                     )
6809                                 )
6810                             # Execute the primitive, either with new (first-time) or registered (retry) args
6811 0                             ee_descriptor_id = config_primitive.get(
6812                                 "execution-environment-ref"
6813                             )
6814 0                             primitive_name = config_primitive.get(
6815                                 "execution-environment-primitive", vnf_config_primitive
6816                             )
6817 0                             ee_id, vca_type = self._look_for_deployed_vca(
6818                                 nsr_deployed["VCA"],
6819                                 member_vnf_index=vnf_index,
6820                                 vdu_id=None,
6821                                 vdu_count_index=None,
6822                                 ee_descriptor_id=ee_descriptor_id,
6823                             )
6824 0                             result, result_detail = await self._ns_execute_primitive(
6825                                 ee_id,
6826                                 primitive_name,
6827                                 primitive_params,
6828                                 vca_type=vca_type,
6829                                 vca_id=vca_id,
6830                             )
6831 0                             self.logger.debug(
6832                                 logging_text
6833                                 + "vnf_config_primitive={} Done with result {} {}".format(
6834                                     vnf_config_primitive, result, result_detail
6835                                 )
6836                             )
6837                             # Update operationState = COMPLETED | FAILED
6838 0                             self._update_suboperation_status(
6839                                 db_nslcmop, op_index, result, result_detail
6840                             )
6841
6842 0                         if result == "FAILED":
6843 0                             raise LcmException(result_detail)
6844 0                         db_nsr_update["config-status"] = old_config_status
6845 0                         scale_process = None
6846             # PRE-SCALE END
6847
6848 1             db_nsr_update[
6849                 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6850             ] = nb_scale_op
6851 1             db_nsr_update[
6852                 "_admin.scaling-group.{}.time".format(admin_scale_index)
6853             ] = time()
6854
6855             # SCALE-IN VCA - BEGIN
6856 1             if vca_scaling_info:
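                     # for scale-in, terminate and remove the VCA execution environments
                     # of the VDUs that are going to be deleted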
6857 1                 step = db_nslcmop_update[
6858                     "detailed-status"
6859                 ] = "Deleting the execution environments"
6860 1                 scale_process = "VCA"
6861 1                 for vca_info in vca_scaling_info:
6862 1                     if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6863 0                         member_vnf_index = str(vca_info["member-vnf-index"])
6864 0                         self.logger.debug(
6865                             logging_text + "vdu info: {}".format(vca_info)
6866                         )
6867 0                         if vca_info.get("osm_vdu_id"):
6868 0                             vdu_id = vca_info["osm_vdu_id"]
6869 0                             vdu_index = int(vca_info["vdu_index"])
6870 0                             stage[
6871                                 1
6872                             ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6873                                 member_vnf_index, vdu_id, vdu_index
6874                             )
6875 0                         stage[2] = step = "Scaling in VCA"
6876 0                         self._write_op_status(op_id=nslcmop_id, stage=stage)
6877 0                         vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6878 0                         config_update = db_nsr["configurationStatus"]
6879 0                         for vca_index, vca in enumerate(vca_update):
6880 0                             if (
6881                                 (vca or vca.get("ee_id"))
6882                                 and vca["member-vnf-index"] == member_vnf_index
6883                                 and vca["vdu_count_index"] == vdu_index
6884                             ):
6885 0                                 if vca.get("vdu_id"):
6886 0                                     config_descriptor = get_configuration(
6887                                         db_vnfd, vca.get("vdu_id")
6888                                     )
6889 0                                 elif vca.get("kdu_name"):
6890 0                                     config_descriptor = get_configuration(
6891                                         db_vnfd, vca.get("kdu_name")
6892                                     )
6893                                 else:
6894 0                                     config_descriptor = get_configuration(
6895                                         db_vnfd, db_vnfd["id"]
6896                                     )
6897 0                                 operation_params = (
6898                                     db_nslcmop.get("operationParams") or {}
6899                                 )
6900 0                                 exec_terminate_primitives = not operation_params.get(
6901                                     "skip_terminate_primitives"
6902                                 ) and vca.get("needed_terminate")
6903 0                                 task = asyncio.ensure_future(
6904                                     asyncio.wait_for(
6905                                         self.destroy_N2VC(
6906                                             logging_text,
6907                                             db_nslcmop,
6908                                             vca,
6909                                             config_descriptor,
6910                                             vca_index,
6911                                             destroy_ee=True,
6912                                             exec_primitives=exec_terminate_primitives,
6913                                             scaling_in=True,
6914                                             vca_id=vca_id,
6915                                         ),
6916                                         timeout=self.timeout.charm_delete,
6917                                     )
6918                                 )
6919 0                                 tasks_dict_info[task] = "Terminating VCA {}".format(
6920                                     vca.get("ee_id")
6921                                 )
6922 0                                 del vca_update[vca_index]
6923 0                                 del config_update[vca_index]
6924                         # wait for pending tasks of terminate primitives
6925 0                         if tasks_dict_info:
6926 0                             self.logger.debug(
6927                                 logging_text
6928                                 + "Waiting for tasks {}".format(
6929                                     list(tasks_dict_info.keys())
6930                                 )
6931                             )
6932 0                             error_list = await self._wait_for_tasks(
6933                                 logging_text,
6934                                 tasks_dict_info,
6935                                 min(
6936                                     self.timeout.charm_delete, self.timeout.ns_terminate
6937                                 ),
6938                                 stage,
6939                                 nslcmop_id,
6940                             )
6941 0                             tasks_dict_info.clear()
6942 0                             if error_list:
6943 0                                 raise LcmException("; ".join(error_list))
6944
6945 0                         db_vca_and_config_update = {
6946                             "_admin.deployed.VCA": vca_update,
6947                             "configurationStatus": config_update,
6948                         }
6949 0                         self.update_db_2(
6950                             "nsrs", db_nsr["_id"], db_vca_and_config_update
6951                         )
6952 1             scale_process = None
6953             # SCALE-IN VCA - END
6954
6955             # SCALE RO - BEGIN
6956 1             if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6957 0                 scale_process = "RO"
6958 0                 if self.ro_config.ng:
6959 0                     await self._scale_ng_ro(
6960                         logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6961                     )
6962 1             scaling_info.pop("vdu-create", None)
6963 1             scaling_info.pop("vdu-delete", None)
6964
6965 1             scale_process = None
6966             # SCALE RO - END
6967
6968             # SCALE KDU - BEGIN
6969 1             if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6970 1                 scale_process = "KDU"
6971 1                 await self._scale_kdu(
6972                     logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6973                 )
6974 1             scaling_info.pop("kdu-create", None)
6975 1             scaling_info.pop("kdu-delete", None)
6976
6977 1             scale_process = None
6978             # SCALE KDU - END
6979
6980 1             if db_nsr_update:
6981 1                 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6982
6983             # SCALE-UP VCA - BEGIN
6984 1             if vca_scaling_info:
6985 1                 step = db_nslcmop_update[
6986                     "detailed-status"
6987                 ] = "Creating new execution environments"
6988 1                 scale_process = "VCA"
6989 1                 for vca_info in vca_scaling_info:
6990 1                     if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6991 0                         member_vnf_index = str(vca_info["member-vnf-index"])
6992 0                         self.logger.debug(
6993                             logging_text + "vdu info: {}".format(vca_info)
6994                         )
6995 0                         vnfd_id = db_vnfr["vnfd-ref"]
6996 0                         if vca_info.get("osm_vdu_id"):
6997 0                             vdu_index = int(vca_info["vdu_index"])
6998 0                             deploy_params = {"OSM": get_osm_params(db_vnfr)}
6999 0                             if db_vnfr.get("additionalParamsForVnf"):
7000 0                                 deploy_params.update(
7001                                     parse_yaml_strings(
7002                                         db_vnfr["additionalParamsForVnf"].copy()
7003                                     )
7004                                 )
7005 0                             descriptor_config = get_configuration(
7006                                 db_vnfd, db_vnfd["id"]
7007                             )
7008 0                             if descriptor_config:
7009 0                                 vdu_id = None
7010 0                                 vdu_name = None
7011 0                                 kdu_name = None
7012 0                                 self._deploy_n2vc(
7013                                     logging_text=logging_text
7014                                     + "member_vnf_index={} ".format(member_vnf_index),
7015                                     db_nsr=db_nsr,
7016                                     db_vnfr=db_vnfr,
7017                                     nslcmop_id=nslcmop_id,
7018                                     nsr_id=nsr_id,
7019                                     nsi_id=nsi_id,
7020                                     vnfd_id=vnfd_id,
7021                                     vdu_id=vdu_id,
7022                                     kdu_name=kdu_name,
7023                                     member_vnf_index=member_vnf_index,
7024                                     vdu_index=vdu_index,
7025                                     vdu_name=vdu_name,
7026                                     deploy_params=deploy_params,
7027                                     descriptor_config=descriptor_config,
7028                                     base_folder=base_folder,
7029                                     task_instantiation_info=tasks_dict_info,
7030                                     stage=stage,
7031                                 )
7032 0                             vdu_id = vca_info["osm_vdu_id"]
7033 0                             vdur = find_in_list(
7034                                 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7035                             )
7036 0                             descriptor_config = get_configuration(db_vnfd, vdu_id)
7037 0                             if vdur.get("additionalParams"):
7038 0                                 deploy_params_vdu = parse_yaml_strings(
7039                                     vdur["additionalParams"]
7040                                 )
7041                             else:
7042 0                                 deploy_params_vdu = deploy_params
7043 0                             deploy_params_vdu["OSM"] = get_osm_params(
7044                                 db_vnfr, vdu_id, vdu_count_index=vdu_index
7045                             )
7046 0                             if descriptor_config:
7047 0                                 vdu_name = None
7048 0                                 kdu_name = None
7049 0                                 stage[
7050                                     1
7051                                 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7052                                     member_vnf_index, vdu_id, vdu_index
7053                                 )
7054 0                                 stage[2] = step = "Scaling out VCA"
7055 0                                 self._write_op_status(op_id=nslcmop_id, stage=stage)
7056 0                                 self._deploy_n2vc(
7057                                     logging_text=logging_text
7058                                     + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7059                                         member_vnf_index, vdu_id, vdu_index
7060                                     ),
7061                                     db_nsr=db_nsr,
7062                                     db_vnfr=db_vnfr,
7063                                     nslcmop_id=nslcmop_id,
7064                                     nsr_id=nsr_id,
7065                                     nsi_id=nsi_id,
7066                                     vnfd_id=vnfd_id,
7067                                     vdu_id=vdu_id,
7068                                     kdu_name=kdu_name,
7069                                     member_vnf_index=member_vnf_index,
7070                                     vdu_index=vdu_index,
7071                                     vdu_name=vdu_name,
7072                                     deploy_params=deploy_params_vdu,
7073                                     descriptor_config=descriptor_config,
7074                                     base_folder=base_folder,
7075                                     task_instantiation_info=tasks_dict_info,
7076                                     stage=stage,
7077                                 )
7078             # SCALE-UP VCA - END
7079 1             scale_process = None
7080
7081             # POST-SCALE BEGIN
7082             # execute vnf-config-primitive for POST-SCALING
7083 1             step = "Executing post-scale vnf-config-primitive"
7084 1             if scaling_descriptor.get("scaling-config-action"):
7085 0                 for scaling_config_action in scaling_descriptor[
7086                     "scaling-config-action"
7087                 ]:
7088 0                     if (
7089                         scaling_config_action.get("trigger") == "post-scale-in"
7090                         and scaling_type == "SCALE_IN"
7091                     ) or (
7092                         scaling_config_action.get("trigger") == "post-scale-out"
7093                         and scaling_type == "SCALE_OUT"
7094                     ):
7095 0                         vnf_config_primitive = scaling_config_action[
7096                             "vnf-config-primitive-name-ref"
7097                         ]
7098 0                         step = db_nslcmop_update[
7099                             "detailed-status"
7100                         ] = "executing post-scale scaling-config-action '{}'".format(
7101                             vnf_config_primitive
7102                         )
7103
7104 0                         vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7105 0                         if db_vnfr.get("additionalParamsForVnf"):
7106 0                             vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7107
7108                         # look for primitive
7109 0                         for config_primitive in (
7110                             get_configuration(db_vnfd, db_vnfd["id"]) or {}
7111                         ).get("config-primitive", ()):
7112 0                             if config_primitive["name"] == vnf_config_primitive:
7113 0                                 break
7114                         else:
7115 0                             raise LcmException(
7116                                 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7117                                 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7118                                 "config-primitive".format(
7119                                     scaling_group, vnf_config_primitive
7120                                 )
7121                             )
7122 0                         scale_process = "VCA"
7123 0                         db_nsr_update["config-status"] = "configuring post-scaling"
7124 0                         primitive_params = self._map_primitive_params(
7125                             config_primitive, {}, vnfr_params
7126                         )
7127
7128                         # Post-scale retry check: Check if this sub-operation has been executed before
7129 0                         op_index = self._check_or_add_scale_suboperation(
7130                             db_nslcmop,
7131                             vnf_index,
7132                             vnf_config_primitive,
7133                             primitive_params,
7134                             "POST-SCALE",
7135                         )
7136 0                         if op_index == self.SUBOPERATION_STATUS_SKIP:
7137                             # Skip sub-operation
7138 0                             result = "COMPLETED"
7139 0                             result_detail = "Done"
7140 0                             self.logger.debug(
7141                                 logging_text
7142                                 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7143                                     vnf_config_primitive, result, result_detail
7144                                 )
7145                             )
7146                         else:
7147 0                             if op_index == self.SUBOPERATION_STATUS_NEW:
7148                                 # New sub-operation: Get index of this sub-operation
7149 0                                 op_index = (
7150                                     len(db_nslcmop.get("_admin", {}).get("operations"))
7151                                     - 1
7152                                 )
7153 0                                 self.logger.debug(
7154                                     logging_text
7155                                     + "vnf_config_primitive={} New sub-operation".format(
7156                                         vnf_config_primitive
7157                                     )
7158                                 )
7159                             else:
7160                                 # retry:  Get registered params for this existing sub-operation
7161 0                                 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7162                                     op_index
7163                                 ]
7164 0                                 vnf_index = op.get("member_vnf_index")
7165 0                                 vnf_config_primitive = op.get("primitive")
7166 0                                 primitive_params = op.get("primitive_params")
7167 0                                 self.logger.debug(
7168                                     logging_text
7169                                     + "vnf_config_primitive={} Sub-operation retry".format(
7170                                         vnf_config_primitive
7171                                     )
7172                                 )
7173                             # Execute the primitive, either with new (first-time) or registered (retry) args
7174 0                             ee_descriptor_id = config_primitive.get(
7175                                 "execution-environment-ref"
7176                             )
7177 0                             primitive_name = config_primitive.get(
7178                                 "execution-environment-primitive", vnf_config_primitive
7179                             )
7180 0                             ee_id, vca_type = self._look_for_deployed_vca(
7181                                 nsr_deployed["VCA"],
7182                                 member_vnf_index=vnf_index,
7183                                 vdu_id=None,
7184                                 vdu_count_index=None,
7185                                 ee_descriptor_id=ee_descriptor_id,
7186                             )
7187 0                             result, result_detail = await self._ns_execute_primitive(
7188                                 ee_id,
7189                                 primitive_name,
7190                                 primitive_params,
7191                                 vca_type=vca_type,
7192                                 vca_id=vca_id,
7193                             )
7194 0                             self.logger.debug(
7195                                 logging_text
7196                                 + "vnf_config_primitive={} Done with result {} {}".format(
7197                                     vnf_config_primitive, result, result_detail
7198                                 )
7199                             )
7200                             # Update operationState = COMPLETED | FAILED
7201 0                             self._update_suboperation_status(
7202                                 db_nslcmop, op_index, result, result_detail
7203                             )
7204
7205 0                         if result == "FAILED":
7206 0                             raise LcmException(result_detail)
7207 0                         db_nsr_update["config-status"] = old_config_status
7208 0                         scale_process = None
7209             # POST-SCALE END
7210
7211 1             db_nsr_update[
7212                 "detailed-status"
7213             ] = ""  # "scaled {} {}".format(scaling_group, scaling_type)
7214 1             db_nsr_update["operational-status"] = (
7215                 "running"
7216                 if old_operational_status == "failed"
7217                 else old_operational_status
7218             )
7219 1             db_nsr_update["config-status"] = old_config_status
7220 1             return
7221 1         except (
7222             ROclient.ROClientException,
7223             DbException,
7224             LcmException,
7225             NgRoException,
7226         ) as e:
7227 0             self.logger.error(logging_text + "Exit Exception {}".format(e))
7228 0             exc = e
7229 1         except asyncio.CancelledError:
7230 0             self.logger.error(
7231                 logging_text + "Cancelled Exception while '{}'".format(step)
7232             )
7233 0             exc = "Operation was cancelled"
7234 1         except Exception as e:
7235 1             exc = traceback.format_exc()
7236 1             self.logger.critical(
7237                 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7238                 exc_info=True,
7239             )
7240         finally:
7241 1             self._write_ns_status(
7242                 nsr_id=nsr_id,
7243                 ns_state=None,
7244                 current_operation="IDLE",
7245                 current_operation_id=None,
7246             )
7247 1             if tasks_dict_info:
7248 0                 stage[1] = "Waiting for instantiate pending tasks."
7249 0                 self.logger.debug(logging_text + stage[1])
7250 0                 exc = await self._wait_for_tasks(
7251                     logging_text,
7252                     tasks_dict_info,
7253                     self.timeout.ns_deploy,
7254                     stage,
7255                     nslcmop_id,
7256                     nsr_id=nsr_id,
7257                 )
7258 1             if exc:
7259 1                 db_nslcmop_update[
7260                     "detailed-status"
7261                 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7262 1                 nslcmop_operation_state = "FAILED"
7263 1                 if db_nsr:
7264 1                     db_nsr_update["operational-status"] = old_operational_status
7265 1                     db_nsr_update["config-status"] = old_config_status
7266 1                     db_nsr_update["detailed-status"] = ""
7267 1                     if scale_process:
7268 0                         if "VCA" in scale_process:
7269 0                             db_nsr_update["config-status"] = "failed"
7270 0                         if "RO" in scale_process:
7271 0                             db_nsr_update["operational-status"] = "failed"
7272 0                         db_nsr_update[
7273                             "detailed-status"
7274                         ] = "FAILED scaling nslcmop={} {}: {}".format(
7275                             nslcmop_id, step, exc
7276                         )
7277             else:
7278 1                 error_description_nslcmop = None
7279 1                 nslcmop_operation_state = "COMPLETED"
7280 1                 db_nslcmop_update["detailed-status"] = "Done"
7281
7282 1             self._write_op_status(
7283                 op_id=nslcmop_id,
7284                 stage="",
7285                 error_message=error_description_nslcmop,
7286                 operation_state=nslcmop_operation_state,
7287                 other_update=db_nslcmop_update,
7288             )
7289 1             if db_nsr:
7290 1                 self._write_ns_status(
7291                     nsr_id=nsr_id,
7292                     ns_state=None,
7293                     current_operation="IDLE",
7294                     current_operation_id=None,
7295                     other_update=db_nsr_update,
7296                 )
7297
7298 1             if nslcmop_operation_state:
7299 1                 try:
7300 1                     msg = {
7301                         "nsr_id": nsr_id,
7302                         "nslcmop_id": nslcmop_id,
7303                         "operationState": nslcmop_operation_state,
7304                     }
7305 1                     await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7306 0                 except Exception as e:
7307 0                     self.logger.error(
7308                         logging_text + "kafka_write notification Exception {}".format(e)
7309                     )
7310 1             self.logger.debug(logging_text + "Exit")
7311 1             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7312
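# --- Illustrative sketch (not part of ns.py): the idempotent sub-operation
# dispatch used by the PRE/POST-SCALE config-primitive blocks above. The
# status constants and helper below are simplified stand-ins for
# _check_or_add_scale_suboperation and the SUBOPERATION_STATUS_* markers;
# their names and values are assumptions for illustration only.
SUBOP_SKIP, SUBOP_NEW = -1, -2


def dispatch_suboperation(op_index, registered_ops, requested):
    """Return the (primitive, params) to execute, or None if already done."""
    if op_index == SUBOP_SKIP:
        # Sub-operation already completed in a previous attempt: skip it.
        return None
    if op_index == SUBOP_NEW:
        # First execution: use the freshly computed primitive and params.
        return requested
    # Retry: replay the parameters registered for the existing sub-operation.
    op = registered_ops[op_index]
    return op["primitive"], op["primitive_params"]


# Example: a retry picks up the registered parameters, not the new ones.
ops = [{"primitive": "touch", "primitive_params": {"filename": "/tmp/f"}}]
assert dispatch_suboperation(0, ops, ("touch", {})) == (
    "touch",
    {"filename": "/tmp/f"},
)
assert dispatch_suboperation(SUBOP_SKIP, ops, ("touch", {})) is None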
7313 1     async def _scale_kdu(
7314         self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7315     ):
7316 1         _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7317 1         for kdu_name in _scaling_info:
7318 1             for kdu_scaling_info in _scaling_info[kdu_name]:
7319 1                 deployed_kdu, index = get_deployed_kdu(
7320                     nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7321                 )
7322 1                 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7323 1                 kdu_instance = deployed_kdu["kdu-instance"]
7324 1                 kdu_model = deployed_kdu.get("kdu-model")
7325 1                 scale = int(kdu_scaling_info["scale"])
7326 1                 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7327
7328 1                 db_dict = {
7329                     "collection": "nsrs",
7330                     "filter": {"_id": nsr_id},
7331                     "path": "_admin.deployed.K8s.{}".format(index),
7332                 }
7333
7334 1                 step = "scaling application {}".format(
7335                     kdu_scaling_info["resource-name"]
7336                 )
7337 1                 self.logger.debug(logging_text + step)
7338
7339 1                 if kdu_scaling_info["type"] == "delete":
7340 0                     kdu_config = get_configuration(db_vnfd, kdu_name)
7341 0                     if (
7342                         kdu_config
7343                         and kdu_config.get("terminate-config-primitive")
7344                         and get_juju_ee_ref(db_vnfd, kdu_name) is None
7345                     ):
7346 0                         terminate_config_primitive_list = kdu_config.get(
7347                             "terminate-config-primitive"
7348                         )
7349 0                         terminate_config_primitive_list.sort(
7350                             key=lambda val: int(val["seq"])
7351                         )
7352
7353 0                         for (
7354                             terminate_config_primitive
7355                         ) in terminate_config_primitive_list:
7356 0                             primitive_params_ = self._map_primitive_params(
7357                                 terminate_config_primitive, {}, {}
7358                             )
7359 0                             step = "execute terminate config primitive"
7360 0                             self.logger.debug(logging_text + step)
7361 0                             await asyncio.wait_for(
7362                                 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7363                                     cluster_uuid=cluster_uuid,
7364                                     kdu_instance=kdu_instance,
7365                                     primitive_name=terminate_config_primitive["name"],
7366                                     params=primitive_params_,
7367                                     db_dict=db_dict,
7368                                     total_timeout=self.timeout.primitive,
7369                                     vca_id=vca_id,
7370                                 ),
7371                                 timeout=self.timeout.primitive
7372                                 * self.timeout.primitive_outer_factor,
7373                             )
7374
7375 1                 await asyncio.wait_for(
7376                     self.k8scluster_map[k8s_cluster_type].scale(
7377                         kdu_instance=kdu_instance,
7378                         scale=scale,
7379                         resource_name=kdu_scaling_info["resource-name"],
7380                         total_timeout=self.timeout.scale_on_error,
7381                         vca_id=vca_id,
7382                         cluster_uuid=cluster_uuid,
7383                         kdu_model=kdu_model,
7384                         atomic=True,
7385                         db_dict=db_dict,
7386                     ),
7387                     timeout=self.timeout.scale_on_error
7388                     * self.timeout.scale_on_error_outer_factor,
7389                 )
7390
7391 1                 if kdu_scaling_info["type"] == "create":
7392 1                     kdu_config = get_configuration(db_vnfd, kdu_name)
7393 1                     if (
7394                         kdu_config
7395                         and kdu_config.get("initial-config-primitive")
7396                         and get_juju_ee_ref(db_vnfd, kdu_name) is None
7397                     ):
7398 1                         initial_config_primitive_list = kdu_config.get(
7399                             "initial-config-primitive"
7400                         )
7401 1                         initial_config_primitive_list.sort(
7402                             key=lambda val: int(val["seq"])
7403                         )
7404
7405 1                         for initial_config_primitive in initial_config_primitive_list:
7406 1                             primitive_params_ = self._map_primitive_params(
7407                                 initial_config_primitive, {}, {}
7408                             )
7409 1                             step = "execute initial config primitive"
7410 1                             self.logger.debug(logging_text + step)
7411 1                             await asyncio.wait_for(
7412                                 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7413                                     cluster_uuid=cluster_uuid,
7414                                     kdu_instance=kdu_instance,
7415                                     primitive_name=initial_config_primitive["name"],
7416                                     params=primitive_params_,
7417                                     db_dict=db_dict,
7418                                     vca_id=vca_id,
7419                                 ),
7420                                 timeout=600,
7421                             )
7422
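# --- Illustrative sketch (not part of ns.py): the double-timeout pattern used
# by _scale_kdu above, where the K8s connector call receives its own
# total_timeout and asyncio.wait_for adds an outer guard of
# total_timeout * outer_factor. The helper names and timeout values below are
# made up for illustration.
import asyncio


async def scale_with_outer_guard(scale_coro_factory, total_timeout, outer_factor=1.05):
    # The inner operation is expected to honour total_timeout itself; the
    # outer wait_for only protects against a hung connector call.
    return await asyncio.wait_for(
        scale_coro_factory(total_timeout), timeout=total_timeout * outer_factor
    )


async def _fake_scale(total_timeout):
    await asyncio.sleep(0)  # stand-in for the real K8s connector call
    return "scaled"


print(asyncio.run(scale_with_outer_guard(_fake_scale, total_timeout=300)))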
7423 1     async def _scale_ng_ro(
7424         self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7425     ):
7426 1         nsr_id = db_nslcmop["nsInstanceId"]
7427 1         db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7428 1         db_vnfrs = {}
7429
7430         # read from db: vnfd's for every vnf
7431 1         db_vnfds = []
7432
7433         # for each vnf in ns, read vnfd
7434 1         for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7435 1             db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7436 1             vnfd_id = vnfr["vnfd-id"]  # vnfd uuid for this vnf
7437             # if we do not have this vnfd yet, read it from db
7438 1             if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7439                 # read from db
7440 1                 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7441 1                 db_vnfds.append(vnfd)
7442 1         n2vc_key = self.n2vc.get_public_key()
7443 1         n2vc_key_list = [n2vc_key]
7444 1         self.scale_vnfr(
7445             db_vnfr,
7446             vdu_scaling_info.get("vdu-create"),
7447             vdu_scaling_info.get("vdu-delete"),
7448             mark_delete=True,
7449         )
7450         # db_vnfr has been updated, update db_vnfrs to use it
7451 1         db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7452 1         await self._instantiate_ng_ro(
7453             logging_text,
7454             nsr_id,
7455             db_nsd,
7456             db_nsr,
7457             db_nslcmop,
7458             db_vnfrs,
7459             db_vnfds,
7460             n2vc_key_list,
7461             stage=stage,
7462             start_deploy=time(),
7463             timeout_ns_deploy=self.timeout.ns_deploy,
7464         )
7465 1         if vdu_scaling_info.get("vdu-delete"):
7466 1             self.scale_vnfr(
7467                 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7468             )
7469
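# --- Illustrative sketch (not part of ns.py): the vnfd de-duplication done by
# _scale_ng_ro above, indexing vnfrs by member index while reading each
# distinct vnfd from the database only once. The helper and record contents
# below are assumptions for illustration.
def collect_vnfds(vnfrs, read_vnfd):
    db_vnfrs, db_vnfds = {}, []
    for vnfr in vnfrs:
        db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
        vnfd_id = vnfr["vnfd-id"]
        if not any(vnfd["_id"] == vnfd_id for vnfd in db_vnfds):
            db_vnfds.append(read_vnfd(vnfd_id))
    return db_vnfrs, db_vnfds


vnfrs = [
    {"member-vnf-index-ref": "1", "vnfd-id": "d1"},
    {"member-vnf-index-ref": "2", "vnfd-id": "d1"},
]
_, vnfds = collect_vnfds(vnfrs, lambda _id: {"_id": _id})
assert len(vnfds) == 1  # the shared vnfd is read only once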
7470 1     async def extract_prometheus_scrape_jobs(
7471         self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7472     ):
7473         # look for a file called 'prometheus*.j2' and use it as the scrape-job template (see the sketch after this method)
7474 0         artifact_content = self.fs.dir_ls(artifact_path)
7475 0         job_file = next(
7476             (
7477                 f
7478                 for f in artifact_content
7479                 if f.startswith("prometheus") and f.endswith(".j2")
7480             ),
7481             None,
7482         )
7483 0         if not job_file:
7484 0             return
7485 0         with self.fs.file_open((artifact_path, job_file), "r") as f:
7486 0             job_data = f.read()
7487
7488         # TODO get_service
7489 0         _, _, service = ee_id.partition(".")  # remove prefix   "namespace."
7490 0         host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7491 0         host_port = "80"
7492 0         vnfr_id = vnfr_id.replace("-", "")
7493 0         variables = {
7494             "JOB_NAME": vnfr_id,
7495             "TARGET_IP": target_ip,
7496             "EXPORTER_POD_IP": host_name,
7497             "EXPORTER_POD_PORT": host_port,
7498         }
7499 0         job_list = parse_job(job_data, variables)
7500         # ensure job_name uses the vnfr_id and add the nsr_id as metadata
7501 0         for job in job_list:
7502 0             if (
7503                 not isinstance(job.get("job_name"), str)
7504                 or vnfr_id not in job["job_name"]
7505             ):
7506 0                 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7507 0             job["nsr_id"] = nsr_id
7508 0             job["vnfr_id"] = vnfr_id
7509 0         return job_list
7510
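# --- Illustrative sketch (not part of ns.py): how a 'prometheus*.j2' artifact
# like the one located above could be rendered with the variables built by
# extract_prometheus_scrape_jobs. The template text and the direct use of
# jinja2/yaml here are assumptions for illustration; the real code delegates
# rendering and parsing to parse_job().
import yaml
from jinja2 import Template

template_text = """
- job_name: "{{ JOB_NAME }}"
  static_configs:
    - targets: ["{{ EXPORTER_POD_IP }}:{{ EXPORTER_POD_PORT }}"]
"""
variables = {
    "JOB_NAME": "0d9abc",              # vnfr_id with dashes removed
    "TARGET_IP": "10.0.0.10",
    "EXPORTER_POD_IP": "svc-metrics",  # "<service>-<metric-service>"
    "EXPORTER_POD_PORT": "80",
}
job_list = yaml.safe_load(Template(template_text).render(**variables))
print(job_list[0]["job_name"])  # -> "0d9abc"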
7511 1     async def rebuild_start_stop(
7512         self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7513     ):
7514 1         logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7515 1         self.logger.info(logging_text + "Enter")
7516 1         stage = ["Preparing the environment", ""]
7517         # database nsrs record
7518 1         db_nsr_update = {}
7519 1         vdu_vim_name = None
7520 1         vim_vm_id = None
7521         # in case of error, indicates which part of the operation failed, so the nsr can be set to error status
7522 1         start_deploy = time()
7523 1         try:
7524 1             db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7525 1             vim_account_id = db_vnfr.get("vim-account-id")
7526 1             vim_info_key = "vim:" + vim_account_id
7527 1             vdu_id = additional_param["vdu_id"]
7528 0             vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7529 0             vdur = find_in_list(
7530                 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7531             )
7532 0             if vdur:
7533 0                 vdu_vim_name = vdur["name"]
7534 0                 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7535 0                 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7536             else:
7537 0                 raise LcmException("Target vdu is not found")
7538 0             self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7539             # wait for any previous tasks in progress
7540 0             stage[1] = "Waiting for previous operations to terminate"
7541 0             self.logger.info(stage[1])
7542 0             await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7543
7544 0             stage[1] = "Reading from database."
7545 0             self.logger.info(stage[1])
7546 0             self._write_ns_status(
7547                 nsr_id=nsr_id,
7548                 ns_state=None,
7549                 current_operation=operation_type.upper(),
7550                 current_operation_id=nslcmop_id,
7551             )
7552 0             self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7553
7554             # read from db: ns
7555 0             stage[1] = "Getting nsr={} from db.".format(nsr_id)
7556 0             db_nsr_update["operational-status"] = operation_type
7557 0             self.update_db_2("nsrs", nsr_id, db_nsr_update)
7558             # Payload for RO
7559 0             desc = {
7560                 operation_type: {
7561                     "vim_vm_id": vim_vm_id,
7562                     "vnf_id": vnf_id,
7563                     "vdu_index": additional_param["count-index"],
7564                     "vdu_id": vdur["id"],
7565                     "target_vim": target_vim,
7566                     "vim_account_id": vim_account_id,
7567                 }
7568             }
7569 0             stage[1] = "Sending rebuild request to RO... {}".format(desc)
7570 0             self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7571 0             self.logger.info("ro nsr id: {}".format(nsr_id))
7572 0             result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7573 0             self.logger.info("response from RO: {}".format(result_dict))
7574 0             action_id = result_dict["action_id"]
7575 0             await self._wait_ng_ro(
7576                 nsr_id,
7577                 action_id,
7578                 nslcmop_id,
7579                 start_deploy,
7580                 self.timeout.operate,
7581                 None,
7582                 "start_stop_rebuild",
7583             )
7584 0             return "COMPLETED", "Done"
7585 1         except (ROclient.ROClientException, DbException, LcmException) as e:
7586 0             self.logger.error("Exit Exception {}".format(e))
7587 0             exc = e
7588 1         except asyncio.CancelledError:
7589 0             self.logger.error("Cancelled Exception while '{}'".format(stage))
7590 0             exc = "Operation was cancelled"
7591 1         except Exception as e:
7592 1             exc = traceback.format_exc()
7593 1             self.logger.critical(
7594                 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7595             )
7596 1             return "FAILED", "Error in operate VNF {}".format(exc)
7597
7598 1     def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7599         """
7600         Get VCA Cloud and VCA Cloud Credentials for the VIM account
7601
7602         :param: vim_account_id:     VIM Account ID
7603
7604         :return: (cloud_name, cloud_credential)
7605         """
7606 0         config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7607 0         return config.get("vca_cloud"), config.get("vca_cloud_credential")
7608
7609 1     def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7610         """
7611         Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7612
7613         :param: vim_account_id:     VIM Account ID
7614
7615         :return: (cloud_name, cloud_credential)
7616         """
7617 0         config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7618 0         return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7619
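# --- Illustrative sketch (not part of ns.py): the VIM-account "config" keys
# read by the two getters above. The account contents below are made up; only
# the key names (vca_cloud, vca_cloud_credential, vca_k8s_cloud,
# vca_k8s_cloud_credential) come from the code.
vim_account = {
    "_id": "11111111-2222-3333-4444-555555555555",
    "config": {
        "vca_cloud": "openstack-cloud",
        "vca_cloud_credential": "openstack-cred",
        "vca_k8s_cloud": "k8s-cloud",
        "vca_k8s_cloud_credential": "k8s-cred",
    },
}
config = vim_account.get("config", {})
assert (config.get("vca_cloud"), config.get("vca_cloud_credential")) == (
    "openstack-cloud",
    "openstack-cred",
)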
7620 1     async def migrate(self, nsr_id, nslcmop_id):
7621         """
7622         Migrate VNF and VDU instances in an NS
7623
7624         :param: nsr_id: NS Instance ID
7625         :param: nslcmop_id: nslcmop ID of migrate
7626
7627         """
7628         # Try to lock HA task here
7629 0         task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7630 0         if not task_is_locked_by_me:
7631 0             return
7632 0         logging_text = "Task ns={} migrate ".format(nsr_id)
7633 0         self.logger.debug(logging_text + "Enter")
7634         # get everything needed from the database
7635 0         db_nslcmop = None
7636 0         db_nslcmop_update = {}
7637 0         nslcmop_operation_state = None
7638 0         db_nsr_update = {}
7639 0         target = {}
7640 0         exc = None
7641         # in case of error, indicates which part of the operation failed, so the nsr can be set to error status
7642 0         start_deploy = time()
7643
7644 0         try:
7645             # wait for any previous tasks in progress
7646 0             step = "Waiting for previous operations to terminate"
7647 0             await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7648
7649 0             self._write_ns_status(
7650                 nsr_id=nsr_id,
7651                 ns_state=None,
7652                 current_operation="MIGRATING",
7653                 current_operation_id=nslcmop_id,
7654             )
7655 0             step = "Getting nslcmop from database"
7656 0             self.logger.debug(
7657                 step + " after having waited for previous tasks to be completed"
7658             )
7659 0             db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7660 0             migrate_params = db_nslcmop.get("operationParams")
7661
7662 0             target = {}
7663 0             target.update(migrate_params)
7664 0             desc = await self.RO.migrate(nsr_id, target)
7665 0             self.logger.debug("RO return > {}".format(desc))
7666 0             action_id = desc["action_id"]
7667 0             await self._wait_ng_ro(
7668                 nsr_id,
7669                 action_id,
7670                 nslcmop_id,
7671                 start_deploy,
7672                 self.timeout.migrate,
7673                 operation="migrate",
7674             )
7675 0         except (ROclient.ROClientException, DbException, LcmException) as e:
7676 0             self.logger.error("Exit Exception {}".format(e))
7677 0             exc = e
7678 0         except asyncio.CancelledError:
7679 0             self.logger.error("Cancelled Exception while '{}'".format(step))
7680 0             exc = "Operation was cancelled"
7681 0         except Exception as e:
7682 0             exc = traceback.format_exc()
7683 0             self.logger.critical(
7684                 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7685             )
7686         finally:
7687 0             self._write_ns_status(
7688                 nsr_id=nsr_id,
7689                 ns_state=None,
7690                 current_operation="IDLE",
7691                 current_operation_id=None,
7692             )
7693 0             if exc:
7694 0                 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7695 0                 nslcmop_operation_state = "FAILED"
7696             else:
7697 0                 nslcmop_operation_state = "COMPLETED"
7698 0                 db_nslcmop_update["detailed-status"] = "Done"
7699 0                 db_nsr_update["detailed-status"] = "Done"
7700
7701 0             self._write_op_status(
7702                 op_id=nslcmop_id,
7703                 stage="",
7704                 error_message="",
7705                 operation_state=nslcmop_operation_state,
7706                 other_update=db_nslcmop_update,
7707             )
7708 0             if nslcmop_operation_state:
7709 0                 try:
7710 0                     msg = {
7711                         "nsr_id": nsr_id,
7712                         "nslcmop_id": nslcmop_id,
7713                         "operationState": nslcmop_operation_state,
7714                     }
7715 0                     await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7716 0                 except Exception as e:
7717 0                     self.logger.error(
7718                         logging_text + "kafka_write notification Exception {}".format(e)
7719                     )
7720 0             self.logger.debug(logging_text + "Exit")
7721 0             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7722
7723 1     async def heal(self, nsr_id, nslcmop_id):
7724         """
7725         Heal NS
7726
7727         :param nsr_id: ns instance to heal
7728         :param nslcmop_id: operation to run
7729         :return:
7730         """
7731
7732         # Try to lock HA task here
7733 0         task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7734 0         if not task_is_locked_by_me:
7735 0             return
7736
7737 0         logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7738 0         stage = ["", "", ""]
7739 0         tasks_dict_info = {}
7740         # ^ stage, step, VIM progress
7741 0         self.logger.debug(logging_text + "Enter")
7742         # get everything needed from the database
7743 0         db_nsr = None
7744 0         db_nslcmop_update = {}
7745 0         db_nsr_update = {}
7746 0         db_vnfrs = {}  # vnf's info indexed by _id
7747 0         exc = None
7748 0         old_operational_status = ""
7749 0         old_config_status = ""
7750 0         nsi_id = None
7751 0         try:
7752             # wait for any previous tasks in progress
7753 0             step = "Waiting for previous operations to terminate"
7754 0             await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7755 0             self._write_ns_status(
7756                 nsr_id=nsr_id,
7757                 ns_state=None,
7758                 current_operation="HEALING",
7759                 current_operation_id=nslcmop_id,
7760             )
7761
7762 0             step = "Getting nslcmop from database"
7763 0             self.logger.debug(
7764                 step + " after having waited for previous tasks to be completed"
7765             )
7766 0             db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7767
7768 0             step = "Getting nsr from database"
7769 0             db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7770 0             old_operational_status = db_nsr["operational-status"]
7771 0             old_config_status = db_nsr["config-status"]
7772
7773 0             db_nsr_update = {
7774                 "_admin.deployed.RO.operational-status": "healing",
7775             }
7776 0             self.update_db_2("nsrs", nsr_id, db_nsr_update)
7777
7778 0             step = "Sending heal order to VIM"
7779 0             await self.heal_RO(
7780                 logging_text=logging_text,
7781                 nsr_id=nsr_id,
7782                 db_nslcmop=db_nslcmop,
7783                 stage=stage,
7784             )
7785             # VCA tasks
7786             # read from db: nsd
7787 0             stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7788 0             self.logger.debug(logging_text + stage[1])
7789 0             nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7790 0             self.fs.sync(db_nsr["nsd-id"])
7791 0             db_nsr["nsd"] = nsd
7792             # read from db: vnfr's of this ns
7793 0             step = "Getting vnfrs from db"
7794 0             db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7795 0             for vnfr in db_vnfrs_list:
7796 0                 db_vnfrs[vnfr["_id"]] = vnfr
7797 0             self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7798
7799             # Check each target VNF
7800 0             target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7801 0             for target_vnf in target_list:
7802                 # Find this VNF in the list from DB
7803 0                 vnfr_id = target_vnf.get("vnfInstanceId", None)
7804 0                 if vnfr_id:
7805 0                     db_vnfr = db_vnfrs[vnfr_id]
7806 0                     vnfd_id = db_vnfr.get("vnfd-id")
7807 0                     vnfd_ref = db_vnfr.get("vnfd-ref")
7808 0                     vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7809 0                     base_folder = vnfd["_admin"]["storage"]
7810 0                     vdu_id = None
7811 0                     vdu_index = 0
7812 0                     vdu_name = None
7813 0                     kdu_name = None
7814 0                     nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
7815 0                     member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7816
7817                     # Check each target VDU and deploy N2VC
7818 0                     target_vdu_list = target_vnf.get("additionalParams", {}).get(
7819                         "vdu", []
7820                     )
7821 0                     if not target_vdu_list:
7822                         # New code to create the dictionary
7823 0                         target_vdu_list = []
7824 0                         for existing_vdu in db_vnfr.get("vdur"):
7825 0                             vdu_name = existing_vdu.get("vdu-name", None)
7826 0                             vdu_index = existing_vdu.get("count-index", 0)
7827 0                             vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7828                                 "run-day1", False
7829                             )
7830 0                             vdu_to_be_healed = {
7831                                 "vdu-id": vdu_name,
7832                                 "count-index": vdu_index,
7833                                 "run-day1": vdu_run_day1,
7834                             }
7835 0                             target_vdu_list.append(vdu_to_be_healed)
7836 0                     for target_vdu in target_vdu_list:
7837 0                         deploy_params_vdu = target_vdu
7838                         # Set the VNF-level run-day1 value if no VDU-level value exists
7839 0                         if not deploy_params_vdu.get("run-day1") and target_vnf[
7840                             "additionalParams"
7841                         ].get("run-day1"):
7842 0                             deploy_params_vdu["run-day1"] = target_vnf[
7843                                 "additionalParams"
7844                             ].get("run-day1")
7845 0                         vdu_name = target_vdu.get("vdu-id", None)
7846                         # TODO: Get vdu_id from vdud.
7847 0                         vdu_id = vdu_name
7848                         # For a multi-instance VDU, count-index is mandatory
7849                         # For a single-instance VDU, count-index is 0
7850 0                         vdu_index = target_vdu.get("count-index", 0)
7851
7852                         # n2vc_redesign STEP 3 to 6 Deploy N2VC
7853 0                         stage[1] = "Deploying Execution Environments."
7854 0                         self.logger.debug(logging_text + stage[1])
7855
7856                         # VNF-level charm. Normal case with proxy charms.
7857                         # If the target instance is the management machine, continue with the actions: recreate the EE for native charms or re-inject the juju key for proxy charms.
7858 0                         descriptor_config = get_configuration(vnfd, vnfd_ref)
7859 0                         if descriptor_config:
7860                             # Continue if healed machine is management machine
7861 0                             vnf_ip_address = db_vnfr.get("ip-address")
7862 0                             target_instance = None
7863 0                             for instance in db_vnfr.get("vdur", None):
7864 0                                 if (
7865                                     instance["vdu-name"] == vdu_name
7866                                     and instance["count-index"] == vdu_index
7867                                 ):
7868 0                                     target_instance = instance
7869 0                                     break
7870 0                             if vnf_ip_address == target_instance.get("ip-address"):
7871 0                                 self._heal_n2vc(
7872                                     logging_text=logging_text
7873                                     + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7874                                         member_vnf_index, vdu_name, vdu_index
7875                                     ),
7876                                     db_nsr=db_nsr,
7877                                     db_vnfr=db_vnfr,
7878                                     nslcmop_id=nslcmop_id,
7879                                     nsr_id=nsr_id,
7880                                     nsi_id=nsi_id,
7881                                     vnfd_id=vnfd_ref,
7882                                     vdu_id=None,
7883                                     kdu_name=None,
7884                                     member_vnf_index=member_vnf_index,
7885                                     vdu_index=0,
7886                                     vdu_name=None,
7887                                     deploy_params=deploy_params_vdu,
7888                                     descriptor_config=descriptor_config,
7889                                     base_folder=base_folder,
7890                                     task_instantiation_info=tasks_dict_info,
7891                                     stage=stage,
7892                                 )
7893
7894                         # VDU Level charm. Normal case with native charms.
7895 0                         descriptor_config = get_configuration(vnfd, vdu_name)
7896 0                         if descriptor_config:
7897 0                             self._heal_n2vc(
7898                                 logging_text=logging_text
7899                                 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7900                                     member_vnf_index, vdu_name, vdu_index
7901                                 ),
7902                                 db_nsr=db_nsr,
7903                                 db_vnfr=db_vnfr,
7904                                 nslcmop_id=nslcmop_id,
7905                                 nsr_id=nsr_id,
7906                                 nsi_id=nsi_id,
7907                                 vnfd_id=vnfd_ref,
7908                                 vdu_id=vdu_id,
7909                                 kdu_name=kdu_name,
7910                                 member_vnf_index=member_vnf_index,
7911                                 vdu_index=vdu_index,
7912                                 vdu_name=vdu_name,
7913                                 deploy_params=deploy_params_vdu,
7914                                 descriptor_config=descriptor_config,
7915                                 base_folder=base_folder,
7916                                 task_instantiation_info=tasks_dict_info,
7917                                 stage=stage,
7918                             )
7919
7920 0         except (
7921             ROclient.ROClientException,
7922             DbException,
7923             LcmException,
7924             NgRoException,
7925         ) as e:
7926 0             self.logger.error(logging_text + "Exit Exception {}".format(e))
7927 0             exc = e
7928 0         except asyncio.CancelledError:
7929 0             self.logger.error(
7930                 logging_text + "Cancelled Exception while '{}'".format(step)
7931             )
7932 0             exc = "Operation was cancelled"
7933 0         except Exception as e:
7934 0             exc = traceback.format_exc()
7935 0             self.logger.critical(
7936                 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7937                 exc_info=True,
7938             )
7939         finally:
7940 0             if tasks_dict_info:
7941 0                 stage[1] = "Waiting for healing pending tasks."
7942 0                 self.logger.debug(logging_text + stage[1])
7943 0                 exc = await self._wait_for_tasks(
7944                     logging_text,
7945                     tasks_dict_info,
7946                     self.timeout.ns_deploy,
7947                     stage,
7948                     nslcmop_id,
7949                     nsr_id=nsr_id,
7950                 )
7951 0             if exc:
7952 0                 db_nslcmop_update[
7953                     "detailed-status"
7954                 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7955 0                 nslcmop_operation_state = "FAILED"
7956 0                 if db_nsr:
7957 0                     db_nsr_update["operational-status"] = old_operational_status
7958 0                     db_nsr_update["config-status"] = old_config_status
7959 0                     db_nsr_update[
7960                         "detailed-status"
7961                     ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
7962 0                     for task, task_name in tasks_dict_info.items():
7963 0                         if not task.done() or task.cancelled() or task.exception():
7964 0                             if task_name.startswith(self.task_name_deploy_vca):
7965                                 # An N2VC task is pending
7966 0                                 db_nsr_update["config-status"] = "failed"
7967                             else:
7968                                 # RO task is pending
7969 0                                 db_nsr_update["operational-status"] = "failed"
7970             else:
7971 0                 error_description_nslcmop = None
7972 0                 nslcmop_operation_state = "COMPLETED"
7973 0                 db_nslcmop_update["detailed-status"] = "Done"
7974 0                 db_nsr_update["detailed-status"] = "Done"
7975 0                 db_nsr_update["operational-status"] = "running"
7976 0                 db_nsr_update["config-status"] = "configured"
7977
7978 0             self._write_op_status(
7979                 op_id=nslcmop_id,
7980                 stage="",
7981                 error_message=error_description_nslcmop,
7982                 operation_state=nslcmop_operation_state,
7983                 other_update=db_nslcmop_update,
7984             )
7985 0             if db_nsr:
7986 0                 self._write_ns_status(
7987                     nsr_id=nsr_id,
7988                     ns_state=None,
7989                     current_operation="IDLE",
7990                     current_operation_id=None,
7991                     other_update=db_nsr_update,
7992                 )
7993
7994 0             if nslcmop_operation_state:
7995 0                 try:
7996 0                     msg = {
7997                         "nsr_id": nsr_id,
7998                         "nslcmop_id": nslcmop_id,
7999                         "operationState": nslcmop_operation_state,
8000                     }
8001 0                     await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
8002 0                 except Exception as e:
8003 0                     self.logger.error(
8004                         logging_text + "kafka_write notification Exception {}".format(e)
8005                     )
8006 0             self.logger.debug(logging_text + "Exit")
8007 0             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8008
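The finally block of the healing task above always runs the same finalization sequence: wait for any pending per-element tasks, derive the operation state from the collected exception, persist the nslcmop/nsr updates, publish the "healed" Kafka notification and release the task lock. A minimal sketch of that sequence follows; the helper names (wait_for_tasks, persist, notify) are illustrative placeholders, not part of the ns.py API.

    # Illustrative sketch only; helper names are hypothetical placeholders.
    async def finalize_heal(wait_for_tasks, persist, notify, tasks, step, exc):
        if tasks:
            exc = await wait_for_tasks(tasks)        # surfaces the first pending-task failure, if any
        state = "FAILED" if exc else "COMPLETED"
        detail = "FAILED {}: {}".format(step, exc) if exc else "Done"
        persist({"detailed-status": detail})         # mirrors db_nslcmop_update / db_nsr_update
        await notify({"operationState": state})      # mirrors the "ns"/"healed" Kafka message
        return state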
8009 1     async def heal_RO(
8010         self,
8011         logging_text,
8012         nsr_id,
8013         db_nslcmop,
8014         stage,
8015     ):
8016         """
8017         Heal at RO
8018         :param logging_text: prefix text to use at logging
8019         :param nsr_id: nsr identity
8020         :param db_nslcmop: database content of the ns operation, in this case, 'heal'
8021         :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8022         :return: None or exception
8023         """
8024
8025 0         def get_vim_account(vim_account_id):
8026             nonlocal db_vims
8027 0             if vim_account_id in db_vims:
8028 0                 return db_vims[vim_account_id]
8029 0             db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8030 0             db_vims[vim_account_id] = db_vim
8031 0             return db_vim
8032
8033 0         try:
8034 0             start_heal = time()
8035 0             ns_params = db_nslcmop.get("operationParams")
8036 0             if ns_params and ns_params.get("timeout_ns_heal"):
8037 0                 timeout_ns_heal = ns_params["timeout_ns_heal"]
8038             else:
8039 0                 timeout_ns_heal = self.timeout.ns_heal
8040
8041 0             db_vims = {}
8042
8043 0             nslcmop_id = db_nslcmop["_id"]
8044 0             target = {
8045                 "action_id": nslcmop_id,
8046             }
8047 0             self.logger.warning(
8048                 "db_nslcmop={} and timeout_ns_heal={}".format(
8049                     db_nslcmop, timeout_ns_heal
8050                 )
8051             )
8052 0             target.update(db_nslcmop.get("operationParams", {}))
8053
8054 0             self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8055 0             desc = await self.RO.recreate(nsr_id, target)
8056 0             self.logger.debug("RO return > {}".format(desc))
8057 0             action_id = desc["action_id"]
8058             # wait for RO to complete, because re-injecting the juju key at RO can find the VM in state Deleted
8059 0             await self._wait_ng_ro(
8060                 nsr_id,
8061                 action_id,
8062                 nslcmop_id,
8063                 start_heal,
8064                 timeout_ns_heal,
8065                 stage,
8066                 operation="healing",
8067             )
8068
8069             # Updating NSR
8070 0             db_nsr_update = {
8071                 "_admin.deployed.RO.operational-status": "running",
8072                 "detailed-status": " ".join(stage),
8073             }
8074 0             self.update_db_2("nsrs", nsr_id, db_nsr_update)
8075 0             self._write_op_status(nslcmop_id, stage)
8076 0             self.logger.debug(
8077                 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8078             )
8079
8080 0         except Exception as e:
8081 0             stage[2] = "ERROR healing at VIM"
8082             # self.set_vnfr_at_error(db_vnfrs, str(e))
8083 0             self.logger.error(
8084                 "Error healing at VIM {}".format(e),
8085                 exc_info=not isinstance(
8086                     e,
8087                     (
8088                         ROclient.ROClientException,
8089                         LcmException,
8090                         DbException,
8091                         NgRoException,
8092                     ),
8093                 ),
8094             )
8095 0             raise
8096
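heal_RO forwards the healing request to NG-RO: it builds a target dictionary from the nslcmop id plus the operation parameters, calls self.RO.recreate, and then waits on the returned action_id with _wait_ng_ro before updating the NSR. A hedged sketch of how that target payload is assembled (build_heal_target is a simplification for illustration, not a helper that exists in ns.py):

    # Sketch of the NG-RO "recreate" target built by heal_RO (hypothetical helper).
    def build_heal_target(db_nslcmop: dict) -> dict:
        target = {"action_id": db_nslcmop["_id"]}             # NG-RO correlates the action with the nslcmop id
        target.update(db_nslcmop.get("operationParams", {}))  # the heal operation parameters from the nslcmop
        return target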
8097 1     def _heal_n2vc(
8098         self,
8099         logging_text,
8100         db_nsr,
8101         db_vnfr,
8102         nslcmop_id,
8103         nsr_id,
8104         nsi_id,
8105         vnfd_id,
8106         vdu_id,
8107         kdu_name,
8108         member_vnf_index,
8109         vdu_index,
8110         vdu_name,
8111         deploy_params,
8112         descriptor_config,
8113         base_folder,
8114         task_instantiation_info,
8115         stage,
8116     ):
8117         # launch instantiate_N2VC in an asyncio task and register the task object
8118         # Look up where the information for this charm is stored in the database: <nsrs>._admin.deployed.VCA
8119         # if not found, create one entry and update the database
8120         # fill db_nsr._admin.deployed.VCA.<index>
8121
8122 0         self.logger.debug(
8123             logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
8124         )
8125
8126 0         charm_name = ""
8127 0         get_charm_name = False
8128 0         if "execution-environment-list" in descriptor_config:
8129 0             ee_list = descriptor_config.get("execution-environment-list", [])
8130 0         elif "juju" in descriptor_config:
8131 0             ee_list = [descriptor_config]  # ns charms
8132 0             if "execution-environment-list" not in descriptor_config:
8133                 # charm name is only required for ns charms
8134 0                 get_charm_name = True
8135         else:  # other types as script are not supported
8136 0             ee_list = []
8137
8138 0         for ee_item in ee_list:
8139 0             self.logger.debug(
8140                 logging_text
8141                 + "_deploy_n2vc ee_item juju={}, helm={}".format(
8142                     ee_item.get("juju"), ee_item.get("helm-chart")
8143                 )
8144             )
8145 0             ee_descriptor_id = ee_item.get("id")
8146 0             if ee_item.get("juju"):
8147 0                 vca_name = ee_item["juju"].get("charm")
8148 0                 if get_charm_name:
8149 0                     charm_name = self.find_charm_name(db_nsr, str(vca_name))
8150 0                 vca_type = (
8151                     "lxc_proxy_charm"
8152                     if ee_item["juju"].get("charm") is not None
8153                     else "native_charm"
8154                 )
8155 0                 if ee_item["juju"].get("cloud") == "k8s":
8156 0                     vca_type = "k8s_proxy_charm"
8157 0                 elif ee_item["juju"].get("proxy") is False:
8158 0                     vca_type = "native_charm"
8159 0             elif ee_item.get("helm-chart"):
8160 0                 vca_name = ee_item["helm-chart"]
8161 0                 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
8162 0                     vca_type = "helm"
8163                 else:
8164 0                     vca_type = "helm-v3"
8165             else:
8166 0                 self.logger.debug(
8167                     logging_text + "skipping: neither juju charm nor helm chart configuration"
8168                 )
8169 0                 continue
8170
8171 0             vca_index = -1
8172 0             for vca_index, vca_deployed in enumerate(
8173                 db_nsr["_admin"]["deployed"]["VCA"]
8174             ):
8175 0                 if not vca_deployed:
8176 0                     continue
8177 0                 if (
8178                     vca_deployed.get("member-vnf-index") == member_vnf_index
8179                     and vca_deployed.get("vdu_id") == vdu_id
8180                     and vca_deployed.get("kdu_name") == kdu_name
8181                     and vca_deployed.get("vdu_count_index", 0) == vdu_index
8182                     and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
8183                 ):
8184 0                     break
8185             else:
8186                 # not found, create one.
8187 0                 target = (
8188                     "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
8189                 )
8190 0                 if vdu_id:
8191 0                     target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
8192 0                 elif kdu_name:
8193 0                     target += "/kdu/{}".format(kdu_name)
8194 0                 vca_deployed = {
8195                     "target_element": target,
8196                     # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8197                     "member-vnf-index": member_vnf_index,
8198                     "vdu_id": vdu_id,
8199                     "kdu_name": kdu_name,
8200                     "vdu_count_index": vdu_index,
8201                     "operational-status": "init",  # TODO revise
8202                     "detailed-status": "",  # TODO revise
8203                     "step": "initial-deploy",  # TODO revise
8204                     "vnfd_id": vnfd_id,
8205                     "vdu_name": vdu_name,
8206                     "type": vca_type,
8207                     "ee_descriptor_id": ee_descriptor_id,
8208                     "charm_name": charm_name,
8209                 }
8210 0                 vca_index += 1
8211
8212                 # create VCA and configurationStatus in db
8213 0                 db_dict = {
8214                     "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
8215                     "configurationStatus.{}".format(vca_index): dict(),
8216                 }
8217 0                 self.update_db_2("nsrs", nsr_id, db_dict)
8218
8219 0                 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
8220
8221 0             self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
8222 0             self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
8223 0             self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
8224
8225             # Launch task
8226 0             task_n2vc = asyncio.ensure_future(
8227                 self.heal_N2VC(
8228                     logging_text=logging_text,
8229                     vca_index=vca_index,
8230                     nsi_id=nsi_id,
8231                     db_nsr=db_nsr,
8232                     db_vnfr=db_vnfr,
8233                     vdu_id=vdu_id,
8234                     kdu_name=kdu_name,
8235                     vdu_index=vdu_index,
8236                     deploy_params=deploy_params,
8237                     config_descriptor=descriptor_config,
8238                     base_folder=base_folder,
8239                     nslcmop_id=nslcmop_id,
8240                     stage=stage,
8241                     vca_type=vca_type,
8242                     vca_name=vca_name,
8243                     ee_config_descriptor=ee_item,
8244                 )
8245             )
8246 0             self.lcm_tasks.register(
8247                 "ns",
8248                 nsr_id,
8249                 nslcmop_id,
8250                 "instantiate_N2VC-{}".format(vca_index),
8251                 task_n2vc,
8252             )
8253 0             task_instantiation_info[
8254                 task_n2vc
8255             ] = self.task_name_deploy_vca + " {}.{}".format(
8256                 member_vnf_index or "", vdu_id or ""
8257             )
8258
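In _heal_n2vc above, vca_type is derived from each execution-environment item in the descriptor: juju items become proxy or native charms (a k8s cloud forces k8s_proxy_charm, proxy: false forces native_charm), while helm-chart items map to helm or helm-v3 depending on helm-version. The same branching, extracted as a standalone sketch for readability (derive_vca_type is illustrative and not part of ns.py):

    # Sketch of the vca_type decision used by _heal_n2vc (hypothetical standalone function).
    def derive_vca_type(ee_item: dict) -> str:
        if ee_item.get("juju"):
            vca_type = (
                "lxc_proxy_charm" if ee_item["juju"].get("charm") is not None else "native_charm"
            )
            if ee_item["juju"].get("cloud") == "k8s":
                vca_type = "k8s_proxy_charm"
            elif ee_item["juju"].get("proxy") is False:
                vca_type = "native_charm"
            return vca_type
        if ee_item.get("helm-chart"):
            return "helm" if ee_item.get("helm-version") == "v2" else "helm-v3"
        raise ValueError("unsupported execution-environment item")  # script-type items are skipped in ns.py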
8259 1     async def heal_N2VC(
8260         self,
8261         logging_text,
8262         vca_index,
8263         nsi_id,
8264         db_nsr,
8265         db_vnfr,
8266         vdu_id,
8267         kdu_name,
8268         vdu_index,
8269         config_descriptor,
8270         deploy_params,
8271         base_folder,
8272         nslcmop_id,
8273         stage,
8274         vca_type,
8275         vca_name,
8276         ee_config_descriptor,
8277     ):
8278 0         nsr_id = db_nsr["_id"]
8279 0         db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
8280 0         vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
8281 0         vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
8282 0         osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
8283 0         db_dict = {
8284             "collection": "nsrs",
8285             "filter": {"_id": nsr_id},
8286             "path": db_update_entry,
8287         }
8288 0         step = ""
8289 0         try:
8290 0             element_type = "NS"
8291 0             element_under_configuration = nsr_id
8292
8293 0             vnfr_id = None
8294 0             if db_vnfr:
8295 0                 vnfr_id = db_vnfr["_id"]
8296 0                 osm_config["osm"]["vnf_id"] = vnfr_id
8297
8298 0             namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
8299
8300 0             if vca_type == "native_charm":
8301 0                 index_number = 0
8302             else:
8303 0                 index_number = vdu_index or 0
8304
8305 0             if vnfr_id:
8306 0                 element_type = "VNF"
8307 0                 element_under_configuration = vnfr_id
8308 0                 namespace += ".{}-{}".format(vnfr_id, index_number)
8309 0                 if vdu_id:
8310 0                     namespace += ".{}-{}".format(vdu_id, index_number)
8311 0                     element_type = "VDU"
8312 0                     element_under_configuration = "{}-{}".format(vdu_id, index_number)
8313 0                     osm_config["osm"]["vdu_id"] = vdu_id
8314 0                 elif kdu_name:
8315 0                     namespace += ".{}".format(kdu_name)
8316 0                     element_type = "KDU"
8317 0                     element_under_configuration = kdu_name
8318 0                     osm_config["osm"]["kdu_name"] = kdu_name
8319
8320             # Get artifact path
8321 0             if base_folder["pkg-dir"]:
8322 0                 artifact_path = "{}/{}/{}/{}".format(
8323                     base_folder["folder"],
8324                     base_folder["pkg-dir"],
8325                     "charms"
8326                     if vca_type
8327                     in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8328                     else "helm-charts",
8329                     vca_name,
8330                 )
8331             else:
8332 0                 artifact_path = "{}/Scripts/{}/{}/".format(
8333                     base_folder["folder"],
8334                     "charms"
8335                     if vca_type
8336                     in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8337                     else "helm-charts",
8338                     vca_name,
8339                 )
8340
8341 0             self.logger.debug("Artifact path > {}".format(artifact_path))
8342
8343             # get initial_config_primitive_list that applies to this element
8344 0             initial_config_primitive_list = config_descriptor.get(
8345                 "initial-config-primitive"
8346             )
8347
8348 0             self.logger.debug(
8349                 "Initial config primitive list > {}".format(
8350                     initial_config_primitive_list
8351                 )
8352             )
8353
8354             # add config if not present for NS charm
8355 0             ee_descriptor_id = ee_config_descriptor.get("id")
8356 0             self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
8357 0             initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
8358                 initial_config_primitive_list, vca_deployed, ee_descriptor_id
8359             )
8360
8361 0             self.logger.debug(
8362                 "Initial config primitive list #2 > {}".format(
8363                     initial_config_primitive_list
8364                 )
8365             )
8366             # n2vc_redesign STEP 3.1
8367             # find old ee_id if exists
8368 0             ee_id = vca_deployed.get("ee_id")
8369
8370 0             vca_id = self.get_vca_id(db_vnfr, db_nsr)
8371             # create or register execution environment in VCA. Only for native charms when healing
8372 0             if vca_type == "native_charm":
8373 0                 step = "Waiting to VM being up and getting IP address"
8374 0                 self.logger.debug(logging_text + step)
8375 0                 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
8376                     logging_text,
8377                     nsr_id,
8378                     vnfr_id,
8379                     vdu_id,
8380                     vdu_index,
8381                     user=None,
8382                     pub_key=None,
8383                 )
8384 0                 credentials = {"hostname": rw_mgmt_ip}
8385                 # get username
8386 0                 username = deep_get(
8387                     config_descriptor, ("config-access", "ssh-access", "default-user")
8388                 )
8389                 # TODO remove this once the IM changes regarding config-access:ssh-access:default-user are
8390                 #  merged. Meanwhile, get the username from initial-config-primitive
8391 0                 if not username and initial_config_primitive_list:
8392 0                     for config_primitive in initial_config_primitive_list:
8393 0                         for param in config_primitive.get("parameter", ()):
8394 0                             if param["name"] == "ssh-username":
8395 0                                 username = param["value"]
8396 0                                 break
8397 0                 if not username:
8398 0                     raise LcmException(
8399                         "Cannot determine the username neither with 'initial-config-primitive' nor with "
8400                         "'config-access.ssh-access.default-user'"
8401                     )
8402 0                 credentials["username"] = username
8403
8404                 # n2vc_redesign STEP 3.2
8405                 # TODO: Before healing at RO, the native charm units to be deleted need to be destroyed first.
8406 0                 self._write_configuration_status(
8407                     nsr_id=nsr_id,
8408                     vca_index=vca_index,
8409                     status="REGISTERING",
8410                     element_under_configuration=element_under_configuration,
8411                     element_type=element_type,
8412                 )
8413
8414 0                 step = "register execution environment {}".format(credentials)
8415 0                 self.logger.debug(logging_text + step)
8416 0                 ee_id = await self.vca_map[vca_type].register_execution_environment(
8417                     credentials=credentials,
8418                     namespace=namespace,
8419                     db_dict=db_dict,
8420                     vca_id=vca_id,
8421                 )
8422
8423                 # update ee_id in db
8424 0                 db_dict_ee_id = {
8425                     "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
8426                 }
8427 0                 self.update_db_2("nsrs", nsr_id, db_dict_ee_id)
8428
8429             # for compatibility with MON/POL modules, they need the model and application name in the database
8430             # TODO ask MON/POL whether it is still necessary to assume the format "model_name.application_name"
8431             # Not sure if this needs to be done when healing
8432             """
8433             ee_id_parts = ee_id.split(".")
8434             db_nsr_update = {db_update_entry + "ee_id": ee_id}
8435             if len(ee_id_parts) >= 2:
8436                 model_name = ee_id_parts[0]
8437                 application_name = ee_id_parts[1]
8438                 db_nsr_update[db_update_entry + "model"] = model_name
8439                 db_nsr_update[db_update_entry + "application"] = application_name
8440             """
8441
8442             # n2vc_redesign STEP 3.3
8443             # Install configuration software. Only for native charms.
8444 0             step = "Install configuration Software"
8445
8446 0             self._write_configuration_status(
8447                 nsr_id=nsr_id,
8448                 vca_index=vca_index,
8449                 status="INSTALLING SW",
8450                 element_under_configuration=element_under_configuration,
8451                 element_type=element_type,
8452                 # other_update=db_nsr_update,
8453                 other_update=None,
8454             )
8455
8456             # TODO check if already done
8457 0             self.logger.debug(logging_text + step)
8458 0             config = None
8459 0             if vca_type == "native_charm":
8460 0                 config_primitive = next(
8461                     (p for p in initial_config_primitive_list if p["name"] == "config"),
8462                     None,
8463                 )
8464 0                 if config_primitive:
8465 0                     config = self._map_primitive_params(
8466                         config_primitive, {}, deploy_params
8467                     )
8468 0                 await self.vca_map[vca_type].install_configuration_sw(
8469                     ee_id=ee_id,
8470                     artifact_path=artifact_path,
8471                     db_dict=db_dict,
8472                     config=config,
8473                     num_units=1,
8474                     vca_id=vca_id,
8475                     vca_type=vca_type,
8476                 )
8477
8478             # write in the db the flag that configuration_sw is already installed
8479 0             self.update_db_2(
8480                 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
8481             )
8482
8483             # Not sure if this needs to be done when healing
8484             """
8485             # add relations for this VCA (wait for other peers related with this VCA)
8486             await self._add_vca_relations(
8487                 logging_text=logging_text,
8488                 nsr_id=nsr_id,
8489                 vca_type=vca_type,
8490                 vca_index=vca_index,
8491             )
8492             """
8493
8494             # if SSH access is required, then get the execution environment SSH public key
8495             # for native charms, we have already waited for the VM to be up
8496 0             if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
8497 0                 pub_key = None
8498 0                 user = None
8499                 # self.logger.debug("get ssh key block")
8500 0                 if deep_get(
8501                     config_descriptor, ("config-access", "ssh-access", "required")
8502                 ):
8503                     # self.logger.debug("ssh key needed")
8504                     # Needed to inject a ssh key
8505 0                     user = deep_get(
8506                         config_descriptor,
8507                         ("config-access", "ssh-access", "default-user"),
8508                     )
8509 0                     step = "Install configuration Software, getting public ssh key"
8510 0                     pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
8511                         ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
8512                     )
8513
8514 0                     step = "Insert public key into VM user={} ssh_key={}".format(
8515                         user, pub_key
8516                     )
8517                 else:
8518                     # self.logger.debug("no need to get ssh key")
8519 0                     step = "Waiting to VM being up and getting IP address"
8520 0                 self.logger.debug(logging_text + step)
8521
8522                 # n2vc_redesign STEP 5.1
8523                 # wait for RO (ip-address) and insert pub_key into the VM
8524                 # IMPORTANT: we need to wait for RO to complete the healing operation.
8525 0                 await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
8526 0                 if vnfr_id:
8527 0                     if kdu_name:
8528 0                         rw_mgmt_ip = await self.wait_kdu_up(
8529                             logging_text, nsr_id, vnfr_id, kdu_name
8530                         )
8531                     else:
8532 0                         rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
8533                             logging_text,
8534                             nsr_id,
8535                             vnfr_id,
8536                             vdu_id,
8537                             vdu_index,
8538                             user=user,
8539                             pub_key=pub_key,
8540                         )
8541                 else:
8542 0                     rw_mgmt_ip = None  # This is for a NS configuration
8543
8544 0                 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
8545
8546             # store rw_mgmt_ip in deploy params for later replacement
8547 0             deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
8548
8549             # Day1 operations.
8550             # get run-day1 operation parameter
8551 0             runDay1 = deploy_params.get("run-day1", False)
8552 0             self.logger.debug(
8553                 "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
8554             )
8555 0             if runDay1:
8556                 # n2vc_redesign STEP 6  Execute initial config primitive
8557 0                 step = "execute initial config primitive"
8558
8559                 # wait for dependent primitives execution (NS -> VNF -> VDU)
8560 0                 if initial_config_primitive_list:
8561 0                     await self._wait_dependent_n2vc(
8562                         nsr_id, vca_deployed_list, vca_index
8563                     )
8564
8565                 # stage, depending on the element type: vdu, kdu, vnf or ns
8566 0                 my_vca = vca_deployed_list[vca_index]
8567 0                 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
8568                     # VDU or KDU
8569 0                     stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
8570 0                 elif my_vca.get("member-vnf-index"):
8571                     # VNF
8572 0                     stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
8573                 else:
8574                     # NS
8575 0                     stage[0] = "Stage 5/5: running Day-1 primitives for NS."
8576
8577 0                 self._write_configuration_status(
8578                     nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
8579                 )
8580
8581 0                 self._write_op_status(op_id=nslcmop_id, stage=stage)
8582
8583 0                 check_if_terminated_needed = True
8584 0                 for initial_config_primitive in initial_config_primitive_list:
8585                     # add information to the vca_deployed if it is an NS execution environment
8586 0                     if not vca_deployed["member-vnf-index"]:
8587 0                         deploy_params["ns_config_info"] = json.dumps(
8588                             self._get_ns_config_info(nsr_id)
8589                         )
8590                     # TODO check if already done
8591 0                     primitive_params_ = self._map_primitive_params(
8592                         initial_config_primitive, {}, deploy_params
8593                     )
8594
8595 0                     step = "execute primitive '{}' params '{}'".format(
8596                         initial_config_primitive["name"], primitive_params_
8597                     )
8598 0                     self.logger.debug(logging_text + step)
8599 0                     await self.vca_map[vca_type].exec_primitive(
8600                         ee_id=ee_id,
8601                         primitive_name=initial_config_primitive["name"],
8602                         params_dict=primitive_params_,
8603                         db_dict=db_dict,
8604                         vca_id=vca_id,
8605                         vca_type=vca_type,
8606                     )
8607                     # Once a primitive has been executed, check and record in the db whether terminate primitives need to be executed
8608 0                     if check_if_terminated_needed:
8609 0                         if config_descriptor.get("terminate-config-primitive"):
8610 0                             self.update_db_2(
8611                                 "nsrs",
8612                                 nsr_id,
8613                                 {db_update_entry + "needed_terminate": True},
8614                             )
8615 0                         check_if_terminated_needed = False
8616
8617                     # TODO register in database that primitive is done
8618
8619             # STEP 7 Configure metrics
8620             # Not sure if this needs to be done when healing
8621             """
8622             if vca_type == "helm" or vca_type == "helm-v3":
8623                 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
8624                     ee_id=ee_id,
8625                     artifact_path=artifact_path,
8626                     ee_config_descriptor=ee_config_descriptor,
8627                     vnfr_id=vnfr_id,
8628                     nsr_id=nsr_id,
8629                     target_ip=rw_mgmt_ip,
8630                 )
8631                 if prometheus_jobs:
8632                     self.update_db_2(
8633                         "nsrs",
8634                         nsr_id,
8635                         {db_update_entry + "prometheus_jobs": prometheus_jobs},
8636                     )
8637
8638                     for job in prometheus_jobs:
8639                         self.db.set_one(
8640                             "prometheus_jobs",
8641                             {"job_name": job["job_name"]},
8642                             job,
8643                             upsert=True,
8644                             fail_on_empty=False,
8645                         )
8646
8647             """
8648 0             step = "instantiated at VCA"
8649 0             self.logger.debug(logging_text + step)
8650
8651 0             self._write_configuration_status(
8652                 nsr_id=nsr_id, vca_index=vca_index, status="READY"
8653             )
8654
8655 0         except Exception as e:  # TODO: use a specific N2VC exception instead of a generic Exception
8656             # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
8657 0             if not isinstance(
8658                 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
8659             ):
8660 0                 self.logger.error(
8661                     "Exception while {} : {}".format(step, e), exc_info=True
8662                 )
8663 0             self._write_configuration_status(
8664                 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
8665             )
8666 0             raise LcmException("{} {}".format(step, e)) from e
8667
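heal_N2VC above locates the charm or helm artifact inside the package: when the package defines a pkg-dir, the path is <folder>/<pkg-dir>/charms|helm-charts/<vca_name>; otherwise the Scripts layout <folder>/Scripts/charms|helm-charts/<vca_name> is used. A hedged sketch of that selection (resolve_artifact_path is illustrative only):

    # Sketch of the artifact-path selection done in heal_N2VC (hypothetical helper).
    def resolve_artifact_path(base_folder: dict, vca_type: str, vca_name: str) -> str:
        kind = (
            "charms"
            if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
            else "helm-charts"
        )
        if base_folder.get("pkg-dir"):
            return "{}/{}/{}/{}".format(
                base_folder["folder"], base_folder["pkg-dir"], kind, vca_name
            )
        return "{}/Scripts/{}/{}/".format(base_folder["folder"], kind, vca_name)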
8668 1     async def _wait_heal_ro(
8669         self,
8670         nsr_id,
8671         timeout=600,
8672     ):
8673 0         start_time = time()
8674 0         while time() <= start_time + timeout:
8675 0             db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8676 0             operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8677                 "operational-status"
8678             ]
8679 0             self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8680 0             if operational_status_ro != "healing":
8681 0                 break
8682 0             await asyncio.sleep(15, loop=self.loop)
8683         else:  # timeout expired
8684 0             raise NgRoException("Timeout waiting for the ns to heal at RO")
8685
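_wait_heal_ro above polls nsrs._admin.deployed.RO.operational-status every 15 seconds until the value leaves "healing", raising NgRoException once the timeout expires. The generic poll-until pattern it implements looks roughly like this (read_status is a placeholder for the database lookup):

    # Generic sketch of the polling loop behind _wait_heal_ro (read_status is hypothetical).
    import asyncio
    import time

    async def poll_until_not(read_status, busy_value="healing", timeout=600, interval=15):
        start = time.time()
        while time.time() <= start + timeout:
            if read_status() != busy_value:    # RO left the busy state: stop waiting
                return
            await asyncio.sleep(interval)
        raise TimeoutError("status still '{}' after {} seconds".format(busy_value, timeout))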
8686 1     async def vertical_scale(self, nsr_id, nslcmop_id):
8687         """
8688         Vertically scale the VDUs in an NS
8689
8690         :param: nsr_id: NS Instance ID
8691         :param: nslcmop_id: nslcmop ID of the vertical scale operation
8692
8693         """
8694         # Try to lock HA task here
8695 1         task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8696 1         if not task_is_locked_by_me:
8697 0             return
8698 1         logging_text = "Task ns={} vertical scale ".format(nsr_id)
8699 1         self.logger.debug(logging_text + "Enter")
8700         # get all needed from database
8701 1         db_nslcmop = None
8702 1         db_nslcmop_update = {}
8703 1         nslcmop_operation_state = None
8704 1         db_nsr_update = {}
8705 1         target = {}
8706 1         exc = None
8707         # in case of error, indicates what part of the scale failed, in order to put the nsr in error status
8708 1         start_deploy = time()
8709
8710 1         try:
8711             # wait for any previous tasks in progress
8712 1             step = "Waiting for previous operations to terminate"
8713 1             await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8714
8715 1             self._write_ns_status(
8716                 nsr_id=nsr_id,
8717                 ns_state=None,
8718                 current_operation="VerticalScale",
8719                 current_operation_id=nslcmop_id,
8720             )
8721 1             step = "Getting nslcmop from database"
8722 1             self.logger.debug(
8723                 step + " after having waited for previous tasks to be completed"
8724             )
8725 1             db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8726 1             operationParams = db_nslcmop.get("operationParams")
8727 1             target = {}
8728 1             target.update(operationParams)
8729 1             desc = await self.RO.vertical_scale(nsr_id, target)
8730 1             self.logger.debug("RO return > {}".format(desc))
8731 1             action_id = desc["action_id"]
8732 1             await self._wait_ng_ro(
8733                 nsr_id,
8734                 action_id,
8735                 nslcmop_id,
8736                 start_deploy,
8737                 self.timeout.verticalscale,
8738                 operation="verticalscale",
8739             )
8740 1         except (ROclient.ROClientException, DbException, LcmException) as e:
8741 0             self.logger.error("Exit Exception {}".format(e))
8742 0             exc = e
8743 1         except asyncio.CancelledError:
8744 0             self.logger.error("Cancelled Exception while '{}'".format(step))
8745 0             exc = "Operation was cancelled"
8746 1         except Exception as e:
8747 1             exc = traceback.format_exc()
8748 1             self.logger.critical(
8749                 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8750             )
8751         finally:
8752 1             self._write_ns_status(
8753                 nsr_id=nsr_id,
8754                 ns_state=None,
8755                 current_operation="IDLE",
8756                 current_operation_id=None,
8757             )
8758 1             if exc:
8759 1                 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8760 1                 nslcmop_operation_state = "FAILED"
8761             else:
8762 1                 nslcmop_operation_state = "COMPLETED"
8763 1                 db_nslcmop_update["detailed-status"] = "Done"
8764 1                 db_nsr_update["detailed-status"] = "Done"
8765
8766 1             self._write_op_status(
8767                 op_id=nslcmop_id,
8768                 stage="",
8769                 error_message="",
8770                 operation_state=nslcmop_operation_state,
8771                 other_update=db_nslcmop_update,
8772             )
8773 1             if nslcmop_operation_state:
8774 1                 try:
8775 1                     msg = {
8776                         "nsr_id": nsr_id,
8777                         "nslcmop_id": nslcmop_id,
8778                         "operationState": nslcmop_operation_state,
8779                     }
8780 1                     await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
8781 0                 except Exception as e:
8782 0                     self.logger.error(
8783                         logging_text + "kafka_write notification Exception {}".format(e)
8784                     )
8785 1             self.logger.debug(logging_text + "Exit")
8786 1             self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")
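vertical_scale closes by publishing the operation result on the "ns" topic with key "verticalscaled", so that other OSM modules subscribed to that topic can update the operation status. The payload written above has this shape (the values here are placeholders):

    # Shape of the Kafka notification emitted at the end of vertical_scale (placeholder values).
    msg = {
        "nsr_id": "<ns instance id>",
        "nslcmop_id": "<operation id>",
        "operationState": "COMPLETED",  # or "FAILED"
    }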