Bug 1234: OSM reports successful deployment when a charm relation fails
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmExceptionNoMgmtIP,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 )
65 from osm_lcm.data_utils.nsd import (
66 get_ns_configuration_relation_list,
67 get_vnf_profile,
68 get_vnf_profiles,
69 )
70 from osm_lcm.data_utils.vnfd import (
71 get_kdu,
72 get_kdu_services,
73 get_relation_list,
74 get_vdu_list,
75 get_vdu_profile,
76 get_ee_sorted_initial_config_primitive_list,
77 get_ee_sorted_terminate_config_primitive_list,
78 get_kdu_list,
79 get_virtual_link_profiles,
80 get_vdu,
81 get_configuration,
82 get_vdu_index,
83 get_scaling_aspect,
84 get_number_of_instances,
85 get_juju_ee_ref,
86 get_kdu_resource_profile,
87 find_software_version,
88 check_helm_ee_in_ns,
89 )
90 from osm_lcm.data_utils.list_utils import find_in_list
91 from osm_lcm.data_utils.vnfr import (
92 get_osm_params,
93 get_vdur_index,
94 get_kdur,
95 get_volumes_from_instantiation_params,
96 )
97 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
98 from osm_lcm.data_utils.database.vim_account import VimAccountDB
99 from n2vc.definitions import RelationEndpoint
100 from n2vc.k8s_helm_conn import K8sHelmConnector
101 from n2vc.k8s_helm3_conn import K8sHelm3Connector
102 from n2vc.k8s_juju_conn import K8sJujuConnector
103
104 from osm_common.dbbase import DbException
105 from osm_common.fsbase import FsException
106
107 from osm_lcm.data_utils.database.database import Database
108 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
109 from osm_lcm.data_utils.wim import (
110 get_sdn_ports,
111 get_target_wim_attrs,
112 select_feasible_wim_account,
113 )
114
115 from n2vc.n2vc_juju_conn import N2VCJujuConnector
116 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
117
118 from osm_lcm.lcm_helm_conn import LCMHelmConn
119 from osm_lcm.osm_config import OsmConfigBuilder
120 from osm_lcm.prometheus import parse_job
121
122 from copy import copy, deepcopy
123 from time import time
124 from uuid import uuid4
125
126 from random import randint
127
128 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
129
130
class NsLcm(LcmBase):
    """NS (network service) lifecycle manager.

    Drives instantiation, scaling and termination of network services,
    coordinating the RO client, N2VC/juju and k8s connectors, and keeping
    the nsrs/vnfrs database records up to date.
    """

    # Sentinel results used when searching for an existing sub-operation
    # in the nslcmop record (see _find_suboperation-style lookups).
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Human-readable task name used when registering the VCA deployment task
    task_name_deploy_vca = "Deploying VCA"
136
    def __init__(self, msg, lcm_tasks, config: LcmCfg, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: kafka message-bus handler, forwarded to LcmBase
        :param lcm_tasks: registry used to track per-NS asyncio tasks
        :param config: LcmCfg with timeout, RO and VCA configuration sections
        :param loop: asyncio event loop shared by all connectors
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector (juju-based charms); DB updates are pushed
        # back through _on_update_n2vc_db
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # connector for helm-based execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # helm v2 k8s connector (no DB status callback)
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helmpath,
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # helm v3 k8s connector (no DB status callback)
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # juju-bundle k8s connector; status changes go through _on_update_k8s_db
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # dispatch table: kdu deployment type -> k8s connector.
        # NOTE(review): "helm-chart" maps to helm2 while plain "chart" maps
        # to helm3 — confirm this asymmetry is intended.
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # dispatch table: execution-environment (VCA) type -> connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config.to_dict())

        # RO status-poll entry point per LCM operation type; only "healing"
        # uses the recreate_status endpoint
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
224
225 @staticmethod
226 def increment_ip_mac(ip_mac, vm_index=1):
227 if not isinstance(ip_mac, str):
228 return ip_mac
229 try:
230 # try with ipv4 look for last dot
231 i = ip_mac.rfind(".")
232 if i > 0:
233 i += 1
234 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
235 # try with ipv6 or mac look for last colon. Operate in hex
236 i = ip_mac.rfind(":")
237 if i > 0:
238 i += 1
239 # format in hex, len can be 2 for mac or 4 for ipv6
240 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
241 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
242 )
243 except Exception:
244 pass
245 return None
246
247 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
248
249 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
250
251 try:
252 # TODO filter RO descriptor fields...
253
254 # write to database
255 db_dict = dict()
256 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
257 db_dict["deploymentStatus"] = ro_descriptor
258 self.update_db_2("nsrs", nsrs_id, db_dict)
259
260 except Exception as e:
261 self.logger.warn(
262 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
263 )
264
265 async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
266
267 # remove last dot from path (if exists)
268 if path.endswith("."):
269 path = path[:-1]
270
271 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
272 # .format(table, filter, path, updated_data))
273 try:
274
275 nsr_id = filter.get("_id")
276
277 # read ns record from database
278 nsr = self.db.get_one(table="nsrs", q_filter=filter)
279 current_ns_status = nsr.get("nsState")
280
281 # get vca status for NS
282 status_dict = await self.n2vc.get_status(
283 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
284 )
285
286 # vcaStatus
287 db_dict = dict()
288 db_dict["vcaStatus"] = status_dict
289
290 # update configurationStatus for this VCA
291 try:
292 vca_index = int(path[path.rfind(".") + 1 :])
293
294 vca_list = deep_get(
295 target_dict=nsr, key_list=("_admin", "deployed", "VCA")
296 )
297 vca_status = vca_list[vca_index].get("status")
298
299 configuration_status_list = nsr.get("configurationStatus")
300 config_status = configuration_status_list[vca_index].get("status")
301
302 if config_status == "BROKEN" and vca_status != "failed":
303 db_dict["configurationStatus"][vca_index] = "READY"
304 elif config_status != "BROKEN" and vca_status == "failed":
305 db_dict["configurationStatus"][vca_index] = "BROKEN"
306 except Exception as e:
307 # not update configurationStatus
308 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
309
310 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
311 # if nsState = 'DEGRADED' check if all is OK
312 is_degraded = False
313 if current_ns_status in ("READY", "DEGRADED"):
314 error_description = ""
315 # check machines
316 if status_dict.get("machines"):
317 for machine_id in status_dict.get("machines"):
318 machine = status_dict.get("machines").get(machine_id)
319 # check machine agent-status
320 if machine.get("agent-status"):
321 s = machine.get("agent-status").get("status")
322 if s != "started":
323 is_degraded = True
324 error_description += (
325 "machine {} agent-status={} ; ".format(
326 machine_id, s
327 )
328 )
329 # check machine instance status
330 if machine.get("instance-status"):
331 s = machine.get("instance-status").get("status")
332 if s != "running":
333 is_degraded = True
334 error_description += (
335 "machine {} instance-status={} ; ".format(
336 machine_id, s
337 )
338 )
339 # check applications
340 if status_dict.get("applications"):
341 for app_id in status_dict.get("applications"):
342 app = status_dict.get("applications").get(app_id)
343 # check application status
344 if app.get("status"):
345 s = app.get("status").get("status")
346 if s != "active":
347 is_degraded = True
348 error_description += (
349 "application {} status={} ; ".format(app_id, s)
350 )
351
352 if error_description:
353 db_dict["errorDescription"] = error_description
354 if current_ns_status == "READY" and is_degraded:
355 db_dict["nsState"] = "DEGRADED"
356 if current_ns_status == "DEGRADED" and not is_degraded:
357 db_dict["nsState"] = "READY"
358
359 # write to database
360 self.update_db_2("nsrs", nsr_id, db_dict)
361
362 except (asyncio.CancelledError, asyncio.TimeoutError):
363 raise
364 except Exception as e:
365 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
366
367 async def _on_update_k8s_db(
368 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
369 ):
370 """
371 Updating vca status in NSR record
372 :param cluster_uuid: UUID of a k8s cluster
373 :param kdu_instance: The unique name of the KDU instance
374 :param filter: To get nsr_id
375 :cluster_type: The cluster type (juju, k8s)
376 :return: none
377 """
378
379 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
380 # .format(cluster_uuid, kdu_instance, filter))
381
382 nsr_id = filter.get("_id")
383 try:
384 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
385 cluster_uuid=cluster_uuid,
386 kdu_instance=kdu_instance,
387 yaml_format=False,
388 complete_status=True,
389 vca_id=vca_id,
390 )
391
392 # vcaStatus
393 db_dict = dict()
394 db_dict["vcaStatus"] = {nsr_id: vca_status}
395
396 self.logger.debug(
397 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
398 )
399
400 # write to database
401 self.update_db_2("nsrs", nsr_id, db_dict)
402 except (asyncio.CancelledError, asyncio.TimeoutError):
403 raise
404 except Exception as e:
405 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
406
407 @staticmethod
408 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
409 try:
410 env = Environment(
411 undefined=StrictUndefined,
412 autoescape=select_autoescape(default_for_string=True, default=True),
413 )
414 template = env.from_string(cloud_init_text)
415 return template.render(additional_params or {})
416 except UndefinedError as e:
417 raise LcmException(
418 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
419 "file, must be provided in the instantiation parameters inside the "
420 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
421 )
422 except (TemplateError, TemplateNotFound) as e:
423 raise LcmException(
424 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
425 vnfd_id, vdu_id, e
426 )
427 )
428
429 def _get_vdu_cloud_init_content(self, vdu, vnfd):
430 cloud_init_content = cloud_init_file = None
431 try:
432 if vdu.get("cloud-init-file"):
433 base_folder = vnfd["_admin"]["storage"]
434 if base_folder["pkg-dir"]:
435 cloud_init_file = "{}/{}/cloud_init/{}".format(
436 base_folder["folder"],
437 base_folder["pkg-dir"],
438 vdu["cloud-init-file"],
439 )
440 else:
441 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
442 base_folder["folder"],
443 vdu["cloud-init-file"],
444 )
445 with self.fs.file_open(cloud_init_file, "r") as ci_file:
446 cloud_init_content = ci_file.read()
447 elif vdu.get("cloud-init"):
448 cloud_init_content = vdu["cloud-init"]
449
450 return cloud_init_content
451 except FsException as e:
452 raise LcmException(
453 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
454 vnfd["id"], vdu["id"], cloud_init_file, e
455 )
456 )
457
458 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
459 vdur = next(
460 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
461 )
462 additional_params = vdur.get("additionalParams")
463 return parse_yaml_strings(additional_params)
464
465 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
466 """
467 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
468 :param vnfd: input vnfd
469 :param new_id: overrides vnf id if provided
470 :param additionalParams: Instantiation params for VNFs provided
471 :param nsrId: Id of the NSR
472 :return: copy of vnfd
473 """
474 vnfd_RO = deepcopy(vnfd)
475 # remove unused by RO configuration, monitoring, scaling and internal keys
476 vnfd_RO.pop("_id", None)
477 vnfd_RO.pop("_admin", None)
478 vnfd_RO.pop("monitoring-param", None)
479 vnfd_RO.pop("scaling-group-descriptor", None)
480 vnfd_RO.pop("kdu", None)
481 vnfd_RO.pop("k8s-cluster", None)
482 if new_id:
483 vnfd_RO["id"] = new_id
484
485 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
486 for vdu in get_iterable(vnfd_RO, "vdu"):
487 vdu.pop("cloud-init-file", None)
488 vdu.pop("cloud-init", None)
489 return vnfd_RO
490
491 @staticmethod
492 def ip_profile_2_RO(ip_profile):
493 RO_ip_profile = deepcopy(ip_profile)
494 if "dns-server" in RO_ip_profile:
495 if isinstance(RO_ip_profile["dns-server"], list):
496 RO_ip_profile["dns-address"] = []
497 for ds in RO_ip_profile.pop("dns-server"):
498 RO_ip_profile["dns-address"].append(ds["address"])
499 else:
500 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
501 if RO_ip_profile.get("ip-version") == "ipv4":
502 RO_ip_profile["ip-version"] = "IPv4"
503 if RO_ip_profile.get("ip-version") == "ipv6":
504 RO_ip_profile["ip-version"] = "IPv6"
505 if "dhcp-params" in RO_ip_profile:
506 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
507 return RO_ip_profile
508
509 def _get_ro_vim_id_for_vim_account(self, vim_account):
510 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
511 if db_vim["_admin"]["operationalState"] != "ENABLED":
512 raise LcmException(
513 "VIM={} is not available. operationalState={}".format(
514 vim_account, db_vim["_admin"]["operationalState"]
515 )
516 )
517 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
518 return RO_vim_id
519
520 def get_ro_wim_id_for_wim_account(self, wim_account):
521 if isinstance(wim_account, str):
522 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
523 if db_wim["_admin"]["operationalState"] != "ENABLED":
524 raise LcmException(
525 "WIM={} is not available. operationalState={}".format(
526 wim_account, db_wim["_admin"]["operationalState"]
527 )
528 )
529 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
530 return RO_wim_id
531 else:
532 return wim_account
533
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Apply a scale-out / scale-in to the vnfr "vdur" list, in db and in db_vnfr.

        :param db_vnfr: vnfr record; modified in place ("vdur" is re-read from db at the end)
        :param vdu_create: dict vdu-id -> number of new instances to add
        :param vdu_delete: dict vdu-id -> number of instances to remove
        :param mark_delete: when True, vdurs are only marked "DELETING" instead of being pulled
        :return: None
        """

        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # use the last existing vdur of this vdu as template for the new replicas
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # each new replica gets a fresh id/_id and BUILD status
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed-ip/fixed-mac interfaces get deterministic,
                        # incremented addresses; otherwise VIM assigns them
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the last vdu_count replicas of this vdu as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
645
646 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
647 """
648 Updates database nsr with the RO info for the created vld
649 :param ns_update_nsr: dictionary to be filled with the updated info
650 :param db_nsr: content of db_nsr. This is also modified
651 :param nsr_desc_RO: nsr descriptor from RO
652 :return: Nothing, LcmException is raised on errors
653 """
654
655 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
656 for net_RO in get_iterable(nsr_desc_RO, "nets"):
657 if vld["id"] != net_RO.get("ns_net_osm_id"):
658 continue
659 vld["vim-id"] = net_RO.get("vim_net_id")
660 vld["name"] = net_RO.get("vim_name")
661 vld["status"] = net_RO.get("status")
662 vld["status-detailed"] = net_RO.get("error_msg")
663 ns_update_nsr["vld.{}".format(vld_index)] = vld
664 break
665 else:
666 raise LcmException(
667 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
668 )
669
670 def set_vnfr_at_error(self, db_vnfrs, error_text):
671 try:
672 for db_vnfr in db_vnfrs.values():
673 vnfr_update = {"status": "ERROR"}
674 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
675 if "status" not in vdur:
676 vdur["status"] = "ERROR"
677 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
678 if error_text:
679 vdur["status-detailed"] = str(error_text)
680 vnfr_update[
681 "vdur.{}.status-detailed".format(vdu_index)
682 ] = "ERROR"
683 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
684 except DbException as e:
685 self.logger.error("Cannot update vnf. {}".format(e))
686
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf entry for this member index; the for/else below
            # raises when RO did not report this vnf at all
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ";": keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    # replicas of the same vdu are matched by count-index order
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy per-interface addresses, matching by internal name
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # update vnf-internal vlds with the networks reported by RO
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
783
784 def _get_ns_config_info(self, nsr_id):
785 """
786 Generates a mapping between vnf,vdu elements and the N2VC id
787 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
788 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
789 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
790 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
791 """
792 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
793 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
794 mapping = {}
795 ns_config_info = {"osm-config-mapping": mapping}
796 for vca in vca_deployed_list:
797 if not vca["member-vnf-index"]:
798 continue
799 if not vca["vdu_id"]:
800 mapping[vca["member-vnf-index"]] = vca["application"]
801 else:
802 mapping[
803 "{}.{}.{}".format(
804 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
805 )
806 ] = vca["application"]
807 return ns_config_info
808
809 async def _instantiate_ng_ro(
810 self,
811 logging_text,
812 nsr_id,
813 nsd,
814 db_nsr,
815 db_nslcmop,
816 db_vnfrs,
817 db_vnfds,
818 n2vc_key_list,
819 stage,
820 start_deploy,
821 timeout_ns_deploy,
822 ):
823
824 db_vims = {}
825
826 def get_vim_account(vim_account_id):
827 nonlocal db_vims
828 if vim_account_id in db_vims:
829 return db_vims[vim_account_id]
830 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
831 db_vims[vim_account_id] = db_vim
832 return db_vim
833
834 # modify target_vld info with instantiation parameters
835 def parse_vld_instantiation_params(
836 target_vim, target_vld, vld_params, target_sdn
837 ):
838 if vld_params.get("ip-profile"):
839 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
840 "ip-profile"
841 ]
842 if vld_params.get("provider-network"):
843 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
844 "provider-network"
845 ]
846 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
847 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
848 "provider-network"
849 ]["sdn-ports"]
850
851 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
852 # if wim_account_id is specified in vld_params, validate if it is feasible.
853 wim_account_id, db_wim = select_feasible_wim_account(
854 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
855 )
856
857 if wim_account_id:
858 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
859 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
860 # update vld_params with correct WIM account Id
861 vld_params["wimAccountId"] = wim_account_id
862
863 target_wim = "wim:{}".format(wim_account_id)
864 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
865 sdn_ports = get_sdn_ports(vld_params, db_wim)
866 if len(sdn_ports) > 0:
867 target_vld["vim_info"][target_wim] = target_wim_attrs
868 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
869
870 self.logger.debug(
871 "Target VLD with WIM data: {:s}".format(str(target_vld))
872 )
873
874 for param in ("vim-network-name", "vim-network-id"):
875 if vld_params.get(param):
876 if isinstance(vld_params[param], dict):
877 for vim, vim_net in vld_params[param].items():
878 other_target_vim = "vim:" + vim
879 populate_dict(
880 target_vld["vim_info"],
881 (other_target_vim, param.replace("-", "_")),
882 vim_net,
883 )
884 else: # isinstance str
885 target_vld["vim_info"][target_vim][
886 param.replace("-", "_")
887 ] = vld_params[param]
888 if vld_params.get("common_id"):
889 target_vld["common_id"] = vld_params.get("common_id")
890
891 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
892 def update_ns_vld_target(target, ns_params):
893 for vnf_params in ns_params.get("vnf", ()):
894 if vnf_params.get("vimAccountId"):
895 target_vnf = next(
896 (
897 vnfr
898 for vnfr in db_vnfrs.values()
899 if vnf_params["member-vnf-index"]
900 == vnfr["member-vnf-index-ref"]
901 ),
902 None,
903 )
904 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
905 if not vdur:
906 return
907 for a_index, a_vld in enumerate(target["ns"]["vld"]):
908 target_vld = find_in_list(
909 get_iterable(vdur, "interfaces"),
910 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
911 )
912
913 vld_params = find_in_list(
914 get_iterable(ns_params, "vld"),
915 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
916 )
917 if target_vld:
918
919 if vnf_params.get("vimAccountId") not in a_vld.get(
920 "vim_info", {}
921 ):
922 target_vim_network_list = [
923 v for _, v in a_vld.get("vim_info").items()
924 ]
925 target_vim_network_name = next(
926 (
927 item.get("vim_network_name", "")
928 for item in target_vim_network_list
929 ),
930 "",
931 )
932
933 target["ns"]["vld"][a_index].get("vim_info").update(
934 {
935 "vim:{}".format(vnf_params["vimAccountId"]): {
936 "vim_network_name": target_vim_network_name,
937 }
938 }
939 )
940
941 if vld_params:
942 for param in ("vim-network-name", "vim-network-id"):
943 if vld_params.get(param) and isinstance(
944 vld_params[param], dict
945 ):
946 for vim, vim_net in vld_params[
947 param
948 ].items():
949 other_target_vim = "vim:" + vim
950 populate_dict(
951 target["ns"]["vld"][a_index].get(
952 "vim_info"
953 ),
954 (
955 other_target_vim,
956 param.replace("-", "_"),
957 ),
958 vim_net,
959 )
960
961 nslcmop_id = db_nslcmop["_id"]
962 target = {
963 "name": db_nsr["name"],
964 "ns": {"vld": []},
965 "vnf": [],
966 "image": deepcopy(db_nsr["image"]),
967 "flavor": deepcopy(db_nsr["flavor"]),
968 "action_id": nslcmop_id,
969 "cloud_init_content": {},
970 }
971 for image in target["image"]:
972 image["vim_info"] = {}
973 for flavor in target["flavor"]:
974 flavor["vim_info"] = {}
975 if db_nsr.get("affinity-or-anti-affinity-group"):
976 target["affinity-or-anti-affinity-group"] = deepcopy(
977 db_nsr["affinity-or-anti-affinity-group"]
978 )
979 for affinity_or_anti_affinity_group in target[
980 "affinity-or-anti-affinity-group"
981 ]:
982 affinity_or_anti_affinity_group["vim_info"] = {}
983
984 if db_nslcmop.get("lcmOperationType") != "instantiate":
985 # get parameters of instantiation:
986 db_nslcmop_instantiate = self.db.get_list(
987 "nslcmops",
988 {
989 "nsInstanceId": db_nslcmop["nsInstanceId"],
990 "lcmOperationType": "instantiate",
991 },
992 )[-1]
993 ns_params = db_nslcmop_instantiate.get("operationParams")
994 else:
995 ns_params = db_nslcmop.get("operationParams")
996 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
997 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
998
999 cp2target = {}
1000 for vld_index, vld in enumerate(db_nsr.get("vld")):
1001 target_vim = "vim:{}".format(ns_params["vimAccountId"])
1002 target_vld = {
1003 "id": vld["id"],
1004 "name": vld["name"],
1005 "mgmt-network": vld.get("mgmt-network", False),
1006 "type": vld.get("type"),
1007 "vim_info": {
1008 target_vim: {
1009 "vim_network_name": vld.get("vim-network-name"),
1010 "vim_account_id": ns_params["vimAccountId"],
1011 }
1012 },
1013 }
1014 # check if this network needs SDN assist
1015 if vld.get("pci-interfaces"):
1016 db_vim = get_vim_account(ns_params["vimAccountId"])
1017 sdnc_id = db_vim["config"].get("sdn-controller")
1018 if sdnc_id:
1019 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1020 target_sdn = "sdn:{}".format(sdnc_id)
1021 target_vld["vim_info"][target_sdn] = {
1022 "sdn": True,
1023 "target_vim": target_vim,
1024 "vlds": [sdn_vld],
1025 "type": vld.get("type"),
1026 }
1027
1028 nsd_vnf_profiles = get_vnf_profiles(nsd)
1029 for nsd_vnf_profile in nsd_vnf_profiles:
1030 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1031 if cp["virtual-link-profile-id"] == vld["id"]:
1032 cp2target[
1033 "member_vnf:{}.{}".format(
1034 cp["constituent-cpd-id"][0][
1035 "constituent-base-element-id"
1036 ],
1037 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1038 )
1039 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1040
1041 # check at nsd descriptor, if there is an ip-profile
1042 vld_params = {}
1043 nsd_vlp = find_in_list(
1044 get_virtual_link_profiles(nsd),
1045 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1046 == vld["id"],
1047 )
1048 if (
1049 nsd_vlp
1050 and nsd_vlp.get("virtual-link-protocol-data")
1051 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1052 ):
1053 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1054 "l3-protocol-data"
1055 ]
1056 ip_profile_dest_data = {}
1057 if "ip-version" in ip_profile_source_data:
1058 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1059 "ip-version"
1060 ]
1061 if "cidr" in ip_profile_source_data:
1062 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1063 "cidr"
1064 ]
1065 if "gateway-ip" in ip_profile_source_data:
1066 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1067 "gateway-ip"
1068 ]
1069 if "dhcp-enabled" in ip_profile_source_data:
1070 ip_profile_dest_data["dhcp-params"] = {
1071 "enabled": ip_profile_source_data["dhcp-enabled"]
1072 }
1073 vld_params["ip-profile"] = ip_profile_dest_data
1074
1075 # update vld_params with instantiation params
1076 vld_instantiation_params = find_in_list(
1077 get_iterable(ns_params, "vld"),
1078 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1079 )
1080 if vld_instantiation_params:
1081 vld_params.update(vld_instantiation_params)
1082 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1083 target["ns"]["vld"].append(target_vld)
1084 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1085 update_ns_vld_target(target, ns_params)
1086
1087 for vnfr in db_vnfrs.values():
1088 vnfd = find_in_list(
1089 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1090 )
1091 vnf_params = find_in_list(
1092 get_iterable(ns_params, "vnf"),
1093 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1094 )
1095 target_vnf = deepcopy(vnfr)
1096 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1097 for vld in target_vnf.get("vld", ()):
1098 # check if connected to a ns.vld, to fill target'
1099 vnf_cp = find_in_list(
1100 vnfd.get("int-virtual-link-desc", ()),
1101 lambda cpd: cpd.get("id") == vld["id"],
1102 )
1103 if vnf_cp:
1104 ns_cp = "member_vnf:{}.{}".format(
1105 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1106 )
1107 if cp2target.get(ns_cp):
1108 vld["target"] = cp2target[ns_cp]
1109
1110 vld["vim_info"] = {
1111 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1112 }
1113 # check if this network needs SDN assist
1114 target_sdn = None
1115 if vld.get("pci-interfaces"):
1116 db_vim = get_vim_account(vnfr["vim-account-id"])
1117 sdnc_id = db_vim["config"].get("sdn-controller")
1118 if sdnc_id:
1119 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1120 target_sdn = "sdn:{}".format(sdnc_id)
1121 vld["vim_info"][target_sdn] = {
1122 "sdn": True,
1123 "target_vim": target_vim,
1124 "vlds": [sdn_vld],
1125 "type": vld.get("type"),
1126 }
1127
1128 # check at vnfd descriptor, if there is an ip-profile
1129 vld_params = {}
1130 vnfd_vlp = find_in_list(
1131 get_virtual_link_profiles(vnfd),
1132 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1133 )
1134 if (
1135 vnfd_vlp
1136 and vnfd_vlp.get("virtual-link-protocol-data")
1137 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1138 ):
1139 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1140 "l3-protocol-data"
1141 ]
1142 ip_profile_dest_data = {}
1143 if "ip-version" in ip_profile_source_data:
1144 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1145 "ip-version"
1146 ]
1147 if "cidr" in ip_profile_source_data:
1148 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1149 "cidr"
1150 ]
1151 if "gateway-ip" in ip_profile_source_data:
1152 ip_profile_dest_data[
1153 "gateway-address"
1154 ] = ip_profile_source_data["gateway-ip"]
1155 if "dhcp-enabled" in ip_profile_source_data:
1156 ip_profile_dest_data["dhcp-params"] = {
1157 "enabled": ip_profile_source_data["dhcp-enabled"]
1158 }
1159
1160 vld_params["ip-profile"] = ip_profile_dest_data
1161 # update vld_params with instantiation params
1162 if vnf_params:
1163 vld_instantiation_params = find_in_list(
1164 get_iterable(vnf_params, "internal-vld"),
1165 lambda i_vld: i_vld["name"] == vld["id"],
1166 )
1167 if vld_instantiation_params:
1168 vld_params.update(vld_instantiation_params)
1169 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1170
1171 vdur_list = []
1172 for vdur in target_vnf.get("vdur", ()):
1173 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1174 continue # This vdu must not be created
1175 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1176
1177 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1178
1179 if ssh_keys_all:
1180 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1181 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1182 if (
1183 vdu_configuration
1184 and vdu_configuration.get("config-access")
1185 and vdu_configuration.get("config-access").get("ssh-access")
1186 ):
1187 vdur["ssh-keys"] = ssh_keys_all
1188 vdur["ssh-access-required"] = vdu_configuration[
1189 "config-access"
1190 ]["ssh-access"]["required"]
1191 elif (
1192 vnf_configuration
1193 and vnf_configuration.get("config-access")
1194 and vnf_configuration.get("config-access").get("ssh-access")
1195 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1196 ):
1197 vdur["ssh-keys"] = ssh_keys_all
1198 vdur["ssh-access-required"] = vnf_configuration[
1199 "config-access"
1200 ]["ssh-access"]["required"]
1201 elif ssh_keys_instantiation and find_in_list(
1202 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1203 ):
1204 vdur["ssh-keys"] = ssh_keys_instantiation
1205
1206 self.logger.debug("NS > vdur > {}".format(vdur))
1207
1208 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1209 # cloud-init
1210 if vdud.get("cloud-init-file"):
1211 vdur["cloud-init"] = "{}:file:{}".format(
1212 vnfd["_id"], vdud.get("cloud-init-file")
1213 )
1214 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1215 if vdur["cloud-init"] not in target["cloud_init_content"]:
1216 base_folder = vnfd["_admin"]["storage"]
1217 if base_folder["pkg-dir"]:
1218 cloud_init_file = "{}/{}/cloud_init/{}".format(
1219 base_folder["folder"],
1220 base_folder["pkg-dir"],
1221 vdud.get("cloud-init-file"),
1222 )
1223 else:
1224 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1225 base_folder["folder"],
1226 vdud.get("cloud-init-file"),
1227 )
1228 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1229 target["cloud_init_content"][
1230 vdur["cloud-init"]
1231 ] = ci_file.read()
1232 elif vdud.get("cloud-init"):
1233 vdur["cloud-init"] = "{}:vdu:{}".format(
1234 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1235 )
1236 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1237 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1238 "cloud-init"
1239 ]
1240 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1241 deploy_params_vdu = self._format_additional_params(
1242 vdur.get("additionalParams") or {}
1243 )
1244 deploy_params_vdu["OSM"] = get_osm_params(
1245 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1246 )
1247 vdur["additionalParams"] = deploy_params_vdu
1248
1249 # flavor
1250 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1251 if target_vim not in ns_flavor["vim_info"]:
1252 ns_flavor["vim_info"][target_vim] = {}
1253
1254 # deal with images
1255 # in case alternative images are provided we must check if they should be applied
1256 # for the vim_type, modify the vim_type taking into account
1257 ns_image_id = int(vdur["ns-image-id"])
1258 if vdur.get("alt-image-ids"):
1259 db_vim = get_vim_account(vnfr["vim-account-id"])
1260 vim_type = db_vim["vim_type"]
1261 for alt_image_id in vdur.get("alt-image-ids"):
1262 ns_alt_image = target["image"][int(alt_image_id)]
1263 if vim_type == ns_alt_image.get("vim-type"):
1264 # must use alternative image
1265 self.logger.debug(
1266 "use alternative image id: {}".format(alt_image_id)
1267 )
1268 ns_image_id = alt_image_id
1269 vdur["ns-image-id"] = ns_image_id
1270 break
1271 ns_image = target["image"][int(ns_image_id)]
1272 if target_vim not in ns_image["vim_info"]:
1273 ns_image["vim_info"][target_vim] = {}
1274
1275 # Affinity groups
1276 if vdur.get("affinity-or-anti-affinity-group-id"):
1277 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1278 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1279 if target_vim not in ns_ags["vim_info"]:
1280 ns_ags["vim_info"][target_vim] = {}
1281
1282 vdur["vim_info"] = {target_vim: {}}
1283 # instantiation parameters
1284 if vnf_params:
1285 vdu_instantiation_params = find_in_list(
1286 get_iterable(vnf_params, "vdu"),
1287 lambda i_vdu: i_vdu["id"] == vdud["id"],
1288 )
1289 if vdu_instantiation_params:
1290 # Parse the vdu_volumes from the instantiation params
1291 vdu_volumes = get_volumes_from_instantiation_params(
1292 vdu_instantiation_params, vdud
1293 )
1294 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1295 vdur_list.append(vdur)
1296 target_vnf["vdur"] = vdur_list
1297 target["vnf"].append(target_vnf)
1298
1299 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1300 desc = await self.RO.deploy(nsr_id, target)
1301 self.logger.debug("RO return > {}".format(desc))
1302 action_id = desc["action_id"]
1303 await self._wait_ng_ro(
1304 nsr_id,
1305 action_id,
1306 nslcmop_id,
1307 start_deploy,
1308 timeout_ns_deploy,
1309 stage,
1310 operation="instantiation",
1311 )
1312
1313 # Updating NSR
1314 db_nsr_update = {
1315 "_admin.deployed.RO.operational-status": "running",
1316 "detailed-status": " ".join(stage),
1317 }
1318 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1319 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1320 self._write_op_status(nslcmop_id, stage)
1321 self.logger.debug(
1322 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1323 )
1324 return
1325
1326 async def _wait_ng_ro(
1327 self,
1328 nsr_id,
1329 action_id,
1330 nslcmop_id=None,
1331 start_time=None,
1332 timeout=600,
1333 stage=None,
1334 operation=None,
1335 ):
1336 detailed_status_old = None
1337 db_nsr_update = {}
1338 start_time = start_time or time()
1339 while time() <= start_time + timeout:
1340 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1341 self.logger.debug("Wait NG RO > {}".format(desc_status))
1342 if desc_status["status"] == "FAILED":
1343 raise NgRoException(desc_status["details"])
1344 elif desc_status["status"] == "BUILD":
1345 if stage:
1346 stage[2] = "VIM: ({})".format(desc_status["details"])
1347 elif desc_status["status"] == "DONE":
1348 if stage:
1349 stage[2] = "Deployed at VIM"
1350 break
1351 else:
1352 assert False, "ROclient.check_ns_status returns unknown {}".format(
1353 desc_status["status"]
1354 )
1355 if stage and nslcmop_id and stage[2] != detailed_status_old:
1356 detailed_status_old = stage[2]
1357 db_nsr_update["detailed-status"] = " ".join(stage)
1358 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1359 self._write_op_status(nslcmop_id, stage)
1360 await asyncio.sleep(15, loop=self.loop)
1361 else: # timeout_ns_deploy
1362 raise NgRoException("Timeout waiting ns to deploy")
1363
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """Terminate the NS resources deployed through NG-RO.

        Deploys an "empty" target (which makes RO remove every deployed item),
        waits for the deletion to complete and finally removes the nsr record
        at RO. Errors are accumulated in ``failed_detail``; the NSR status is
        always persisted before raising.

        :param logging_text: prefix to use at logging
        :param nsr_deployed: content of nsr["_admin"]["deployed"]; not read here,
            presumably kept for signature compatibility — TODO confirm with callers
        :param nsr_id: NS record identifier
        :param nslcmop_id: NS LCM operation identifier; also used as RO action_id tag
        :param stage: 3-item list; item 2 is overwritten with the final VIM status
        :raise LcmException: when deletion at RO fails (conflict or other error)
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # an empty target tells NG-RO to delete everything for this NS
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )

            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except Exception as e:
            # classify the failure: 404 means already gone (success), anything
            # else is recorded and re-raised as LcmException below
            if isinstance(e, NgRoException) and e.http_code == 404:  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )

        # persist the outcome before (possibly) raising
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1436
1437 async def instantiate_RO(
1438 self,
1439 logging_text,
1440 nsr_id,
1441 nsd,
1442 db_nsr,
1443 db_nslcmop,
1444 db_vnfrs,
1445 db_vnfds,
1446 n2vc_key_list,
1447 stage,
1448 ):
1449 """
1450 Instantiate at RO
1451 :param logging_text: preffix text to use at logging
1452 :param nsr_id: nsr identity
1453 :param nsd: database content of ns descriptor
1454 :param db_nsr: database content of ns record
1455 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1456 :param db_vnfrs:
1457 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1458 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1459 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1460 :return: None or exception
1461 """
1462 try:
1463 start_deploy = time()
1464 ns_params = db_nslcmop.get("operationParams")
1465 if ns_params and ns_params.get("timeout_ns_deploy"):
1466 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1467 else:
1468 timeout_ns_deploy = self.timeout.ns_deploy
1469
1470 # Check for and optionally request placement optimization. Database will be updated if placement activated
1471 stage[2] = "Waiting for Placement."
1472 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1473 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1474 for vnfr in db_vnfrs.values():
1475 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1476 break
1477 else:
1478 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1479
1480 return await self._instantiate_ng_ro(
1481 logging_text,
1482 nsr_id,
1483 nsd,
1484 db_nsr,
1485 db_nslcmop,
1486 db_vnfrs,
1487 db_vnfds,
1488 n2vc_key_list,
1489 stage,
1490 start_deploy,
1491 timeout_ns_deploy,
1492 )
1493 except Exception as e:
1494 stage[2] = "ERROR deploying at VIM"
1495 self.set_vnfr_at_error(db_vnfrs, str(e))
1496 self.logger.error(
1497 "Error deploying at VIM {}".format(e),
1498 exc_info=not isinstance(
1499 e,
1500 (
1501 ROclient.ROClientException,
1502 LcmException,
1503 DbException,
1504 NgRoException,
1505 ),
1506 ),
1507 )
1508 raise
1509
1510 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1511 """
1512 Wait for kdu to be up, get ip address
1513 :param logging_text: prefix use for logging
1514 :param nsr_id:
1515 :param vnfr_id:
1516 :param kdu_name:
1517 :return: IP address, K8s services
1518 """
1519
1520 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1521 nb_tries = 0
1522
1523 while nb_tries < 360:
1524 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1525 kdur = next(
1526 (
1527 x
1528 for x in get_iterable(db_vnfr, "kdur")
1529 if x.get("kdu-name") == kdu_name
1530 ),
1531 None,
1532 )
1533 if not kdur:
1534 raise LcmException(
1535 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1536 )
1537 if kdur.get("status"):
1538 if kdur["status"] in ("READY", "ENABLED"):
1539 return kdur.get("ip-address"), kdur.get("services")
1540 else:
1541 raise LcmException(
1542 "target KDU={} is in error state".format(kdu_name)
1543 )
1544
1545 await asyncio.sleep(10, loop=self.loop)
1546 nb_tries += 1
1547 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1548
1549 async def wait_vm_up_insert_key_ro(
1550 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1551 ):
1552 """
1553 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1554 :param logging_text: prefix use for logging
1555 :param nsr_id:
1556 :param vnfr_id:
1557 :param vdu_id:
1558 :param vdu_index:
1559 :param pub_key: public ssh key to inject, None to skip
1560 :param user: user to apply the public ssh key
1561 :return: IP address
1562 """
1563
1564 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1565 ro_nsr_id = None
1566 ip_address = None
1567 nb_tries = 0
1568 target_vdu_id = None
1569 ro_retries = 0
1570
1571 while True:
1572
1573 ro_retries += 1
1574 if ro_retries >= 360: # 1 hour
1575 raise LcmException(
1576 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1577 )
1578
1579 await asyncio.sleep(10, loop=self.loop)
1580
1581 # get ip address
1582 if not target_vdu_id:
1583 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1584
1585 if not vdu_id: # for the VNF case
1586 if db_vnfr.get("status") == "ERROR":
1587 raise LcmException(
1588 "Cannot inject ssh-key because target VNF is in error state"
1589 )
1590 ip_address = db_vnfr.get("ip-address")
1591 if not ip_address:
1592 continue
1593 vdur = next(
1594 (
1595 x
1596 for x in get_iterable(db_vnfr, "vdur")
1597 if x.get("ip-address") == ip_address
1598 ),
1599 None,
1600 )
1601 else: # VDU case
1602 vdur = next(
1603 (
1604 x
1605 for x in get_iterable(db_vnfr, "vdur")
1606 if x.get("vdu-id-ref") == vdu_id
1607 and x.get("count-index") == vdu_index
1608 ),
1609 None,
1610 )
1611
1612 if (
1613 not vdur and len(db_vnfr.get("vdur", ())) == 1
1614 ): # If only one, this should be the target vdu
1615 vdur = db_vnfr["vdur"][0]
1616 if not vdur:
1617 raise LcmException(
1618 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1619 vnfr_id, vdu_id, vdu_index
1620 )
1621 )
1622 # New generation RO stores information at "vim_info"
1623 ng_ro_status = None
1624 target_vim = None
1625 if vdur.get("vim_info"):
1626 target_vim = next(
1627 t for t in vdur["vim_info"]
1628 ) # there should be only one key
1629 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1630 if (
1631 vdur.get("pdu-type")
1632 or vdur.get("status") == "ACTIVE"
1633 or ng_ro_status == "ACTIVE"
1634 ):
1635 ip_address = vdur.get("ip-address")
1636 if not ip_address:
1637 continue
1638 target_vdu_id = vdur["vdu-id-ref"]
1639 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1640 raise LcmException(
1641 "Cannot inject ssh-key because target VM is in error state"
1642 )
1643
1644 if not target_vdu_id:
1645 continue
1646
1647 # inject public key into machine
1648 if pub_key and user:
1649 self.logger.debug(logging_text + "Inserting RO key")
1650 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1651 if vdur.get("pdu-type"):
1652 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1653 return ip_address
1654 try:
1655 ro_vm_id = "{}-{}".format(
1656 db_vnfr["member-vnf-index-ref"], target_vdu_id
1657 ) # TODO add vdu_index
1658 if self.ro_config.ng:
1659 target = {
1660 "action": {
1661 "action": "inject_ssh_key",
1662 "key": pub_key,
1663 "user": user,
1664 },
1665 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1666 }
1667 desc = await self.RO.deploy(nsr_id, target)
1668 action_id = desc["action_id"]
1669 await self._wait_ng_ro(
1670 nsr_id, action_id, timeout=600, operation="instantiation"
1671 )
1672 break
1673 else:
1674 # wait until NS is deployed at RO
1675 if not ro_nsr_id:
1676 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1677 ro_nsr_id = deep_get(
1678 db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
1679 )
1680 if not ro_nsr_id:
1681 continue
1682 result_dict = await self.RO.create_action(
1683 item="ns",
1684 item_id_name=ro_nsr_id,
1685 descriptor={
1686 "add_public_key": pub_key,
1687 "vms": [ro_vm_id],
1688 "user": user,
1689 },
1690 )
1691 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1692 if not result_dict or not isinstance(result_dict, dict):
1693 raise LcmException(
1694 "Unknown response from RO when injecting key"
1695 )
1696 for result in result_dict.values():
1697 if result.get("vim_result") == 200:
1698 break
1699 else:
1700 raise ROclient.ROClientException(
1701 "error injecting key: {}".format(
1702 result.get("description")
1703 )
1704 )
1705 break
1706 except NgRoException as e:
1707 raise LcmException(
1708 "Reaching max tries injecting key. Error: {}".format(e)
1709 )
1710 except ROclient.ROClientException as e:
1711 if not nb_tries:
1712 self.logger.debug(
1713 logging_text
1714 + "error injecting key: {}. Retrying until {} seconds".format(
1715 e, 20 * 10
1716 )
1717 )
1718 nb_tries += 1
1719 if nb_tries >= 20:
1720 raise LcmException(
1721 "Reaching max tries injecting key. Error: {}".format(e)
1722 )
1723 else:
1724 break
1725
1726 return ip_address
1727
1728 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1729 """
1730 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1731 """
1732 my_vca = vca_deployed_list[vca_index]
1733 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1734 # vdu or kdu: no dependencies
1735 return
1736 timeout = 300
1737 while timeout >= 0:
1738 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1739 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1740 configuration_status_list = db_nsr["configurationStatus"]
1741 for index, vca_deployed in enumerate(configuration_status_list):
1742 if index == vca_index:
1743 # myself
1744 continue
1745 if not my_vca.get("member-vnf-index") or (
1746 vca_deployed.get("member-vnf-index")
1747 == my_vca.get("member-vnf-index")
1748 ):
1749 internal_status = configuration_status_list[index].get("status")
1750 if internal_status == "READY":
1751 continue
1752 elif internal_status == "BROKEN":
1753 raise LcmException(
1754 "Configuration aborted because dependent charm/s has failed"
1755 )
1756 else:
1757 break
1758 else:
1759 # no dependencies, return
1760 return
1761 await asyncio.sleep(10)
1762 timeout -= 1
1763
1764 raise LcmException("Configuration aborted because dependent charm/s timeout")
1765
1766 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1767 vca_id = None
1768 if db_vnfr:
1769 vca_id = deep_get(db_vnfr, ("vca-id",))
1770 elif db_nsr:
1771 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1772 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1773 return vca_id
1774
1775 async def instantiate_N2VC(
1776 self,
1777 logging_text,
1778 vca_index,
1779 nsi_id,
1780 db_nsr,
1781 db_vnfr,
1782 vdu_id,
1783 kdu_name,
1784 vdu_index,
1785 kdu_index,
1786 config_descriptor,
1787 deploy_params,
1788 base_folder,
1789 nslcmop_id,
1790 stage,
1791 vca_type,
1792 vca_name,
1793 ee_config_descriptor,
1794 ):
1795 nsr_id = db_nsr["_id"]
1796 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1797 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1798 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1799 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1800 db_dict = {
1801 "collection": "nsrs",
1802 "filter": {"_id": nsr_id},
1803 "path": db_update_entry,
1804 }
1805 step = ""
1806 try:
1807
1808 element_type = "NS"
1809 element_under_configuration = nsr_id
1810
1811 vnfr_id = None
1812 if db_vnfr:
1813 vnfr_id = db_vnfr["_id"]
1814 osm_config["osm"]["vnf_id"] = vnfr_id
1815
1816 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1817
1818 if vca_type == "native_charm":
1819 index_number = 0
1820 else:
1821 index_number = vdu_index or 0
1822
1823 if vnfr_id:
1824 element_type = "VNF"
1825 element_under_configuration = vnfr_id
1826 namespace += ".{}-{}".format(vnfr_id, index_number)
1827 if vdu_id:
1828 namespace += ".{}-{}".format(vdu_id, index_number)
1829 element_type = "VDU"
1830 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1831 osm_config["osm"]["vdu_id"] = vdu_id
1832 elif kdu_name:
1833 namespace += ".{}".format(kdu_name)
1834 element_type = "KDU"
1835 element_under_configuration = kdu_name
1836 osm_config["osm"]["kdu_name"] = kdu_name
1837
1838 # Get artifact path
1839 if base_folder["pkg-dir"]:
1840 artifact_path = "{}/{}/{}/{}".format(
1841 base_folder["folder"],
1842 base_folder["pkg-dir"],
1843 "charms"
1844 if vca_type
1845 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1846 else "helm-charts",
1847 vca_name,
1848 )
1849 else:
1850 artifact_path = "{}/Scripts/{}/{}/".format(
1851 base_folder["folder"],
1852 "charms"
1853 if vca_type
1854 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1855 else "helm-charts",
1856 vca_name,
1857 )
1858
1859 self.logger.debug("Artifact path > {}".format(artifact_path))
1860
1861 # get initial_config_primitive_list that applies to this element
1862 initial_config_primitive_list = config_descriptor.get(
1863 "initial-config-primitive"
1864 )
1865
1866 self.logger.debug(
1867 "Initial config primitive list > {}".format(
1868 initial_config_primitive_list
1869 )
1870 )
1871
1872 # add config if not present for NS charm
1873 ee_descriptor_id = ee_config_descriptor.get("id")
1874 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1875 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1876 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1877 )
1878
1879 self.logger.debug(
1880 "Initial config primitive list #2 > {}".format(
1881 initial_config_primitive_list
1882 )
1883 )
1884 # n2vc_redesign STEP 3.1
1885 # find old ee_id if exists
1886 ee_id = vca_deployed.get("ee_id")
1887
1888 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1889 # create or register execution environment in VCA
1890 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1891
1892 self._write_configuration_status(
1893 nsr_id=nsr_id,
1894 vca_index=vca_index,
1895 status="CREATING",
1896 element_under_configuration=element_under_configuration,
1897 element_type=element_type,
1898 )
1899
1900 step = "create execution environment"
1901 self.logger.debug(logging_text + step)
1902
1903 ee_id = None
1904 credentials = None
1905 if vca_type == "k8s_proxy_charm":
1906 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1907 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1908 namespace=namespace,
1909 artifact_path=artifact_path,
1910 db_dict=db_dict,
1911 vca_id=vca_id,
1912 )
1913 elif vca_type == "helm" or vca_type == "helm-v3":
1914 ee_id, credentials = await self.vca_map[
1915 vca_type
1916 ].create_execution_environment(
1917 namespace=namespace,
1918 reuse_ee_id=ee_id,
1919 db_dict=db_dict,
1920 config=osm_config,
1921 artifact_path=artifact_path,
1922 chart_model=vca_name,
1923 vca_type=vca_type,
1924 )
1925 else:
1926 ee_id, credentials = await self.vca_map[
1927 vca_type
1928 ].create_execution_environment(
1929 namespace=namespace,
1930 reuse_ee_id=ee_id,
1931 db_dict=db_dict,
1932 vca_id=vca_id,
1933 )
1934
1935 elif vca_type == "native_charm":
1936 step = "Waiting to VM being up and getting IP address"
1937 self.logger.debug(logging_text + step)
1938 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1939 logging_text,
1940 nsr_id,
1941 vnfr_id,
1942 vdu_id,
1943 vdu_index,
1944 user=None,
1945 pub_key=None,
1946 )
1947 credentials = {"hostname": rw_mgmt_ip}
1948 # get username
1949 username = deep_get(
1950 config_descriptor, ("config-access", "ssh-access", "default-user")
1951 )
1952 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1953 # merged. Meanwhile let's get username from initial-config-primitive
1954 if not username and initial_config_primitive_list:
1955 for config_primitive in initial_config_primitive_list:
1956 for param in config_primitive.get("parameter", ()):
1957 if param["name"] == "ssh-username":
1958 username = param["value"]
1959 break
1960 if not username:
1961 raise LcmException(
1962 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1963 "'config-access.ssh-access.default-user'"
1964 )
1965 credentials["username"] = username
1966 # n2vc_redesign STEP 3.2
1967
1968 self._write_configuration_status(
1969 nsr_id=nsr_id,
1970 vca_index=vca_index,
1971 status="REGISTERING",
1972 element_under_configuration=element_under_configuration,
1973 element_type=element_type,
1974 )
1975
1976 step = "register execution environment {}".format(credentials)
1977 self.logger.debug(logging_text + step)
1978 ee_id = await self.vca_map[vca_type].register_execution_environment(
1979 credentials=credentials,
1980 namespace=namespace,
1981 db_dict=db_dict,
1982 vca_id=vca_id,
1983 )
1984
1985 # for compatibility with MON/POL modules, the need model and application name at database
1986 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1987 ee_id_parts = ee_id.split(".")
1988 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1989 if len(ee_id_parts) >= 2:
1990 model_name = ee_id_parts[0]
1991 application_name = ee_id_parts[1]
1992 db_nsr_update[db_update_entry + "model"] = model_name
1993 db_nsr_update[db_update_entry + "application"] = application_name
1994
1995 # n2vc_redesign STEP 3.3
1996 step = "Install configuration Software"
1997
1998 self._write_configuration_status(
1999 nsr_id=nsr_id,
2000 vca_index=vca_index,
2001 status="INSTALLING SW",
2002 element_under_configuration=element_under_configuration,
2003 element_type=element_type,
2004 other_update=db_nsr_update,
2005 )
2006
2007 # TODO check if already done
2008 self.logger.debug(logging_text + step)
2009 config = None
2010 if vca_type == "native_charm":
2011 config_primitive = next(
2012 (p for p in initial_config_primitive_list if p["name"] == "config"),
2013 None,
2014 )
2015 if config_primitive:
2016 config = self._map_primitive_params(
2017 config_primitive, {}, deploy_params
2018 )
2019 num_units = 1
2020 if vca_type == "lxc_proxy_charm":
2021 if element_type == "NS":
2022 num_units = db_nsr.get("config-units") or 1
2023 elif element_type == "VNF":
2024 num_units = db_vnfr.get("config-units") or 1
2025 elif element_type == "VDU":
2026 for v in db_vnfr["vdur"]:
2027 if vdu_id == v["vdu-id-ref"]:
2028 num_units = v.get("config-units") or 1
2029 break
2030 if vca_type != "k8s_proxy_charm":
2031 await self.vca_map[vca_type].install_configuration_sw(
2032 ee_id=ee_id,
2033 artifact_path=artifact_path,
2034 db_dict=db_dict,
2035 config=config,
2036 num_units=num_units,
2037 vca_id=vca_id,
2038 vca_type=vca_type,
2039 )
2040
2041 # write in db flag of configuration_sw already installed
2042 self.update_db_2(
2043 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2044 )
2045
2046 # add relations for this VCA (wait for other peers related with this VCA)
2047 is_relation_added = await self._add_vca_relations(
2048 logging_text=logging_text,
2049 nsr_id=nsr_id,
2050 vca_type=vca_type,
2051 vca_index=vca_index,
2052 )
2053
2054 if not is_relation_added:
2055 raise LcmException("Relations could not be added to VCA.")
2056
2057 # if SSH access is required, then get execution environment SSH public
2058 # if native charm we have waited already to VM be UP
2059 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2060 pub_key = None
2061 user = None
2062 # self.logger.debug("get ssh key block")
2063 if deep_get(
2064 config_descriptor, ("config-access", "ssh-access", "required")
2065 ):
2066 # self.logger.debug("ssh key needed")
2067 # Needed to inject a ssh key
2068 user = deep_get(
2069 config_descriptor,
2070 ("config-access", "ssh-access", "default-user"),
2071 )
2072 step = "Install configuration Software, getting public ssh key"
2073 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2074 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2075 )
2076
2077 step = "Insert public key into VM user={} ssh_key={}".format(
2078 user, pub_key
2079 )
2080 else:
2081 # self.logger.debug("no need to get ssh key")
2082 step = "Waiting to VM being up and getting IP address"
2083 self.logger.debug(logging_text + step)
2084
2085 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2086 rw_mgmt_ip = None
2087
2088 # n2vc_redesign STEP 5.1
2089 # wait for RO (ip-address) Insert pub_key into VM
2090 if vnfr_id:
2091 if kdu_name:
2092 rw_mgmt_ip, services = await self.wait_kdu_up(
2093 logging_text, nsr_id, vnfr_id, kdu_name
2094 )
2095 vnfd = self.db.get_one(
2096 "vnfds_revisions",
2097 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2098 )
2099 kdu = get_kdu(vnfd, kdu_name)
2100 kdu_services = [
2101 service["name"] for service in get_kdu_services(kdu)
2102 ]
2103 exposed_services = []
2104 for service in services:
2105 if any(s in service["name"] for s in kdu_services):
2106 exposed_services.append(service)
2107 await self.vca_map[vca_type].exec_primitive(
2108 ee_id=ee_id,
2109 primitive_name="config",
2110 params_dict={
2111 "osm-config": json.dumps(
2112 OsmConfigBuilder(
2113 k8s={"services": exposed_services}
2114 ).build()
2115 )
2116 },
2117 vca_id=vca_id,
2118 )
2119
2120 # This verification is needed in order to avoid trying to add a public key
2121 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2122 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2123 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2124 # or it is a KNF)
2125 elif db_vnfr.get("vdur"):
2126 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2127 logging_text,
2128 nsr_id,
2129 vnfr_id,
2130 vdu_id,
2131 vdu_index,
2132 user=user,
2133 pub_key=pub_key,
2134 )
2135
2136 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2137
2138 # store rw_mgmt_ip in deploy params for later replacement
2139 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2140
2141 # n2vc_redesign STEP 6 Execute initial config primitive
2142 step = "execute initial config primitive"
2143
2144 # wait for dependent primitives execution (NS -> VNF -> VDU)
2145 if initial_config_primitive_list:
2146 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2147
2148 # stage, in function of element type: vdu, kdu, vnf or ns
2149 my_vca = vca_deployed_list[vca_index]
2150 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2151 # VDU or KDU
2152 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2153 elif my_vca.get("member-vnf-index"):
2154 # VNF
2155 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2156 else:
2157 # NS
2158 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2159
2160 self._write_configuration_status(
2161 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2162 )
2163
2164 self._write_op_status(op_id=nslcmop_id, stage=stage)
2165
2166 check_if_terminated_needed = True
2167 for initial_config_primitive in initial_config_primitive_list:
2168 # adding information on the vca_deployed if it is a NS execution environment
2169 if not vca_deployed["member-vnf-index"]:
2170 deploy_params["ns_config_info"] = json.dumps(
2171 self._get_ns_config_info(nsr_id)
2172 )
2173 # TODO check if already done
2174 primitive_params_ = self._map_primitive_params(
2175 initial_config_primitive, {}, deploy_params
2176 )
2177
2178 step = "execute primitive '{}' params '{}'".format(
2179 initial_config_primitive["name"], primitive_params_
2180 )
2181 self.logger.debug(logging_text + step)
2182 await self.vca_map[vca_type].exec_primitive(
2183 ee_id=ee_id,
2184 primitive_name=initial_config_primitive["name"],
2185 params_dict=primitive_params_,
2186 db_dict=db_dict,
2187 vca_id=vca_id,
2188 vca_type=vca_type,
2189 )
2190 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2191 if check_if_terminated_needed:
2192 if config_descriptor.get("terminate-config-primitive"):
2193 self.update_db_2(
2194 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2195 )
2196 check_if_terminated_needed = False
2197
2198 # TODO register in database that primitive is done
2199
2200 # STEP 7 Configure metrics
2201 if vca_type == "helm" or vca_type == "helm-v3":
2202 # TODO: review for those cases where the helm chart is a reference and
2203 # is not part of the NF package
2204 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2205 ee_id=ee_id,
2206 artifact_path=artifact_path,
2207 ee_config_descriptor=ee_config_descriptor,
2208 vnfr_id=vnfr_id,
2209 nsr_id=nsr_id,
2210 target_ip=rw_mgmt_ip,
2211 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
2212 vdu_id=vdu_id,
2213 vdu_index=vdu_index,
2214 kdu_name=kdu_name,
2215 kdu_index=kdu_index,
2216 )
2217 if prometheus_jobs:
2218 self.update_db_2(
2219 "nsrs",
2220 nsr_id,
2221 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2222 )
2223
2224 for job in prometheus_jobs:
2225 self.db.set_one(
2226 "prometheus_jobs",
2227 {"job_name": job["job_name"]},
2228 job,
2229 upsert=True,
2230 fail_on_empty=False,
2231 )
2232
2233 step = "instantiated at VCA"
2234 self.logger.debug(logging_text + step)
2235
2236 self._write_configuration_status(
2237 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2238 )
2239
2240 except Exception as e: # TODO not use Exception but N2VC exception
2241 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2242 if not isinstance(
2243 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2244 ):
2245 self.logger.error(
2246 "Exception while {} : {}".format(step, e), exc_info=True
2247 )
2248 self._write_configuration_status(
2249 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2250 )
2251 raise LcmException("{}. {}".format(step, e)) from e
2252
2253 def _write_ns_status(
2254 self,
2255 nsr_id: str,
2256 ns_state: str,
2257 current_operation: str,
2258 current_operation_id: str,
2259 error_description: str = None,
2260 error_detail: str = None,
2261 other_update: dict = None,
2262 ):
2263 """
2264 Update db_nsr fields.
2265 :param nsr_id:
2266 :param ns_state:
2267 :param current_operation:
2268 :param current_operation_id:
2269 :param error_description:
2270 :param error_detail:
2271 :param other_update: Other required changes at database if provided, will be cleared
2272 :return:
2273 """
2274 try:
2275 db_dict = other_update or {}
2276 db_dict[
2277 "_admin.nslcmop"
2278 ] = current_operation_id # for backward compatibility
2279 db_dict["_admin.current-operation"] = current_operation_id
2280 db_dict["_admin.operation-type"] = (
2281 current_operation if current_operation != "IDLE" else None
2282 )
2283 db_dict["currentOperation"] = current_operation
2284 db_dict["currentOperationID"] = current_operation_id
2285 db_dict["errorDescription"] = error_description
2286 db_dict["errorDetail"] = error_detail
2287
2288 if ns_state:
2289 db_dict["nsState"] = ns_state
2290 self.update_db_2("nsrs", nsr_id, db_dict)
2291 except DbException as e:
2292 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2293
2294 def _write_op_status(
2295 self,
2296 op_id: str,
2297 stage: list = None,
2298 error_message: str = None,
2299 queuePosition: int = 0,
2300 operation_state: str = None,
2301 other_update: dict = None,
2302 ):
2303 try:
2304 db_dict = other_update or {}
2305 db_dict["queuePosition"] = queuePosition
2306 if isinstance(stage, list):
2307 db_dict["stage"] = stage[0]
2308 db_dict["detailed-status"] = " ".join(stage)
2309 elif stage is not None:
2310 db_dict["stage"] = str(stage)
2311
2312 if error_message is not None:
2313 db_dict["errorMessage"] = error_message
2314 if operation_state is not None:
2315 db_dict["operationState"] = operation_state
2316 db_dict["statusEnteredTime"] = time()
2317 self.update_db_2("nslcmops", op_id, db_dict)
2318 except DbException as e:
2319 self.logger.warn(
2320 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2321 )
2322
2323 def _write_all_config_status(self, db_nsr: dict, status: str):
2324 try:
2325 nsr_id = db_nsr["_id"]
2326 # configurationStatus
2327 config_status = db_nsr.get("configurationStatus")
2328 if config_status:
2329 db_nsr_update = {
2330 "configurationStatus.{}.status".format(index): status
2331 for index, v in enumerate(config_status)
2332 if v
2333 }
2334 # update status
2335 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2336
2337 except DbException as e:
2338 self.logger.warn(
2339 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2340 )
2341
2342 def _write_configuration_status(
2343 self,
2344 nsr_id: str,
2345 vca_index: int,
2346 status: str = None,
2347 element_under_configuration: str = None,
2348 element_type: str = None,
2349 other_update: dict = None,
2350 ):
2351
2352 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2353 # .format(vca_index, status))
2354
2355 try:
2356 db_path = "configurationStatus.{}.".format(vca_index)
2357 db_dict = other_update or {}
2358 if status:
2359 db_dict[db_path + "status"] = status
2360 if element_under_configuration:
2361 db_dict[
2362 db_path + "elementUnderConfiguration"
2363 ] = element_under_configuration
2364 if element_type:
2365 db_dict[db_path + "elementType"] = element_type
2366 self.update_db_2("nsrs", nsr_id, db_dict)
2367 except DbException as e:
2368 self.logger.warn(
2369 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2370 status, nsr_id, vca_index, e
2371 )
2372 )
2373
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and computes the placement, (vim account where to deploy). If it is decided by an external
        tool, it sends the request via kafka and waits until the result is written at database
        (nslcmops _admin.pla).
        Database is used because the result can be obtained from a different LCM worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
            computed 'vim-account-id'
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        # placement is only delegated when the operation explicitly requests the PLA engine
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            # the request is sent over kafka; the PLA module writes its answer at the
            # nslcmop record (_admin.pla), possibly observed by a different LCM worker
            await self.msg.aiowrite(
                "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
            )
            # poll the database every 5 seconds, up to ~50 seconds in total
            db_poll_interval = 5
            wait = db_poll_interval * 10
            pla_result = None
            while not pla_result and wait >= 0:
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                # ignore entries without a placement decision or not present in this NS
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs in place so the caller sees the chosen VIM account
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
2422
2423 def update_nsrs_with_pla_result(self, params):
2424 try:
2425 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2426 self.update_db_2(
2427 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2428 )
2429 except Exception as e:
2430 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2431
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a Network Service: deploy KDUs, the VIM resources (through RO)
        and the execution environments/charms, then compute the final operation
        result from the outcome of all launched tasks.

        The deployment tasks are collected in tasks_dict_info and awaited in the
        finally block; any task that failed (including a charm whose relations
        could not be added) marks config-status or operational-status as failed.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Results are written at database and notified via kafka.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation; nothing to do
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored JSON-serialized; decode it in place
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.ns_deploy

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            # sync the NSD artifacts to the local filesystem copy
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        # kdur additionalParams are stored JSON-serialized too
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts, so "vnfd_id not in
                # db_vnfds" is always True and each vnfr re-reads its vnfd;
                # verify whether the membership test should check vnfd ids.
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.public_key:
                n2vc_key_list.append(self.vca_config.public_key)

            stage[1] = "Deploying NS at VIM."
            # VIM deployment runs as a background task; its result is collected
            # with the rest of tasks_dict_info in the finally block
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            # create namespace and certificate if any helm based EE is present in the NS
            if check_helm_ee_in_ns(db_vnfds):
                # TODO: create EE namespace
                # create TLS certificates
                await self.vca_map["helm-v3"].create_tls_certificate(
                    secret_name="ee-tls-{}".format(nsr_id),
                    dns_prefix="*",
                    nsr_id=nsr_id,
                    usage="server auth",
                )

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            # One charm may be deployed per VNF, per VDU and per KDU; each call to
            # _deploy_n2vc registers its task in tasks_dict_info
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None
                kdu_index = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        kdu_index=kdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        kdu_index = None
                        # one charm per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                kdu_index=kdu_index,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdu_index, kdur = next(
                            x
                            for x in enumerate(db_vnfr["kdur"])
                            if x[1]["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            kdu_index=kdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                kdu_index = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    # _wait_for_tasks returns the error strings of failed tasks
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                    stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                # NOTE(review): "as exc" shadows the outer exc and unbinds it
                # after the handler; harmless here because exc is not read again
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            # a VCA task that raised (e.g. a failed charm relation) shows up here
            # via task.exception() and downgrades the corresponding status
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )

            # notify the final operation state over kafka (NBI and others listen)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2935
2936 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
2937 if vnfd_id not in cached_vnfds:
2938 cached_vnfds[vnfd_id] = self.db.get_one(
2939 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
2940 )
2941 return cached_vnfds[vnfd_id]
2942
2943 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2944 if vnf_profile_id not in cached_vnfrs:
2945 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2946 "vnfrs",
2947 {
2948 "member-vnf-index-ref": vnf_profile_id,
2949 "nsr-id-ref": nsr_id,
2950 },
2951 )
2952 return cached_vnfrs[vnf_profile_id]
2953
2954 def _is_deployed_vca_in_relation(
2955 self, vca: DeployedVCA, relation: Relation
2956 ) -> bool:
2957 found = False
2958 for endpoint in (relation.provider, relation.requirer):
2959 if endpoint["kdu-resource-profile-id"]:
2960 continue
2961 found = (
2962 vca.vnf_profile_id == endpoint.vnf_profile_id
2963 and vca.vdu_profile_id == endpoint.vdu_profile_id
2964 and vca.execution_environment_ref == endpoint.execution_environment_ref
2965 )
2966 if found:
2967 break
2968 return found
2969
2970 def _update_ee_relation_data_with_implicit_data(
2971 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2972 ):
2973 ee_relation_data = safe_get_ee_relation(
2974 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2975 )
2976 ee_relation_level = EELevel.get_level(ee_relation_data)
2977 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2978 "execution-environment-ref"
2979 ]:
2980 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2981 vnfd_id = vnf_profile["vnfd-id"]
2982 project = nsd["_admin"]["projects_read"][0]
2983 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
2984 entity_id = (
2985 vnfd_id
2986 if ee_relation_level == EELevel.VNF
2987 else ee_relation_data["vdu-profile-id"]
2988 )
2989 ee = get_juju_ee_ref(db_vnfd, entity_id)
2990 if not ee:
2991 raise Exception(
2992 f"not execution environments found for ee_relation {ee_relation_data}"
2993 )
2994 ee_relation_data["execution-environment-ref"] = ee["id"]
2995 return ee_relation_data
2996
2997 def _get_ns_relations(
2998 self,
2999 nsr_id: str,
3000 nsd: Dict[str, Any],
3001 vca: DeployedVCA,
3002 cached_vnfds: Dict[str, Any],
3003 ) -> List[Relation]:
3004 relations = []
3005 db_ns_relations = get_ns_configuration_relation_list(nsd)
3006 for r in db_ns_relations:
3007 provider_dict = None
3008 requirer_dict = None
3009 if all(key in r for key in ("provider", "requirer")):
3010 provider_dict = r["provider"]
3011 requirer_dict = r["requirer"]
3012 elif "entities" in r:
3013 provider_id = r["entities"][0]["id"]
3014 provider_dict = {
3015 "nsr-id": nsr_id,
3016 "endpoint": r["entities"][0]["endpoint"],
3017 }
3018 if provider_id != nsd["id"]:
3019 provider_dict["vnf-profile-id"] = provider_id
3020 requirer_id = r["entities"][1]["id"]
3021 requirer_dict = {
3022 "nsr-id": nsr_id,
3023 "endpoint": r["entities"][1]["endpoint"],
3024 }
3025 if requirer_id != nsd["id"]:
3026 requirer_dict["vnf-profile-id"] = requirer_id
3027 else:
3028 raise Exception(
3029 "provider/requirer or entities must be included in the relation."
3030 )
3031 relation_provider = self._update_ee_relation_data_with_implicit_data(
3032 nsr_id, nsd, provider_dict, cached_vnfds
3033 )
3034 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3035 nsr_id, nsd, requirer_dict, cached_vnfds
3036 )
3037 provider = EERelation(relation_provider)
3038 requirer = EERelation(relation_requirer)
3039 relation = Relation(r["name"], provider, requirer)
3040 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3041 if vca_in_relation:
3042 relations.append(relation)
3043 return relations
3044
3045 def _get_vnf_relations(
3046 self,
3047 nsr_id: str,
3048 nsd: Dict[str, Any],
3049 vca: DeployedVCA,
3050 cached_vnfds: Dict[str, Any],
3051 ) -> List[Relation]:
3052 relations = []
3053 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3054 vnf_profile_id = vnf_profile["id"]
3055 vnfd_id = vnf_profile["vnfd-id"]
3056 project = nsd["_admin"]["projects_read"][0]
3057 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3058 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3059 for r in db_vnf_relations:
3060 provider_dict = None
3061 requirer_dict = None
3062 if all(key in r for key in ("provider", "requirer")):
3063 provider_dict = r["provider"]
3064 requirer_dict = r["requirer"]
3065 elif "entities" in r:
3066 provider_id = r["entities"][0]["id"]
3067 provider_dict = {
3068 "nsr-id": nsr_id,
3069 "vnf-profile-id": vnf_profile_id,
3070 "endpoint": r["entities"][0]["endpoint"],
3071 }
3072 if provider_id != vnfd_id:
3073 provider_dict["vdu-profile-id"] = provider_id
3074 requirer_id = r["entities"][1]["id"]
3075 requirer_dict = {
3076 "nsr-id": nsr_id,
3077 "vnf-profile-id": vnf_profile_id,
3078 "endpoint": r["entities"][1]["endpoint"],
3079 }
3080 if requirer_id != vnfd_id:
3081 requirer_dict["vdu-profile-id"] = requirer_id
3082 else:
3083 raise Exception(
3084 "provider/requirer or entities must be included in the relation."
3085 )
3086 relation_provider = self._update_ee_relation_data_with_implicit_data(
3087 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3088 )
3089 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3090 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3091 )
3092 provider = EERelation(relation_provider)
3093 requirer = EERelation(relation_requirer)
3094 relation = Relation(r["name"], provider, requirer)
3095 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3096 if vca_in_relation:
3097 relations.append(relation)
3098 return relations
3099
3100 def _get_kdu_resource_data(
3101 self,
3102 ee_relation: EERelation,
3103 db_nsr: Dict[str, Any],
3104 cached_vnfds: Dict[str, Any],
3105 ) -> DeployedK8sResource:
3106 nsd = get_nsd(db_nsr)
3107 vnf_profiles = get_vnf_profiles(nsd)
3108 vnfd_id = find_in_list(
3109 vnf_profiles,
3110 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3111 )["vnfd-id"]
3112 project = nsd["_admin"]["projects_read"][0]
3113 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3114 kdu_resource_profile = get_kdu_resource_profile(
3115 db_vnfd, ee_relation.kdu_resource_profile_id
3116 )
3117 kdu_name = kdu_resource_profile["kdu-name"]
3118 deployed_kdu, _ = get_deployed_kdu(
3119 db_nsr.get("_admin", ()).get("deployed", ()),
3120 kdu_name,
3121 ee_relation.vnf_profile_id,
3122 )
3123 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3124 return deployed_kdu
3125
3126 def _get_deployed_component(
3127 self,
3128 ee_relation: EERelation,
3129 db_nsr: Dict[str, Any],
3130 cached_vnfds: Dict[str, Any],
3131 ) -> DeployedComponent:
3132 nsr_id = db_nsr["_id"]
3133 deployed_component = None
3134 ee_level = EELevel.get_level(ee_relation)
3135 if ee_level == EELevel.NS:
3136 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3137 if vca:
3138 deployed_component = DeployedVCA(nsr_id, vca)
3139 elif ee_level == EELevel.VNF:
3140 vca = get_deployed_vca(
3141 db_nsr,
3142 {
3143 "vdu_id": None,
3144 "member-vnf-index": ee_relation.vnf_profile_id,
3145 "ee_descriptor_id": ee_relation.execution_environment_ref,
3146 },
3147 )
3148 if vca:
3149 deployed_component = DeployedVCA(nsr_id, vca)
3150 elif ee_level == EELevel.VDU:
3151 vca = get_deployed_vca(
3152 db_nsr,
3153 {
3154 "vdu_id": ee_relation.vdu_profile_id,
3155 "member-vnf-index": ee_relation.vnf_profile_id,
3156 "ee_descriptor_id": ee_relation.execution_environment_ref,
3157 },
3158 )
3159 if vca:
3160 deployed_component = DeployedVCA(nsr_id, vca)
3161 elif ee_level == EELevel.KDU:
3162 kdu_resource_data = self._get_kdu_resource_data(
3163 ee_relation, db_nsr, cached_vnfds
3164 )
3165 if kdu_resource_data:
3166 deployed_component = DeployedK8sResource(kdu_resource_data)
3167 return deployed_component
3168
    async def _add_relation(
        self,
        relation: Relation,
        vca_type: str,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
        cached_vnfrs: Dict[str, Any],
    ) -> bool:
        """Create one charm relation if both of its endpoints are ready.

        Resolves the deployed component behind each endpoint; when both exist
        and have their configuration software installed, asks the VCA
        connector selected by ``vca_type`` to add the relation.

        :param relation: relation to add (provider/requirer endpoints)
        :param vca_type: key into self.vca_map selecting the connector
        :param db_nsr: current nsr record (used to resolve deployed VCAs)
        :param cached_vnfds: memo of vnfd records, filled on demand
        :param cached_vnfrs: memo of vnfr records, filled on demand
        :return: True when the relation was added; False when a peer is not
            deployed/configured yet so the caller should retry later
        :raises LcmException: when the VCA connector fails to add the
            relation (so the failure is not silently reported as pending)
        """
        deployed_provider = self._get_deployed_component(
            relation.provider, db_nsr, cached_vnfds
        )
        deployed_requirer = self._get_deployed_component(
            relation.requirer, db_nsr, cached_vnfds
        )
        # Only proceed when BOTH peers exist and finished installing their
        # configuration software; otherwise report "not yet" to the caller.
        if (
            deployed_provider
            and deployed_requirer
            and deployed_provider.config_sw_installed
            and deployed_requirer.config_sw_installed
        ):
            # NS-level endpoints have no vnf_profile_id, hence no vnfr.
            provider_db_vnfr = (
                self._get_vnfr(
                    relation.provider.nsr_id,
                    relation.provider.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.provider.vnf_profile_id
                else None
            )
            requirer_db_vnfr = (
                self._get_vnfr(
                    relation.requirer.nsr_id,
                    relation.requirer.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.requirer.vnf_profile_id
                else None
            )
            provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
            requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
            provider_relation_endpoint = RelationEndpoint(
                deployed_provider.ee_id,
                provider_vca_id,
                relation.provider.endpoint,
            )
            requirer_relation_endpoint = RelationEndpoint(
                deployed_requirer.ee_id,
                requirer_vca_id,
                relation.requirer.endpoint,
            )
            try:
                await self.vca_map[vca_type].add_relation(
                    provider=provider_relation_endpoint,
                    requirer=requirer_relation_endpoint,
                )
            except N2VCException as exception:
                # Surface VCA failures instead of retrying forever
                # (bug 1234: a failed relation must not look successful).
                self.logger.error(exception)
                raise LcmException(exception)
            return True
        return False
3229
3230 async def _add_vca_relations(
3231 self,
3232 logging_text,
3233 nsr_id,
3234 vca_type: str,
3235 vca_index: int,
3236 timeout: int = 3600,
3237 ) -> bool:
3238
3239 # steps:
3240 # 1. find all relations for this VCA
3241 # 2. wait for other peers related
3242 # 3. add relations
3243
3244 try:
3245 # STEP 1: find all relations for this VCA
3246
3247 # read nsr record
3248 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3249 nsd = get_nsd(db_nsr)
3250
3251 # this VCA data
3252 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3253 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3254
3255 cached_vnfds = {}
3256 cached_vnfrs = {}
3257 relations = []
3258 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3259 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3260
3261 # if no relations, terminate
3262 if not relations:
3263 self.logger.debug(logging_text + " No relations")
3264 return True
3265
3266 self.logger.debug(logging_text + " adding relations {}".format(relations))
3267
3268 # add all relations
3269 start = time()
3270 while True:
3271 # check timeout
3272 now = time()
3273 if now - start >= timeout:
3274 self.logger.error(logging_text + " : timeout adding relations")
3275 return False
3276
3277 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3278 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3279
3280 # for each relation, find the VCA's related
3281 for relation in relations.copy():
3282 added = await self._add_relation(
3283 relation,
3284 vca_type,
3285 db_nsr,
3286 cached_vnfds,
3287 cached_vnfrs,
3288 )
3289 if added:
3290 relations.remove(relation)
3291
3292 if not relations:
3293 self.logger.debug("Relations added")
3294 break
3295 await asyncio.sleep(5.0)
3296
3297 return True
3298
3299 except Exception as e:
3300 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3301 return False
3302
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install a single KDU on its k8s cluster and record the outcome.

        Installs the helm chart / juju bundle, collects the k8s services to
        derive the management IP, marks the kdur as READY in the vnfr, and
        runs initial config primitives when the KDU has a day-1 configuration
        without a juju execution environment.

        :param nsr_id: NS record id
        :param nsr_db_path: dot path inside the nsr where this KDU is stored
            (e.g. "_admin.deployed.K8s.<index>")
        :param vnfr_data: vnfr record owning the kdur
        :param kdu_index: index of the kdur inside the vnfr
        :param kdud: KDU descriptor from the VNFD
        :param vnfd: full VNF descriptor
        :param k8s_instance_info: cluster/model/namespace info for the install
        :param k8params: user/descriptor parameters for the install
        :param timeout: seconds for install and for each primitive
        :param vca_id: optional VCA id used by the k8s connector
        :return: the kdu_instance name
        :raises Exception: re-raises any failure after recording the error in
            the nsr detailed-status and setting the kdur status to ERROR
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Use the user-provided deployment name if any; otherwise let the
            # connector generate a unique instance name.
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        # Descriptor service names are matched by prefix
                        # against the actual k8s service names.
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no k8s service matched this mgmt service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            # Install succeeded: mark the kdur READY and persist collected data.
            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Day-1 primitives are executed directly by the k8s connector only
            # when the KDU has no juju execution environment of its own.
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                # Primitives run in ascending "seq" order.
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3500
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch an asyncio install task for every KDU of the NS.

        For each kdur of each vnfr: resolves the k8s cluster (initializing
        helm-v3 credentials for legacy clusters if needed), synchronizes the
        cluster helm repos once per cluster, records the KDU entry under
        _admin.deployed.K8s in the nsr, and schedules _install_kdu as a task
        registered in self.lcm_tasks and task_instantiation_info.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: operation id used to register tasks
        :param db_vnfrs: dict of vnfr records of the NS
        :param db_vnfds: list of vnfd records of the NS
        :param task_instantiation_info: dict task -> description, filled here
        :raises LcmException: on any error while preparing the deployments
        """
        # Launch kdus if present in the descriptor

        # memo: cluster-id -> connector-internal uuid, per cluster type
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and memoize) the connector-internal id of a k8s
            # cluster, waiting for any in-flight k8scluster task first.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            # clusters whose helm repos have already been synchronized
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # Persist whatever K8s entries were prepared, even on failure.
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3772
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Schedule instantiate_N2VC tasks for each execution environment.

        For every juju/helm execution environment declared in
        ``descriptor_config``, finds (or creates) the corresponding entry in
        db_nsr._admin.deployed.VCA and launches an instantiate_N2VC asyncio
        task, registering it in self.lcm_tasks and task_instantiation_info.

        :param descriptor_config: configuration section of the NS/VNF/VDU/KDU
            holding the execution-environment-list (or a bare "juju" section
            for ns charms)
        :param task_instantiation_info: dict task -> description, filled here
        (remaining parameters identify the target element and are passed
        through to instantiate_N2VC)
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive the VCA type from the ee item: juju charms may be proxy
            # (lxc/k8s) or native; helm charts may be v2 ("helm") or v3.
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Reuse an existing _admin.deployed.VCA slot for this target
            # element if one exists; otherwise (for/else) append a new one.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3936
3937 @staticmethod
3938 def _create_nslcmop(nsr_id, operation, params):
3939 """
3940 Creates a ns-lcm-opp content to be stored at database.
3941 :param nsr_id: internal id of the instance
3942 :param operation: instantiate, terminate, scale, action, ...
3943 :param params: user parameters for the operation
3944 :return: dictionary following SOL005 format
3945 """
3946 # Raise exception if invalid arguments
3947 if not (nsr_id and operation and params):
3948 raise LcmException(
3949 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3950 )
3951 now = time()
3952 _id = str(uuid4())
3953 nslcmop = {
3954 "id": _id,
3955 "_id": _id,
3956 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3957 "operationState": "PROCESSING",
3958 "statusEnteredTime": now,
3959 "nsInstanceId": nsr_id,
3960 "lcmOperationType": operation,
3961 "startTime": now,
3962 "isAutomaticInvocation": False,
3963 "operationParams": params,
3964 "isCancelPending": False,
3965 "links": {
3966 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3967 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3968 },
3969 }
3970 return nslcmop
3971
3972 def _format_additional_params(self, params):
3973 params = params or {}
3974 for key, value in params.items():
3975 if str(value).startswith("!!yaml "):
3976 params[key] = yaml.safe_load(value[7:])
3977 return params
3978
3979 def _get_terminate_primitive_params(self, seq, vnf_index):
3980 primitive = seq.get("name")
3981 primitive_params = {}
3982 params = {
3983 "member_vnf_index": vnf_index,
3984 "primitive": primitive,
3985 "primitive_params": primitive_params,
3986 }
3987 desc_params = {}
3988 return self._map_primitive_params(seq, params, desc_params)
3989
3990 # sub-operations
3991
3992 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3993 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3994 if op.get("operationState") == "COMPLETED":
3995 # b. Skip sub-operation
3996 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3997 return self.SUBOPERATION_STATUS_SKIP
3998 else:
3999 # c. retry executing sub-operation
4000 # The sub-operation exists, and operationState != 'COMPLETED'
4001 # Update operationState = 'PROCESSING' to indicate a retry.
4002 operationState = "PROCESSING"
4003 detailed_status = "In progress"
4004 self._update_suboperation_status(
4005 db_nslcmop, op_index, operationState, detailed_status
4006 )
4007 # Return the sub-operation index
4008 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4009 # with arguments extracted from the sub-operation
4010 return op_index
4011
4012 # Find a sub-operation where all keys in a matching dictionary must match
4013 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4014 def _find_suboperation(self, db_nslcmop, match):
4015 if db_nslcmop and match:
4016 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4017 for i, op in enumerate(op_list):
4018 if all(op.get(k) == match[k] for k in match):
4019 return i
4020 return self.SUBOPERATION_STATUS_NOT_FOUND
4021
4022 # Update status for a sub-operation given its index
4023 def _update_suboperation_status(
4024 self, db_nslcmop, op_index, operationState, detailed_status
4025 ):
4026 # Update DB for HA tasks
4027 q_filter = {"_id": db_nslcmop["_id"]}
4028 update_dict = {
4029 "_admin.operations.{}.operationState".format(op_index): operationState,
4030 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4031 }
4032 self.db.set_one(
4033 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4034 )
4035
4036 # Add sub-operation, return the index of the added sub-operation
4037 # Optionally, set operationState, detailed-status, and operationType
4038 # Status and type are currently set for 'scale' sub-operations:
4039 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4040 # 'detailed-status' : status message
4041 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4042 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4043 def _add_suboperation(
4044 self,
4045 db_nslcmop,
4046 vnf_index,
4047 vdu_id,
4048 vdu_count_index,
4049 vdu_name,
4050 primitive,
4051 mapped_primitive_params,
4052 operationState=None,
4053 detailed_status=None,
4054 operationType=None,
4055 RO_nsr_id=None,
4056 RO_scaling_info=None,
4057 ):
4058 if not db_nslcmop:
4059 return self.SUBOPERATION_STATUS_NOT_FOUND
4060 # Get the "_admin.operations" list, if it exists
4061 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4062 op_list = db_nslcmop_admin.get("operations")
4063 # Create or append to the "_admin.operations" list
4064 new_op = {
4065 "member_vnf_index": vnf_index,
4066 "vdu_id": vdu_id,
4067 "vdu_count_index": vdu_count_index,
4068 "primitive": primitive,
4069 "primitive_params": mapped_primitive_params,
4070 }
4071 if operationState:
4072 new_op["operationState"] = operationState
4073 if detailed_status:
4074 new_op["detailed-status"] = detailed_status
4075 if operationType:
4076 new_op["lcmOperationType"] = operationType
4077 if RO_nsr_id:
4078 new_op["RO_nsr_id"] = RO_nsr_id
4079 if RO_scaling_info:
4080 new_op["RO_scaling_info"] = RO_scaling_info
4081 if not op_list:
4082 # No existing operations, create key 'operations' with current operation as first list element
4083 db_nslcmop_admin.update({"operations": [new_op]})
4084 op_list = db_nslcmop_admin.get("operations")
4085 else:
4086 # Existing operations, append operation to list
4087 op_list.append(new_op)
4088
4089 db_nslcmop_update = {"_admin.operations": op_list}
4090 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4091 op_index = len(op_list) - 1
4092 return op_index
4093
4094 # Helper methods for scale() sub-operations
4095
4096 # pre-scale/post-scale:
4097 # Check for 3 different cases:
4098 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4099 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4100 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4101 def _check_or_add_scale_suboperation(
4102 self,
4103 db_nslcmop,
4104 vnf_index,
4105 vnf_config_primitive,
4106 primitive_params,
4107 operationType,
4108 RO_nsr_id=None,
4109 RO_scaling_info=None,
4110 ):
4111 # Find this sub-operation
4112 if RO_nsr_id and RO_scaling_info:
4113 operationType = "SCALE-RO"
4114 match = {
4115 "member_vnf_index": vnf_index,
4116 "RO_nsr_id": RO_nsr_id,
4117 "RO_scaling_info": RO_scaling_info,
4118 }
4119 else:
4120 match = {
4121 "member_vnf_index": vnf_index,
4122 "primitive": vnf_config_primitive,
4123 "primitive_params": primitive_params,
4124 "lcmOperationType": operationType,
4125 }
4126 op_index = self._find_suboperation(db_nslcmop, match)
4127 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4128 # a. New sub-operation
4129 # The sub-operation does not exist, add it.
4130 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4131 # The following parameters are set to None for all kind of scaling:
4132 vdu_id = None
4133 vdu_count_index = None
4134 vdu_name = None
4135 if RO_nsr_id and RO_scaling_info:
4136 vnf_config_primitive = None
4137 primitive_params = None
4138 else:
4139 RO_nsr_id = None
4140 RO_scaling_info = None
4141 # Initial status for sub-operation
4142 operationState = "PROCESSING"
4143 detailed_status = "In progress"
4144 # Add sub-operation for pre/post-scaling (zero or more operations)
4145 self._add_suboperation(
4146 db_nslcmop,
4147 vnf_index,
4148 vdu_id,
4149 vdu_count_index,
4150 vdu_name,
4151 vnf_config_primitive,
4152 primitive_params,
4153 operationState,
4154 detailed_status,
4155 operationType,
4156 RO_nsr_id,
4157 RO_scaling_info,
4158 )
4159 return self.SUBOPERATION_STATUS_NEW
4160 else:
4161 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4162 # or op_index (operationState != 'COMPLETED')
4163 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4164
4165 # Function to return execution_environment id
4166
4167 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4168 # TODO vdu_index_count
4169 for vca in vca_deployed_list:
4170 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4171 return vca["ee_id"]
4172
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix for all log messages
        :param db_nslcmop: database content of the nslcmop operation record
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of them at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: optional VCA identifier, forwarded to the VCA connector calls
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default to proxy charm when the deployment record carries no type
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    # anything other than (PARTIALLY_)COMPLETED aborts the termination
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4278
4279 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4280 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4281 namespace = "." + db_nsr["_id"]
4282 try:
4283 await self.n2vc.delete_namespace(
4284 namespace=namespace,
4285 total_timeout=self.timeout.charm_delete,
4286 vca_id=vca_id,
4287 )
4288 except N2VCNotFound: # already deleted. Skip
4289 pass
4290 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4291
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO
        :param logging_text:
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id:
        :param nslcmop_id:
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None; raises LcmException when any deletion failed
        """
        db_nsr_update = {}
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                # poll the RO delete action until it finishes or times out
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # ACTIVE here means the delete action finished
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # only write to the db when the status text actually changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                # 404 means it is already gone: treat as success
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only attempted when the ns deletion itself succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete the vnfds registered at RO, one by one
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4491
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate an NS instance.

        Stage 1 prepares the task, stage 2 runs the per-VCA terminate
        primitives, stage 3 deletes all execution environments, KDUs and the
        RO/VIM deployment. The final operation state is written to the
        database in the finally block and published on the message bus
        ('ns'/'terminated').

        :param nsr_id: id of the nsrs record to terminate
        :param nslcmop_id: id of the nslcmops record driving this operation
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing deployed; finally-block records the final state
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the config descriptor matching the VCA level (ns, vdu, kdu or vnf)
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    # a terminate primitive failed: stop here; finally-block
                    # reports the operation as FAILED with error_list content
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    certificate_name=db_nslcmop["nsInstanceId"],
                )
                # TODO: Delete namespace

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4827
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of asyncio tasks, collecting their errors.

        :param logging_text: prefix for all log messages
        :param created_tasks_info: dict task -> human readable description
        :param timeout: overall timeout in seconds for the whole task set
        :param stage: 3-element status list; index 1 is updated with progress
        :param nslcmop_id: operation whose status is refreshed on each step
        :param nsr_id: when given, the error summary is also written to nsrs
        :return: list of detailed error strings (empty when all succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining share of the global timeout for this wait round
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # every still-pending task is reported as timed out
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known exception types get a one-line log; anything else
                    # is unexpected, so include the full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4904
4905 @staticmethod
4906 def _map_primitive_params(primitive_desc, params, instantiation_params):
4907 """
4908 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4909 The default-value is used. If it is between < > it look for a value at instantiation_params
4910 :param primitive_desc: portion of VNFD/NSD that describes primitive
4911 :param params: Params provided by user
4912 :param instantiation_params: Instantiation params provided by user
4913 :return: a dictionary with the calculated params
4914 """
4915 calculated_params = {}
4916 for parameter in primitive_desc.get("parameter", ()):
4917 param_name = parameter["name"]
4918 if param_name in params:
4919 calculated_params[param_name] = params[param_name]
4920 elif "default-value" in parameter or "value" in parameter:
4921 if "value" in parameter:
4922 calculated_params[param_name] = parameter["value"]
4923 else:
4924 calculated_params[param_name] = parameter["default-value"]
4925 if (
4926 isinstance(calculated_params[param_name], str)
4927 and calculated_params[param_name].startswith("<")
4928 and calculated_params[param_name].endswith(">")
4929 ):
4930 if calculated_params[param_name][1:-1] in instantiation_params:
4931 calculated_params[param_name] = instantiation_params[
4932 calculated_params[param_name][1:-1]
4933 ]
4934 else:
4935 raise LcmException(
4936 "Parameter {} needed to execute primitive {} not provided".format(
4937 calculated_params[param_name], primitive_desc["name"]
4938 )
4939 )
4940 else:
4941 raise LcmException(
4942 "Parameter {} needed to execute primitive {} not provided".format(
4943 param_name, primitive_desc["name"]
4944 )
4945 )
4946
4947 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4948 calculated_params[param_name] = yaml.safe_dump(
4949 calculated_params[param_name], default_flow_style=True, width=256
4950 )
4951 elif isinstance(calculated_params[param_name], str) and calculated_params[
4952 param_name
4953 ].startswith("!!yaml "):
4954 calculated_params[param_name] = calculated_params[param_name][7:]
4955 if parameter.get("data-type") == "INTEGER":
4956 try:
4957 calculated_params[param_name] = int(calculated_params[param_name])
4958 except ValueError: # error converting string to int
4959 raise LcmException(
4960 "Parameter {} of primitive {} must be integer".format(
4961 param_name, primitive_desc["name"]
4962 )
4963 )
4964 elif parameter.get("data-type") == "BOOLEAN":
4965 calculated_params[param_name] = not (
4966 (str(calculated_params[param_name])).lower() == "false"
4967 )
4968
4969 # add always ns_config_info if primitive name is config
4970 if primitive_desc["name"] == "config":
4971 if "ns_config_info" in instantiation_params:
4972 calculated_params["ns_config_info"] = instantiation_params[
4973 "ns_config_info"
4974 ]
4975 return calculated_params
4976
4977 def _look_for_deployed_vca(
4978 self,
4979 deployed_vca,
4980 member_vnf_index,
4981 vdu_id,
4982 vdu_count_index,
4983 kdu_name=None,
4984 ee_descriptor_id=None,
4985 ):
4986 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4987 for vca in deployed_vca:
4988 if not vca:
4989 continue
4990 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4991 continue
4992 if (
4993 vdu_count_index is not None
4994 and vdu_count_index != vca["vdu_count_index"]
4995 ):
4996 continue
4997 if kdu_name and kdu_name != vca["kdu_name"]:
4998 continue
4999 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
5000 continue
5001 break
5002 else:
5003 # vca_deployed not found
5004 raise LcmException(
5005 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
5006 " is not deployed".format(
5007 member_vnf_index,
5008 vdu_id,
5009 vdu_count_index,
5010 kdu_name,
5011 ee_descriptor_id,
5012 )
5013 )
5014 # get ee_id
5015 ee_id = vca.get("ee_id")
5016 vca_type = vca.get(
5017 "type", "lxc_proxy_charm"
5018 ) # default value for backward compatibility - proxy charm
5019 if not ee_id:
5020 raise LcmException(
5021 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
5022 "execution environment".format(
5023 member_vnf_index, vdu_id, kdu_name, vdu_count_index
5024 )
5025 )
5026 return ee_id, vca_type
5027
5028 async def _ns_execute_primitive(
5029 self,
5030 ee_id,
5031 primitive,
5032 primitive_params,
5033 retries=0,
5034 retries_interval=30,
5035 timeout=None,
5036 vca_type=None,
5037 db_dict=None,
5038 vca_id: str = None,
5039 ) -> (str, str):
5040 try:
5041 if primitive == "config":
5042 primitive_params = {"params": primitive_params}
5043
5044 vca_type = vca_type or "lxc_proxy_charm"
5045
5046 while retries >= 0:
5047 try:
5048 output = await asyncio.wait_for(
5049 self.vca_map[vca_type].exec_primitive(
5050 ee_id=ee_id,
5051 primitive_name=primitive,
5052 params_dict=primitive_params,
5053 progress_timeout=self.timeout.progress_primitive,
5054 total_timeout=self.timeout.primitive,
5055 db_dict=db_dict,
5056 vca_id=vca_id,
5057 vca_type=vca_type,
5058 ),
5059 timeout=timeout or self.timeout.primitive,
5060 )
5061 # execution was OK
5062 break
5063 except asyncio.CancelledError:
5064 raise
5065 except Exception as e:
5066 retries -= 1
5067 if retries >= 0:
5068 self.logger.debug(
5069 "Error executing action {} on {} -> {}".format(
5070 primitive, ee_id, e
5071 )
5072 )
5073 # wait and retry
5074 await asyncio.sleep(retries_interval, loop=self.loop)
5075 else:
5076 if isinstance(e, asyncio.TimeoutError):
5077 e = N2VCException(
5078 message="Timed out waiting for action to complete"
5079 )
5080 return "FAILED", getattr(e, "message", repr(e))
5081
5082 return "COMPLETED", output
5083
5084 except (LcmException, asyncio.CancelledError):
5085 raise
5086 except Exception as e:
5087 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5088
5089 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5090 """
5091 Updating the vca_status with latest juju information in nsrs record
5092 :param: nsr_id: Id of the nsr
5093 :param: nslcmop_id: Id of the nslcmop
5094 :return: None
5095 """
5096
5097 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5098 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5099 vca_id = self.get_vca_id({}, db_nsr)
5100 if db_nsr["_admin"]["deployed"]["K8s"]:
5101 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5102 cluster_uuid, kdu_instance, cluster_type = (
5103 k8s["k8scluster-uuid"],
5104 k8s["kdu-instance"],
5105 k8s["k8scluster-type"],
5106 )
5107 await self._on_update_k8s_db(
5108 cluster_uuid=cluster_uuid,
5109 kdu_instance=kdu_instance,
5110 filter={"_id": nsr_id},
5111 vca_id=vca_id,
5112 cluster_type=cluster_type,
5113 )
5114 else:
5115 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5116 table, filter = "nsrs", {"_id": nsr_id}
5117 path = "_admin.deployed.VCA.{}.".format(vca_index)
5118 await self._on_update_n2vc_db(table, filter, path, {})
5119
5120 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5121 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5122
    async def action(self, nsr_id, nslcmop_id):
        """Run a user-requested primitive (action) on a deployed NS element.

        The target may be the NS itself, a VNF, a VDU or a KDU depending on the
        identifiers found in the nslcmop operationParams. KDU-level primitives
        ("upgrade"/"rollback"/"status" or KDU-declared actions) are dispatched to
        the matching K8s cluster client; everything else is executed on the charm
        execution environment through _ns_execute_primitive. The final operation
        state is persisted to the nslcmops/nsrs records in the finally block and
        notified on the kafka bus.

        :param nsr_id: NS instance id
        :param nslcmop_id: id of the nslcmop record describing the action
        :return: (nslcmop_operation_state, detailed_status), returned from the
            finally block after the database update
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params is stored JSON-encoded; decode it in place
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout.primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                # KDU additionalParams are stored JSON-encoded; decode each entry
                if db_vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only assigned inside the vnf_index branch
            # above; for an NS-scoped action (no member_vnf_index) this next line
            # looks like a latent NameError — confirm NS-level actions reach here.
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # only the built-in KDU verbs may run without a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                # descriptor may map the user-facing name to a different EE primitive
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # collect the additionalParams of the addressed entity (vdu/kdu/vnf/ns)
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE: these loops rebind "primitive" (the requested primitive
                # name) to descriptor dicts; only primitive_name is used below.
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                # KDU-declared actions run via the cluster client except on helm,
                # where they are executed as charm primitives instead
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # user-supplied kdu_model overrides the deployed one; a
                    # "name:version" value is split and only the name is kept
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # KDU-declared action; synthesize instance name if missing
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                # an empty/None output from the K8s client is treated as failure
                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # charm-based primitive: locate the execution environment
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist the final state; "step" tells which stage failed
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            # notify the final operation state on the message bus
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5493
5494 async def terminate_vdus(
5495 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5496 ):
5497 """This method terminates VDUs
5498
5499 Args:
5500 db_vnfr: VNF instance record
5501 member_vnf_index: VNF index to identify the VDUs to be removed
5502 db_nsr: NS instance record
5503 update_db_nslcmops: Nslcmop update record
5504 """
5505 vca_scaling_info = []
5506 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5507 scaling_info["scaling_direction"] = "IN"
5508 scaling_info["vdu-delete"] = {}
5509 scaling_info["kdu-delete"] = {}
5510 db_vdur = db_vnfr.get("vdur")
5511 vdur_list = copy(db_vdur)
5512 count_index = 0
5513 for index, vdu in enumerate(vdur_list):
5514 vca_scaling_info.append(
5515 {
5516 "osm_vdu_id": vdu["vdu-id-ref"],
5517 "member-vnf-index": member_vnf_index,
5518 "type": "delete",
5519 "vdu_index": count_index,
5520 }
5521 )
5522 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5523 scaling_info["vdu"].append(
5524 {
5525 "name": vdu.get("name") or vdu.get("vdu-name"),
5526 "vdu_id": vdu["vdu-id-ref"],
5527 "interface": [],
5528 }
5529 )
5530 for interface in vdu["interfaces"]:
5531 scaling_info["vdu"][index]["interface"].append(
5532 {
5533 "name": interface["name"],
5534 "ip_address": interface["ip-address"],
5535 "mac_address": interface.get("mac-address"),
5536 }
5537 )
5538 self.logger.info("NS update scaling info{}".format(scaling_info))
5539 stage[2] = "Terminating VDUs"
5540 if scaling_info.get("vdu-delete"):
5541 # scale_process = "RO"
5542 if self.ro_config.ng:
5543 await self._scale_ng_ro(
5544 logging_text,
5545 db_nsr,
5546 update_db_nslcmops,
5547 db_vnfr,
5548 scaling_info,
5549 stage,
5550 )
5551
5552 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5553 """This method is to Remove VNF instances from NS.
5554
5555 Args:
5556 nsr_id: NS instance id
5557 nslcmop_id: nslcmop id of update
5558 vnf_instance_id: id of the VNF instance to be removed
5559
5560 Returns:
5561 result: (str, str) COMPLETED/FAILED, details
5562 """
5563 try:
5564 db_nsr_update = {}
5565 logging_text = "Task ns={} update ".format(nsr_id)
5566 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5567 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5568 if check_vnfr_count > 1:
5569 stage = ["", "", ""]
5570 step = "Getting nslcmop from database"
5571 self.logger.debug(
5572 step + " after having waited for previous tasks to be completed"
5573 )
5574 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5575 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5576 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5577 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5578 """ db_vnfr = self.db.get_one(
5579 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5580
5581 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5582 await self.terminate_vdus(
5583 db_vnfr,
5584 member_vnf_index,
5585 db_nsr,
5586 update_db_nslcmops,
5587 stage,
5588 logging_text,
5589 )
5590
5591 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5592 constituent_vnfr.remove(db_vnfr.get("_id"))
5593 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5594 "constituent-vnfr-ref"
5595 )
5596 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5597 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5598 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5599 return "COMPLETED", "Done"
5600 else:
5601 step = "Terminate VNF Failed with"
5602 raise LcmException(
5603 "{} Cannot terminate the last VNF in this NS.".format(
5604 vnf_instance_id
5605 )
5606 )
5607 except (LcmException, asyncio.CancelledError):
5608 raise
5609 except Exception as e:
5610 self.logger.debug("Error removing VNF {}".format(e))
5611 return "FAILED", "Error removing VNF {}".format(e)
5612
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the VNF's current VDUs, rewrites the vnfr (connection points,
        vdur, revision) from the latest descriptor and the newVdur supplied in
        the nslcmop operationParams, then instantiates the new resources via
        NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor (latest revision)
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # Rebuild the vnfr connection points from the descriptor ext-cpd list
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the replacement vdur list is precomputed by the caller in the nslcmop
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # re-read the record so the scaling step sees the updated vdur list
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    # NOTE(review): cloud_init_list is populated but never used
                    # below — presumably consumed in an earlier revision; confirm.
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
                return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5738
5739 async def _ns_charm_upgrade(
5740 self,
5741 ee_id,
5742 charm_id,
5743 charm_type,
5744 path,
5745 timeout: float = None,
5746 ) -> (str, str):
5747 """This method upgrade charms in VNF instances
5748
5749 Args:
5750 ee_id: Execution environment id
5751 path: Local path to the charm
5752 charm_id: charm-id
5753 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5754 timeout: (Float) Timeout for the ns update operation
5755
5756 Returns:
5757 result: (str, str) COMPLETED/FAILED, details
5758 """
5759 try:
5760 charm_type = charm_type or "lxc_proxy_charm"
5761 output = await self.vca_map[charm_type].upgrade_charm(
5762 ee_id=ee_id,
5763 path=path,
5764 charm_id=charm_id,
5765 charm_type=charm_type,
5766 timeout=timeout or self.timeout.ns_update,
5767 )
5768
5769 if output:
5770 return "COMPLETED", output
5771
5772 except (LcmException, asyncio.CancelledError):
5773 raise
5774
5775 except Exception as e:
5776
5777 self.logger.debug("Error upgrading charm {}".format(path))
5778
5779 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5780
5781 async def update(self, nsr_id, nslcmop_id):
5782 """Update NS according to different update types
5783
5784 This method performs upgrade of VNF instances then updates the revision
5785 number in VNF record
5786
5787 Args:
5788 nsr_id: Network service will be updated
5789 nslcmop_id: ns lcm operation id
5790
5791 Returns:
5792 It may raise DbException, LcmException, N2VCException, K8sException
5793
5794 """
5795 # Try to lock HA task here
5796 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5797 if not task_is_locked_by_me:
5798 return
5799
5800 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5801 self.logger.debug(logging_text + "Enter")
5802
5803 # Set the required variables to be filled up later
5804 db_nsr = None
5805 db_nslcmop_update = {}
5806 vnfr_update = {}
5807 nslcmop_operation_state = None
5808 db_nsr_update = {}
5809 error_description_nslcmop = ""
5810 exc = None
5811 change_type = "updated"
5812 detailed_status = ""
5813
5814 try:
5815 # wait for any previous tasks in process
5816 step = "Waiting for previous operations to terminate"
5817 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5818 self._write_ns_status(
5819 nsr_id=nsr_id,
5820 ns_state=None,
5821 current_operation="UPDATING",
5822 current_operation_id=nslcmop_id,
5823 )
5824
5825 step = "Getting nslcmop from database"
5826 db_nslcmop = self.db.get_one(
5827 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5828 )
5829 update_type = db_nslcmop["operationParams"]["updateType"]
5830
5831 step = "Getting nsr from database"
5832 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5833 old_operational_status = db_nsr["operational-status"]
5834 db_nsr_update["operational-status"] = "updating"
5835 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5836 nsr_deployed = db_nsr["_admin"].get("deployed")
5837
5838 if update_type == "CHANGE_VNFPKG":
5839
5840 # Get the input parameters given through update request
5841 vnf_instance_id = db_nslcmop["operationParams"][
5842 "changeVnfPackageData"
5843 ].get("vnfInstanceId")
5844
5845 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5846 "vnfdId"
5847 )
5848 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5849
5850 step = "Getting vnfr from database"
5851 db_vnfr = self.db.get_one(
5852 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5853 )
5854
5855 step = "Getting vnfds from database"
5856 # Latest VNFD
5857 latest_vnfd = self.db.get_one(
5858 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5859 )
5860 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5861
5862 # Current VNFD
5863 current_vnf_revision = db_vnfr.get("revision", 1)
5864 current_vnfd = self.db.get_one(
5865 "vnfds_revisions",
5866 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5867 fail_on_empty=False,
5868 )
5869 # Charm artifact paths will be filled up later
5870 (
5871 current_charm_artifact_path,
5872 target_charm_artifact_path,
5873 charm_artifact_paths,
5874 helm_artifacts,
5875 ) = ([], [], [], [])
5876
5877 step = "Checking if revision has changed in VNFD"
5878 if current_vnf_revision != latest_vnfd_revision:
5879
5880 change_type = "policy_updated"
5881
5882 # There is new revision of VNFD, update operation is required
5883 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5884 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5885
5886 step = "Removing the VNFD packages if they exist in the local path"
5887 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5888 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5889
5890 step = "Get the VNFD packages from FSMongo"
5891 self.fs.sync(from_path=latest_vnfd_path)
5892 self.fs.sync(from_path=current_vnfd_path)
5893
5894 step = (
5895 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5896 )
5897 current_base_folder = current_vnfd["_admin"]["storage"]
5898 latest_base_folder = latest_vnfd["_admin"]["storage"]
5899
5900 for vca_index, vca_deployed in enumerate(
5901 get_iterable(nsr_deployed, "VCA")
5902 ):
5903 vnf_index = db_vnfr.get("member-vnf-index-ref")
5904
5905 # Getting charm-id and charm-type
5906 if vca_deployed.get("member-vnf-index") == vnf_index:
5907 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5908 vca_type = vca_deployed.get("type")
5909 vdu_count_index = vca_deployed.get("vdu_count_index")
5910
5911 # Getting ee-id
5912 ee_id = vca_deployed.get("ee_id")
5913
5914 step = "Getting descriptor config"
5915 if current_vnfd.get("kdu"):
5916
5917 search_key = "kdu_name"
5918 else:
5919 search_key = "vnfd_id"
5920
5921 entity_id = vca_deployed.get(search_key)
5922
5923 descriptor_config = get_configuration(
5924 current_vnfd, entity_id
5925 )
5926
5927 if "execution-environment-list" in descriptor_config:
5928 ee_list = descriptor_config.get(
5929 "execution-environment-list", []
5930 )
5931 else:
5932 ee_list = []
5933
5934 # There could be several charm used in the same VNF
5935 for ee_item in ee_list:
5936 if ee_item.get("juju"):
5937
5938 step = "Getting charm name"
5939 charm_name = ee_item["juju"].get("charm")
5940
5941 step = "Setting Charm artifact paths"
5942 current_charm_artifact_path.append(
5943 get_charm_artifact_path(
5944 current_base_folder,
5945 charm_name,
5946 vca_type,
5947 current_vnf_revision,
5948 )
5949 )
5950 target_charm_artifact_path.append(
5951 get_charm_artifact_path(
5952 latest_base_folder,
5953 charm_name,
5954 vca_type,
5955 latest_vnfd_revision,
5956 )
5957 )
5958 elif ee_item.get("helm-chart"):
5959 # add chart to list and all parameters
5960 step = "Getting helm chart name"
5961 chart_name = ee_item.get("helm-chart")
5962 if (
5963 ee_item.get("helm-version")
5964 and ee_item.get("helm-version") == "v2"
5965 ):
5966 vca_type = "helm"
5967 else:
5968 vca_type = "helm-v3"
5969 step = "Setting Helm chart artifact paths"
5970
5971 helm_artifacts.append(
5972 {
5973 "current_artifact_path": get_charm_artifact_path(
5974 current_base_folder,
5975 chart_name,
5976 vca_type,
5977 current_vnf_revision,
5978 ),
5979 "target_artifact_path": get_charm_artifact_path(
5980 latest_base_folder,
5981 chart_name,
5982 vca_type,
5983 latest_vnfd_revision,
5984 ),
5985 "ee_id": ee_id,
5986 "vca_index": vca_index,
5987 "vdu_index": vdu_count_index,
5988 }
5989 )
5990
5991 charm_artifact_paths = zip(
5992 current_charm_artifact_path, target_charm_artifact_path
5993 )
5994
5995 step = "Checking if software version has changed in VNFD"
5996 if find_software_version(current_vnfd) != find_software_version(
5997 latest_vnfd
5998 ):
5999
6000 step = "Checking if existing VNF has charm"
6001 for current_charm_path, target_charm_path in list(
6002 charm_artifact_paths
6003 ):
6004 if current_charm_path:
6005 raise LcmException(
6006 "Software version change is not supported as VNF instance {} has charm.".format(
6007 vnf_instance_id
6008 )
6009 )
6010
6011 # There is no change in the charm package, then redeploy the VNF
6012 # based on new descriptor
6013 step = "Redeploying VNF"
6014 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6015 (result, detailed_status) = await self._ns_redeploy_vnf(
6016 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
6017 )
6018 if result == "FAILED":
6019 nslcmop_operation_state = result
6020 error_description_nslcmop = detailed_status
6021 db_nslcmop_update["detailed-status"] = detailed_status
6022 self.logger.debug(
6023 logging_text
6024 + " step {} Done with result {} {}".format(
6025 step, nslcmop_operation_state, detailed_status
6026 )
6027 )
6028
6029 else:
6030 step = "Checking if any charm package has changed or not"
6031 for current_charm_path, target_charm_path in list(
6032 charm_artifact_paths
6033 ):
6034 if (
6035 current_charm_path
6036 and target_charm_path
6037 and self.check_charm_hash_changed(
6038 current_charm_path, target_charm_path
6039 )
6040 ):
6041
6042 step = "Checking whether VNF uses juju bundle"
6043 if check_juju_bundle_existence(current_vnfd):
6044
6045 raise LcmException(
6046 "Charm upgrade is not supported for the instance which"
6047 " uses juju-bundle: {}".format(
6048 check_juju_bundle_existence(current_vnfd)
6049 )
6050 )
6051
6052 step = "Upgrading Charm"
6053 (
6054 result,
6055 detailed_status,
6056 ) = await self._ns_charm_upgrade(
6057 ee_id=ee_id,
6058 charm_id=vca_id,
6059 charm_type=vca_type,
6060 path=self.fs.path + target_charm_path,
6061 timeout=timeout_seconds,
6062 )
6063
6064 if result == "FAILED":
6065 nslcmop_operation_state = result
6066 error_description_nslcmop = detailed_status
6067
6068 db_nslcmop_update["detailed-status"] = detailed_status
6069 self.logger.debug(
6070 logging_text
6071 + " step {} Done with result {} {}".format(
6072 step, nslcmop_operation_state, detailed_status
6073 )
6074 )
6075
6076 step = "Updating policies"
6077 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6078 result = "COMPLETED"
6079 detailed_status = "Done"
6080 db_nslcmop_update["detailed-status"] = "Done"
6081
6082 # helm base EE
6083 for item in helm_artifacts:
6084 if not (
6085 item["current_artifact_path"]
6086 and item["target_artifact_path"]
6087 and self.check_charm_hash_changed(
6088 item["current_artifact_path"],
6089 item["target_artifact_path"],
6090 )
6091 ):
6092 continue
6093 db_update_entry = "_admin.deployed.VCA.{}.".format(
6094 item["vca_index"]
6095 )
6096 vnfr_id = db_vnfr["_id"]
6097 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6098 db_dict = {
6099 "collection": "nsrs",
6100 "filter": {"_id": nsr_id},
6101 "path": db_update_entry,
6102 }
6103 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
6104 await self.vca_map[vca_type].upgrade_execution_environment(
6105 namespace=namespace,
6106 helm_id=helm_id,
6107 db_dict=db_dict,
6108 config=osm_config,
6109 artifact_path=item["target_artifact_path"],
6110 vca_type=vca_type,
6111 )
6112 vnf_id = db_vnfr.get("vnfd-ref")
6113 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6114 self.logger.debug("get ssh key block")
6115 rw_mgmt_ip = None
6116 if deep_get(
6117 config_descriptor,
6118 ("config-access", "ssh-access", "required"),
6119 ):
6120 # Needed to inject a ssh key
6121 user = deep_get(
6122 config_descriptor,
6123 ("config-access", "ssh-access", "default-user"),
6124 )
6125 step = (
6126 "Install configuration Software, getting public ssh key"
6127 )
6128 pub_key = await self.vca_map[
6129 vca_type
6130 ].get_ee_ssh_public__key(
6131 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6132 )
6133
6134 step = (
6135 "Insert public key into VM user={} ssh_key={}".format(
6136 user, pub_key
6137 )
6138 )
6139 self.logger.debug(logging_text + step)
6140
6141 # wait for RO (ip-address) Insert pub_key into VM
6142 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6143 logging_text,
6144 nsr_id,
6145 vnfr_id,
6146 None,
6147 item["vdu_index"],
6148 user=user,
6149 pub_key=pub_key,
6150 )
6151
6152 initial_config_primitive_list = config_descriptor.get(
6153 "initial-config-primitive"
6154 )
6155 config_primitive = next(
6156 (
6157 p
6158 for p in initial_config_primitive_list
6159 if p["name"] == "config"
6160 ),
6161 None,
6162 )
6163 if not config_primitive:
6164 continue
6165
6166 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6167 if rw_mgmt_ip:
6168 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6169 if db_vnfr.get("additionalParamsForVnf"):
6170 deploy_params.update(
6171 parse_yaml_strings(
6172 db_vnfr["additionalParamsForVnf"].copy()
6173 )
6174 )
6175 primitive_params_ = self._map_primitive_params(
6176 config_primitive, {}, deploy_params
6177 )
6178
6179 step = "execute primitive '{}' params '{}'".format(
6180 config_primitive["name"], primitive_params_
6181 )
6182 self.logger.debug(logging_text + step)
6183 await self.vca_map[vca_type].exec_primitive(
6184 ee_id=ee_id,
6185 primitive_name=config_primitive["name"],
6186 params_dict=primitive_params_,
6187 db_dict=db_dict,
6188 vca_id=vca_id,
6189 vca_type=vca_type,
6190 )
6191
6192 step = "Updating policies"
6193 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6194 detailed_status = "Done"
6195 db_nslcmop_update["detailed-status"] = "Done"
6196
6197 # If nslcmop_operation_state is None, so any operation is not failed.
6198 if not nslcmop_operation_state:
6199 nslcmop_operation_state = "COMPLETED"
6200
6201 # If update CHANGE_VNFPKG nslcmop_operation is successful
6202 # vnf revision need to be updated
6203 vnfr_update["revision"] = latest_vnfd_revision
6204 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6205
6206 self.logger.debug(
6207 logging_text
6208 + " task Done with result {} {}".format(
6209 nslcmop_operation_state, detailed_status
6210 )
6211 )
6212 elif update_type == "REMOVE_VNF":
6213 # This part is included in https://osm.etsi.org/gerrit/11876
6214 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6215 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6216 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6217 step = "Removing VNF"
6218 (result, detailed_status) = await self.remove_vnf(
6219 nsr_id, nslcmop_id, vnf_instance_id
6220 )
6221 if result == "FAILED":
6222 nslcmop_operation_state = result
6223 error_description_nslcmop = detailed_status
6224 db_nslcmop_update["detailed-status"] = detailed_status
6225 change_type = "vnf_terminated"
6226 if not nslcmop_operation_state:
6227 nslcmop_operation_state = "COMPLETED"
6228 self.logger.debug(
6229 logging_text
6230 + " task Done with result {} {}".format(
6231 nslcmop_operation_state, detailed_status
6232 )
6233 )
6234
6235 elif update_type == "OPERATE_VNF":
6236 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6237 "vnfInstanceId"
6238 ]
6239 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6240 "changeStateTo"
6241 ]
6242 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6243 "additionalParam"
6244 ]
6245 (result, detailed_status) = await self.rebuild_start_stop(
6246 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6247 )
6248 if result == "FAILED":
6249 nslcmop_operation_state = result
6250 error_description_nslcmop = detailed_status
6251 db_nslcmop_update["detailed-status"] = detailed_status
6252 if not nslcmop_operation_state:
6253 nslcmop_operation_state = "COMPLETED"
6254 self.logger.debug(
6255 logging_text
6256 + " task Done with result {} {}".format(
6257 nslcmop_operation_state, detailed_status
6258 )
6259 )
6260
6261 # If nslcmop_operation_state is None, so any operation is not failed.
6262 # All operations are executed in overall.
6263 if not nslcmop_operation_state:
6264 nslcmop_operation_state = "COMPLETED"
6265 db_nsr_update["operational-status"] = old_operational_status
6266
6267 except (DbException, LcmException, N2VCException, K8sException) as e:
6268 self.logger.error(logging_text + "Exit Exception {}".format(e))
6269 exc = e
6270 except asyncio.CancelledError:
6271 self.logger.error(
6272 logging_text + "Cancelled Exception while '{}'".format(step)
6273 )
6274 exc = "Operation was cancelled"
6275 except asyncio.TimeoutError:
6276 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6277 exc = "Timeout"
6278 except Exception as e:
6279 exc = traceback.format_exc()
6280 self.logger.critical(
6281 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6282 exc_info=True,
6283 )
6284 finally:
6285 if exc:
6286 db_nslcmop_update[
6287 "detailed-status"
6288 ] = (
6289 detailed_status
6290 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6291 nslcmop_operation_state = "FAILED"
6292 db_nsr_update["operational-status"] = old_operational_status
6293 if db_nsr:
6294 self._write_ns_status(
6295 nsr_id=nsr_id,
6296 ns_state=db_nsr["nsState"],
6297 current_operation="IDLE",
6298 current_operation_id=None,
6299 other_update=db_nsr_update,
6300 )
6301
6302 self._write_op_status(
6303 op_id=nslcmop_id,
6304 stage="",
6305 error_message=error_description_nslcmop,
6306 operation_state=nslcmop_operation_state,
6307 other_update=db_nslcmop_update,
6308 )
6309
6310 if nslcmop_operation_state:
6311 try:
6312 msg = {
6313 "nsr_id": nsr_id,
6314 "nslcmop_id": nslcmop_id,
6315 "operationState": nslcmop_operation_state,
6316 }
6317 if change_type in ("vnf_terminated", "policy_updated"):
6318 msg.update({"vnf_member_index": member_vnf_index})
6319 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6320 except Exception as e:
6321 self.logger.error(
6322 logging_text + "kafka_write notification Exception {}".format(e)
6323 )
6324 self.logger.debug(logging_text + "Exit")
6325 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6326 return nslcmop_operation_state, detailed_status
6327
6328 async def scale(self, nsr_id, nslcmop_id):
6329 # Try to lock HA task here
6330 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6331 if not task_is_locked_by_me:
6332 return
6333
6334 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6335 stage = ["", "", ""]
6336 tasks_dict_info = {}
6337 # ^ stage, step, VIM progress
6338 self.logger.debug(logging_text + "Enter")
6339 # get all needed from database
6340 db_nsr = None
6341 db_nslcmop_update = {}
6342 db_nsr_update = {}
6343 exc = None
6344 # in case of error, indicates what part of scale was failed to put nsr at error status
6345 scale_process = None
6346 old_operational_status = ""
6347 old_config_status = ""
6348 nsi_id = None
6349 try:
6350 # wait for any previous tasks in process
6351 step = "Waiting for previous operations to terminate"
6352 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6353 self._write_ns_status(
6354 nsr_id=nsr_id,
6355 ns_state=None,
6356 current_operation="SCALING",
6357 current_operation_id=nslcmop_id,
6358 )
6359
6360 step = "Getting nslcmop from database"
6361 self.logger.debug(
6362 step + " after having waited for previous tasks to be completed"
6363 )
6364 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6365
6366 step = "Getting nsr from database"
6367 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6368 old_operational_status = db_nsr["operational-status"]
6369 old_config_status = db_nsr["config-status"]
6370
6371 step = "Parsing scaling parameters"
6372 db_nsr_update["operational-status"] = "scaling"
6373 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6374 nsr_deployed = db_nsr["_admin"].get("deployed")
6375
6376 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6377 "scaleByStepData"
6378 ]["member-vnf-index"]
6379 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6380 "scaleByStepData"
6381 ]["scaling-group-descriptor"]
6382 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6383 # for backward compatibility
6384 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6385 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6386 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6387 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6388
6389 step = "Getting vnfr from database"
6390 db_vnfr = self.db.get_one(
6391 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6392 )
6393
6394 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6395
6396 step = "Getting vnfd from database"
6397 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6398
6399 base_folder = db_vnfd["_admin"]["storage"]
6400
6401 step = "Getting scaling-group-descriptor"
6402 scaling_descriptor = find_in_list(
6403 get_scaling_aspect(db_vnfd),
6404 lambda scale_desc: scale_desc["name"] == scaling_group,
6405 )
6406 if not scaling_descriptor:
6407 raise LcmException(
6408 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6409 "at vnfd:scaling-group-descriptor".format(scaling_group)
6410 )
6411
6412 step = "Sending scale order to VIM"
6413 # TODO check if ns is in a proper status
6414 nb_scale_op = 0
6415 if not db_nsr["_admin"].get("scaling-group"):
6416 self.update_db_2(
6417 "nsrs",
6418 nsr_id,
6419 {
6420 "_admin.scaling-group": [
6421 {"name": scaling_group, "nb-scale-op": 0}
6422 ]
6423 },
6424 )
6425 admin_scale_index = 0
6426 else:
6427 for admin_scale_index, admin_scale_info in enumerate(
6428 db_nsr["_admin"]["scaling-group"]
6429 ):
6430 if admin_scale_info["name"] == scaling_group:
6431 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6432 break
6433 else: # not found, set index one plus last element and add new entry with the name
6434 admin_scale_index += 1
6435 db_nsr_update[
6436 "_admin.scaling-group.{}.name".format(admin_scale_index)
6437 ] = scaling_group
6438
6439 vca_scaling_info = []
6440 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6441 if scaling_type == "SCALE_OUT":
6442 if "aspect-delta-details" not in scaling_descriptor:
6443 raise LcmException(
6444 "Aspect delta details not fount in scaling descriptor {}".format(
6445 scaling_descriptor["name"]
6446 )
6447 )
6448 # count if max-instance-count is reached
6449 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6450
6451 scaling_info["scaling_direction"] = "OUT"
6452 scaling_info["vdu-create"] = {}
6453 scaling_info["kdu-create"] = {}
6454 for delta in deltas:
6455 for vdu_delta in delta.get("vdu-delta", {}):
6456 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6457 # vdu_index also provides the number of instance of the targeted vdu
6458 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6459 cloud_init_text = self._get_vdu_cloud_init_content(
6460 vdud, db_vnfd
6461 )
6462 if cloud_init_text:
6463 additional_params = (
6464 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6465 or {}
6466 )
6467 cloud_init_list = []
6468
6469 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6470 max_instance_count = 10
6471 if vdu_profile and "max-number-of-instances" in vdu_profile:
6472 max_instance_count = vdu_profile.get(
6473 "max-number-of-instances", 10
6474 )
6475
6476 default_instance_num = get_number_of_instances(
6477 db_vnfd, vdud["id"]
6478 )
6479 instances_number = vdu_delta.get("number-of-instances", 1)
6480 nb_scale_op += instances_number
6481
6482 new_instance_count = nb_scale_op + default_instance_num
6483 # Control if new count is over max and vdu count is less than max.
6484 # Then assign new instance count
6485 if new_instance_count > max_instance_count > vdu_count:
6486 instances_number = new_instance_count - max_instance_count
6487 else:
6488 instances_number = instances_number
6489
6490 if new_instance_count > max_instance_count:
6491 raise LcmException(
6492 "reached the limit of {} (max-instance-count) "
6493 "scaling-out operations for the "
6494 "scaling-group-descriptor '{}'".format(
6495 nb_scale_op, scaling_group
6496 )
6497 )
6498 for x in range(vdu_delta.get("number-of-instances", 1)):
6499 if cloud_init_text:
6500 # TODO Information of its own ip is not available because db_vnfr is not updated.
6501 additional_params["OSM"] = get_osm_params(
6502 db_vnfr, vdu_delta["id"], vdu_index + x
6503 )
6504 cloud_init_list.append(
6505 self._parse_cloud_init(
6506 cloud_init_text,
6507 additional_params,
6508 db_vnfd["id"],
6509 vdud["id"],
6510 )
6511 )
6512 vca_scaling_info.append(
6513 {
6514 "osm_vdu_id": vdu_delta["id"],
6515 "member-vnf-index": vnf_index,
6516 "type": "create",
6517 "vdu_index": vdu_index + x,
6518 }
6519 )
6520 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6521 for kdu_delta in delta.get("kdu-resource-delta", {}):
6522 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6523 kdu_name = kdu_profile["kdu-name"]
6524 resource_name = kdu_profile.get("resource-name", "")
6525
6526 # Might have different kdus in the same delta
6527 # Should have list for each kdu
6528 if not scaling_info["kdu-create"].get(kdu_name, None):
6529 scaling_info["kdu-create"][kdu_name] = []
6530
6531 kdur = get_kdur(db_vnfr, kdu_name)
6532 if kdur.get("helm-chart"):
6533 k8s_cluster_type = "helm-chart-v3"
6534 self.logger.debug("kdur: {}".format(kdur))
6535 if (
6536 kdur.get("helm-version")
6537 and kdur.get("helm-version") == "v2"
6538 ):
6539 k8s_cluster_type = "helm-chart"
6540 elif kdur.get("juju-bundle"):
6541 k8s_cluster_type = "juju-bundle"
6542 else:
6543 raise LcmException(
6544 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6545 "juju-bundle. Maybe an old NBI version is running".format(
6546 db_vnfr["member-vnf-index-ref"], kdu_name
6547 )
6548 )
6549
6550 max_instance_count = 10
6551 if kdu_profile and "max-number-of-instances" in kdu_profile:
6552 max_instance_count = kdu_profile.get(
6553 "max-number-of-instances", 10
6554 )
6555
6556 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6557 deployed_kdu, _ = get_deployed_kdu(
6558 nsr_deployed, kdu_name, vnf_index
6559 )
6560 if deployed_kdu is None:
6561 raise LcmException(
6562 "KDU '{}' for vnf '{}' not deployed".format(
6563 kdu_name, vnf_index
6564 )
6565 )
6566 kdu_instance = deployed_kdu.get("kdu-instance")
6567 instance_num = await self.k8scluster_map[
6568 k8s_cluster_type
6569 ].get_scale_count(
6570 resource_name,
6571 kdu_instance,
6572 vca_id=vca_id,
6573 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6574 kdu_model=deployed_kdu.get("kdu-model"),
6575 )
6576 kdu_replica_count = instance_num + kdu_delta.get(
6577 "number-of-instances", 1
6578 )
6579
6580 # Control if new count is over max and instance_num is less than max.
6581 # Then assign max instance number to kdu replica count
6582 if kdu_replica_count > max_instance_count > instance_num:
6583 kdu_replica_count = max_instance_count
6584 if kdu_replica_count > max_instance_count:
6585 raise LcmException(
6586 "reached the limit of {} (max-instance-count) "
6587 "scaling-out operations for the "
6588 "scaling-group-descriptor '{}'".format(
6589 instance_num, scaling_group
6590 )
6591 )
6592
6593 for x in range(kdu_delta.get("number-of-instances", 1)):
6594 vca_scaling_info.append(
6595 {
6596 "osm_kdu_id": kdu_name,
6597 "member-vnf-index": vnf_index,
6598 "type": "create",
6599 "kdu_index": instance_num + x - 1,
6600 }
6601 )
6602 scaling_info["kdu-create"][kdu_name].append(
6603 {
6604 "member-vnf-index": vnf_index,
6605 "type": "create",
6606 "k8s-cluster-type": k8s_cluster_type,
6607 "resource-name": resource_name,
6608 "scale": kdu_replica_count,
6609 }
6610 )
6611 elif scaling_type == "SCALE_IN":
6612 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6613
6614 scaling_info["scaling_direction"] = "IN"
6615 scaling_info["vdu-delete"] = {}
6616 scaling_info["kdu-delete"] = {}
6617
6618 for delta in deltas:
6619 for vdu_delta in delta.get("vdu-delta", {}):
6620 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6621 min_instance_count = 0
6622 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6623 if vdu_profile and "min-number-of-instances" in vdu_profile:
6624 min_instance_count = vdu_profile["min-number-of-instances"]
6625
6626 default_instance_num = get_number_of_instances(
6627 db_vnfd, vdu_delta["id"]
6628 )
6629 instance_num = vdu_delta.get("number-of-instances", 1)
6630 nb_scale_op -= instance_num
6631
6632 new_instance_count = nb_scale_op + default_instance_num
6633
6634 if new_instance_count < min_instance_count < vdu_count:
6635 instances_number = min_instance_count - new_instance_count
6636 else:
6637 instances_number = instance_num
6638
6639 if new_instance_count < min_instance_count:
6640 raise LcmException(
6641 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6642 "scaling-group-descriptor '{}'".format(
6643 nb_scale_op, scaling_group
6644 )
6645 )
6646 for x in range(vdu_delta.get("number-of-instances", 1)):
6647 vca_scaling_info.append(
6648 {
6649 "osm_vdu_id": vdu_delta["id"],
6650 "member-vnf-index": vnf_index,
6651 "type": "delete",
6652 "vdu_index": vdu_index - 1 - x,
6653 }
6654 )
6655 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6656 for kdu_delta in delta.get("kdu-resource-delta", {}):
6657 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6658 kdu_name = kdu_profile["kdu-name"]
6659 resource_name = kdu_profile.get("resource-name", "")
6660
6661 if not scaling_info["kdu-delete"].get(kdu_name, None):
6662 scaling_info["kdu-delete"][kdu_name] = []
6663
6664 kdur = get_kdur(db_vnfr, kdu_name)
6665 if kdur.get("helm-chart"):
6666 k8s_cluster_type = "helm-chart-v3"
6667 self.logger.debug("kdur: {}".format(kdur))
6668 if (
6669 kdur.get("helm-version")
6670 and kdur.get("helm-version") == "v2"
6671 ):
6672 k8s_cluster_type = "helm-chart"
6673 elif kdur.get("juju-bundle"):
6674 k8s_cluster_type = "juju-bundle"
6675 else:
6676 raise LcmException(
6677 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6678 "juju-bundle. Maybe an old NBI version is running".format(
6679 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6680 )
6681 )
6682
6683 min_instance_count = 0
6684 if kdu_profile and "min-number-of-instances" in kdu_profile:
6685 min_instance_count = kdu_profile["min-number-of-instances"]
6686
6687 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6688 deployed_kdu, _ = get_deployed_kdu(
6689 nsr_deployed, kdu_name, vnf_index
6690 )
6691 if deployed_kdu is None:
6692 raise LcmException(
6693 "KDU '{}' for vnf '{}' not deployed".format(
6694 kdu_name, vnf_index
6695 )
6696 )
6697 kdu_instance = deployed_kdu.get("kdu-instance")
6698 instance_num = await self.k8scluster_map[
6699 k8s_cluster_type
6700 ].get_scale_count(
6701 resource_name,
6702 kdu_instance,
6703 vca_id=vca_id,
6704 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6705 kdu_model=deployed_kdu.get("kdu-model"),
6706 )
6707 kdu_replica_count = instance_num - kdu_delta.get(
6708 "number-of-instances", 1
6709 )
6710
6711 if kdu_replica_count < min_instance_count < instance_num:
6712 kdu_replica_count = min_instance_count
6713 if kdu_replica_count < min_instance_count:
6714 raise LcmException(
6715 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6716 "scaling-group-descriptor '{}'".format(
6717 instance_num, scaling_group
6718 )
6719 )
6720
6721 for x in range(kdu_delta.get("number-of-instances", 1)):
6722 vca_scaling_info.append(
6723 {
6724 "osm_kdu_id": kdu_name,
6725 "member-vnf-index": vnf_index,
6726 "type": "delete",
6727 "kdu_index": instance_num - x - 1,
6728 }
6729 )
6730 scaling_info["kdu-delete"][kdu_name].append(
6731 {
6732 "member-vnf-index": vnf_index,
6733 "type": "delete",
6734 "k8s-cluster-type": k8s_cluster_type,
6735 "resource-name": resource_name,
6736 "scale": kdu_replica_count,
6737 }
6738 )
6739
6740 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6741 vdu_delete = copy(scaling_info.get("vdu-delete"))
6742 if scaling_info["scaling_direction"] == "IN":
6743 for vdur in reversed(db_vnfr["vdur"]):
6744 if vdu_delete.get(vdur["vdu-id-ref"]):
6745 vdu_delete[vdur["vdu-id-ref"]] -= 1
6746 scaling_info["vdu"].append(
6747 {
6748 "name": vdur.get("name") or vdur.get("vdu-name"),
6749 "vdu_id": vdur["vdu-id-ref"],
6750 "interface": [],
6751 }
6752 )
6753 for interface in vdur["interfaces"]:
6754 scaling_info["vdu"][-1]["interface"].append(
6755 {
6756 "name": interface["name"],
6757 "ip_address": interface["ip-address"],
6758 "mac_address": interface.get("mac-address"),
6759 }
6760 )
6761 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6762
6763 # PRE-SCALE BEGIN
6764 step = "Executing pre-scale vnf-config-primitive"
6765 if scaling_descriptor.get("scaling-config-action"):
6766 for scaling_config_action in scaling_descriptor[
6767 "scaling-config-action"
6768 ]:
6769 if (
6770 scaling_config_action.get("trigger") == "pre-scale-in"
6771 and scaling_type == "SCALE_IN"
6772 ) or (
6773 scaling_config_action.get("trigger") == "pre-scale-out"
6774 and scaling_type == "SCALE_OUT"
6775 ):
6776 vnf_config_primitive = scaling_config_action[
6777 "vnf-config-primitive-name-ref"
6778 ]
6779 step = db_nslcmop_update[
6780 "detailed-status"
6781 ] = "executing pre-scale scaling-config-action '{}'".format(
6782 vnf_config_primitive
6783 )
6784
6785 # look for primitive
6786 for config_primitive in (
6787 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6788 ).get("config-primitive", ()):
6789 if config_primitive["name"] == vnf_config_primitive:
6790 break
6791 else:
6792 raise LcmException(
6793 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6794 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6795 "primitive".format(scaling_group, vnf_config_primitive)
6796 )
6797
6798 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6799 if db_vnfr.get("additionalParamsForVnf"):
6800 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6801
6802 scale_process = "VCA"
6803 db_nsr_update["config-status"] = "configuring pre-scaling"
6804 primitive_params = self._map_primitive_params(
6805 config_primitive, {}, vnfr_params
6806 )
6807
6808 # Pre-scale retry check: Check if this sub-operation has been executed before
6809 op_index = self._check_or_add_scale_suboperation(
6810 db_nslcmop,
6811 vnf_index,
6812 vnf_config_primitive,
6813 primitive_params,
6814 "PRE-SCALE",
6815 )
6816 if op_index == self.SUBOPERATION_STATUS_SKIP:
6817 # Skip sub-operation
6818 result = "COMPLETED"
6819 result_detail = "Done"
6820 self.logger.debug(
6821 logging_text
6822 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6823 vnf_config_primitive, result, result_detail
6824 )
6825 )
6826 else:
6827 if op_index == self.SUBOPERATION_STATUS_NEW:
6828 # New sub-operation: Get index of this sub-operation
6829 op_index = (
6830 len(db_nslcmop.get("_admin", {}).get("operations"))
6831 - 1
6832 )
6833 self.logger.debug(
6834 logging_text
6835 + "vnf_config_primitive={} New sub-operation".format(
6836 vnf_config_primitive
6837 )
6838 )
6839 else:
6840 # retry: Get registered params for this existing sub-operation
6841 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6842 op_index
6843 ]
6844 vnf_index = op.get("member_vnf_index")
6845 vnf_config_primitive = op.get("primitive")
6846 primitive_params = op.get("primitive_params")
6847 self.logger.debug(
6848 logging_text
6849 + "vnf_config_primitive={} Sub-operation retry".format(
6850 vnf_config_primitive
6851 )
6852 )
6853 # Execute the primitive, either with new (first-time) or registered (reintent) args
6854 ee_descriptor_id = config_primitive.get(
6855 "execution-environment-ref"
6856 )
6857 primitive_name = config_primitive.get(
6858 "execution-environment-primitive", vnf_config_primitive
6859 )
6860 ee_id, vca_type = self._look_for_deployed_vca(
6861 nsr_deployed["VCA"],
6862 member_vnf_index=vnf_index,
6863 vdu_id=None,
6864 vdu_count_index=None,
6865 ee_descriptor_id=ee_descriptor_id,
6866 )
6867 result, result_detail = await self._ns_execute_primitive(
6868 ee_id,
6869 primitive_name,
6870 primitive_params,
6871 vca_type=vca_type,
6872 vca_id=vca_id,
6873 )
6874 self.logger.debug(
6875 logging_text
6876 + "vnf_config_primitive={} Done with result {} {}".format(
6877 vnf_config_primitive, result, result_detail
6878 )
6879 )
6880 # Update operationState = COMPLETED | FAILED
6881 self._update_suboperation_status(
6882 db_nslcmop, op_index, result, result_detail
6883 )
6884
6885 if result == "FAILED":
6886 raise LcmException(result_detail)
6887 db_nsr_update["config-status"] = old_config_status
6888 scale_process = None
6889 # PRE-SCALE END
6890
6891 db_nsr_update[
6892 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6893 ] = nb_scale_op
6894 db_nsr_update[
6895 "_admin.scaling-group.{}.time".format(admin_scale_index)
6896 ] = time()
6897
6898 # SCALE-IN VCA - BEGIN
6899 if vca_scaling_info:
6900 step = db_nslcmop_update[
6901 "detailed-status"
6902 ] = "Deleting the execution environments"
6903 scale_process = "VCA"
6904 for vca_info in vca_scaling_info:
6905 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6906 member_vnf_index = str(vca_info["member-vnf-index"])
6907 self.logger.debug(
6908 logging_text + "vdu info: {}".format(vca_info)
6909 )
6910 if vca_info.get("osm_vdu_id"):
6911 vdu_id = vca_info["osm_vdu_id"]
6912 vdu_index = int(vca_info["vdu_index"])
6913 stage[
6914 1
6915 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6916 member_vnf_index, vdu_id, vdu_index
6917 )
6918 stage[2] = step = "Scaling in VCA"
6919 self._write_op_status(op_id=nslcmop_id, stage=stage)
6920 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6921 config_update = db_nsr["configurationStatus"]
6922 for vca_index, vca in enumerate(vca_update):
6923 if (
6924 (vca or vca.get("ee_id"))
6925 and vca["member-vnf-index"] == member_vnf_index
6926 and vca["vdu_count_index"] == vdu_index
6927 ):
6928 if vca.get("vdu_id"):
6929 config_descriptor = get_configuration(
6930 db_vnfd, vca.get("vdu_id")
6931 )
6932 elif vca.get("kdu_name"):
6933 config_descriptor = get_configuration(
6934 db_vnfd, vca.get("kdu_name")
6935 )
6936 else:
6937 config_descriptor = get_configuration(
6938 db_vnfd, db_vnfd["id"]
6939 )
6940 operation_params = (
6941 db_nslcmop.get("operationParams") or {}
6942 )
6943 exec_terminate_primitives = not operation_params.get(
6944 "skip_terminate_primitives"
6945 ) and vca.get("needed_terminate")
6946 task = asyncio.ensure_future(
6947 asyncio.wait_for(
6948 self.destroy_N2VC(
6949 logging_text,
6950 db_nslcmop,
6951 vca,
6952 config_descriptor,
6953 vca_index,
6954 destroy_ee=True,
6955 exec_primitives=exec_terminate_primitives,
6956 scaling_in=True,
6957 vca_id=vca_id,
6958 ),
6959 timeout=self.timeout.charm_delete,
6960 )
6961 )
6962 tasks_dict_info[task] = "Terminating VCA {}".format(
6963 vca.get("ee_id")
6964 )
6965 del vca_update[vca_index]
6966 del config_update[vca_index]
6967 # wait for pending tasks of terminate primitives
6968 if tasks_dict_info:
6969 self.logger.debug(
6970 logging_text
6971 + "Waiting for tasks {}".format(
6972 list(tasks_dict_info.keys())
6973 )
6974 )
6975 error_list = await self._wait_for_tasks(
6976 logging_text,
6977 tasks_dict_info,
6978 min(
6979 self.timeout.charm_delete, self.timeout.ns_terminate
6980 ),
6981 stage,
6982 nslcmop_id,
6983 )
6984 tasks_dict_info.clear()
6985 if error_list:
6986 raise LcmException("; ".join(error_list))
6987
6988 db_vca_and_config_update = {
6989 "_admin.deployed.VCA": vca_update,
6990 "configurationStatus": config_update,
6991 }
6992 self.update_db_2(
6993 "nsrs", db_nsr["_id"], db_vca_and_config_update
6994 )
6995 scale_process = None
6996 # SCALE-IN VCA - END
6997
6998 # SCALE RO - BEGIN
6999 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
7000 scale_process = "RO"
7001 if self.ro_config.ng:
7002 await self._scale_ng_ro(
7003 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
7004 )
7005 scaling_info.pop("vdu-create", None)
7006 scaling_info.pop("vdu-delete", None)
7007
7008 scale_process = None
7009 # SCALE RO - END
7010
7011 # SCALE KDU - BEGIN
7012 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
7013 scale_process = "KDU"
7014 await self._scale_kdu(
7015 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7016 )
7017 scaling_info.pop("kdu-create", None)
7018 scaling_info.pop("kdu-delete", None)
7019
7020 scale_process = None
7021 # SCALE KDU - END
7022
7023 if db_nsr_update:
7024 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7025
7026 # SCALE-UP VCA - BEGIN
7027 if vca_scaling_info:
7028 step = db_nslcmop_update[
7029 "detailed-status"
7030 ] = "Creating new execution environments"
7031 scale_process = "VCA"
7032 for vca_info in vca_scaling_info:
7033 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
7034 member_vnf_index = str(vca_info["member-vnf-index"])
7035 self.logger.debug(
7036 logging_text + "vdu info: {}".format(vca_info)
7037 )
7038 vnfd_id = db_vnfr["vnfd-ref"]
7039 if vca_info.get("osm_vdu_id"):
7040 vdu_index = int(vca_info["vdu_index"])
7041 deploy_params = {"OSM": get_osm_params(db_vnfr)}
7042 if db_vnfr.get("additionalParamsForVnf"):
7043 deploy_params.update(
7044 parse_yaml_strings(
7045 db_vnfr["additionalParamsForVnf"].copy()
7046 )
7047 )
7048 descriptor_config = get_configuration(
7049 db_vnfd, db_vnfd["id"]
7050 )
7051 if descriptor_config:
7052 vdu_id = None
7053 vdu_name = None
7054 kdu_name = None
7055 kdu_index = None
7056 self._deploy_n2vc(
7057 logging_text=logging_text
7058 + "member_vnf_index={} ".format(member_vnf_index),
7059 db_nsr=db_nsr,
7060 db_vnfr=db_vnfr,
7061 nslcmop_id=nslcmop_id,
7062 nsr_id=nsr_id,
7063 nsi_id=nsi_id,
7064 vnfd_id=vnfd_id,
7065 vdu_id=vdu_id,
7066 kdu_name=kdu_name,
7067 kdu_index=kdu_index,
7068 member_vnf_index=member_vnf_index,
7069 vdu_index=vdu_index,
7070 vdu_name=vdu_name,
7071 deploy_params=deploy_params,
7072 descriptor_config=descriptor_config,
7073 base_folder=base_folder,
7074 task_instantiation_info=tasks_dict_info,
7075 stage=stage,
7076 )
7077 vdu_id = vca_info["osm_vdu_id"]
7078 vdur = find_in_list(
7079 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7080 )
7081 descriptor_config = get_configuration(db_vnfd, vdu_id)
7082 if vdur.get("additionalParams"):
7083 deploy_params_vdu = parse_yaml_strings(
7084 vdur["additionalParams"]
7085 )
7086 else:
7087 deploy_params_vdu = deploy_params
7088 deploy_params_vdu["OSM"] = get_osm_params(
7089 db_vnfr, vdu_id, vdu_count_index=vdu_index
7090 )
7091 if descriptor_config:
7092 vdu_name = None
7093 kdu_name = None
7094 kdu_index = None
7095 stage[
7096 1
7097 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7098 member_vnf_index, vdu_id, vdu_index
7099 )
7100 stage[2] = step = "Scaling out VCA"
7101 self._write_op_status(op_id=nslcmop_id, stage=stage)
7102 self._deploy_n2vc(
7103 logging_text=logging_text
7104 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7105 member_vnf_index, vdu_id, vdu_index
7106 ),
7107 db_nsr=db_nsr,
7108 db_vnfr=db_vnfr,
7109 nslcmop_id=nslcmop_id,
7110 nsr_id=nsr_id,
7111 nsi_id=nsi_id,
7112 vnfd_id=vnfd_id,
7113 vdu_id=vdu_id,
7114 kdu_name=kdu_name,
7115 member_vnf_index=member_vnf_index,
7116 vdu_index=vdu_index,
7117 kdu_index=kdu_index,
7118 vdu_name=vdu_name,
7119 deploy_params=deploy_params_vdu,
7120 descriptor_config=descriptor_config,
7121 base_folder=base_folder,
7122 task_instantiation_info=tasks_dict_info,
7123 stage=stage,
7124 )
7125 # SCALE-UP VCA - END
7126 scale_process = None
7127
7128 # POST-SCALE BEGIN
7129 # execute primitive service POST-SCALING
7130 step = "Executing post-scale vnf-config-primitive"
7131 if scaling_descriptor.get("scaling-config-action"):
7132 for scaling_config_action in scaling_descriptor[
7133 "scaling-config-action"
7134 ]:
7135 if (
7136 scaling_config_action.get("trigger") == "post-scale-in"
7137 and scaling_type == "SCALE_IN"
7138 ) or (
7139 scaling_config_action.get("trigger") == "post-scale-out"
7140 and scaling_type == "SCALE_OUT"
7141 ):
7142 vnf_config_primitive = scaling_config_action[
7143 "vnf-config-primitive-name-ref"
7144 ]
7145 step = db_nslcmop_update[
7146 "detailed-status"
7147 ] = "executing post-scale scaling-config-action '{}'".format(
7148 vnf_config_primitive
7149 )
7150
7151 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7152 if db_vnfr.get("additionalParamsForVnf"):
7153 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7154
7155 # look for primitive
7156 for config_primitive in (
7157 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7158 ).get("config-primitive", ()):
7159 if config_primitive["name"] == vnf_config_primitive:
7160 break
7161 else:
7162 raise LcmException(
7163 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7164 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7165 "config-primitive".format(
7166 scaling_group, vnf_config_primitive
7167 )
7168 )
7169 scale_process = "VCA"
7170 db_nsr_update["config-status"] = "configuring post-scaling"
7171 primitive_params = self._map_primitive_params(
7172 config_primitive, {}, vnfr_params
7173 )
7174
7175 # Post-scale retry check: Check if this sub-operation has been executed before
7176 op_index = self._check_or_add_scale_suboperation(
7177 db_nslcmop,
7178 vnf_index,
7179 vnf_config_primitive,
7180 primitive_params,
7181 "POST-SCALE",
7182 )
7183 if op_index == self.SUBOPERATION_STATUS_SKIP:
7184 # Skip sub-operation
7185 result = "COMPLETED"
7186 result_detail = "Done"
7187 self.logger.debug(
7188 logging_text
7189 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7190 vnf_config_primitive, result, result_detail
7191 )
7192 )
7193 else:
7194 if op_index == self.SUBOPERATION_STATUS_NEW:
7195 # New sub-operation: Get index of this sub-operation
7196 op_index = (
7197 len(db_nslcmop.get("_admin", {}).get("operations"))
7198 - 1
7199 )
7200 self.logger.debug(
7201 logging_text
7202 + "vnf_config_primitive={} New sub-operation".format(
7203 vnf_config_primitive
7204 )
7205 )
7206 else:
7207 # retry: Get registered params for this existing sub-operation
7208 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7209 op_index
7210 ]
7211 vnf_index = op.get("member_vnf_index")
7212 vnf_config_primitive = op.get("primitive")
7213 primitive_params = op.get("primitive_params")
7214 self.logger.debug(
7215 logging_text
7216 + "vnf_config_primitive={} Sub-operation retry".format(
7217 vnf_config_primitive
7218 )
7219 )
7220 # Execute the primitive, either with new (first-time) or registered (reintent) args
7221 ee_descriptor_id = config_primitive.get(
7222 "execution-environment-ref"
7223 )
7224 primitive_name = config_primitive.get(
7225 "execution-environment-primitive", vnf_config_primitive
7226 )
7227 ee_id, vca_type = self._look_for_deployed_vca(
7228 nsr_deployed["VCA"],
7229 member_vnf_index=vnf_index,
7230 vdu_id=None,
7231 vdu_count_index=None,
7232 ee_descriptor_id=ee_descriptor_id,
7233 )
7234 result, result_detail = await self._ns_execute_primitive(
7235 ee_id,
7236 primitive_name,
7237 primitive_params,
7238 vca_type=vca_type,
7239 vca_id=vca_id,
7240 )
7241 self.logger.debug(
7242 logging_text
7243 + "vnf_config_primitive={} Done with result {} {}".format(
7244 vnf_config_primitive, result, result_detail
7245 )
7246 )
7247 # Update operationState = COMPLETED | FAILED
7248 self._update_suboperation_status(
7249 db_nslcmop, op_index, result, result_detail
7250 )
7251
7252 if result == "FAILED":
7253 raise LcmException(result_detail)
7254 db_nsr_update["config-status"] = old_config_status
7255 scale_process = None
7256 # POST-SCALE END
7257
7258 db_nsr_update[
7259 "detailed-status"
7260 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7261 db_nsr_update["operational-status"] = (
7262 "running"
7263 if old_operational_status == "failed"
7264 else old_operational_status
7265 )
7266 db_nsr_update["config-status"] = old_config_status
7267 return
7268 except (
7269 ROclient.ROClientException,
7270 DbException,
7271 LcmException,
7272 NgRoException,
7273 ) as e:
7274 self.logger.error(logging_text + "Exit Exception {}".format(e))
7275 exc = e
7276 except asyncio.CancelledError:
7277 self.logger.error(
7278 logging_text + "Cancelled Exception while '{}'".format(step)
7279 )
7280 exc = "Operation was cancelled"
7281 except Exception as e:
7282 exc = traceback.format_exc()
7283 self.logger.critical(
7284 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7285 exc_info=True,
7286 )
7287 finally:
7288 self._write_ns_status(
7289 nsr_id=nsr_id,
7290 ns_state=None,
7291 current_operation="IDLE",
7292 current_operation_id=None,
7293 )
7294 if tasks_dict_info:
7295 stage[1] = "Waiting for instantiate pending tasks."
7296 self.logger.debug(logging_text + stage[1])
7297 exc = await self._wait_for_tasks(
7298 logging_text,
7299 tasks_dict_info,
7300 self.timeout.ns_deploy,
7301 stage,
7302 nslcmop_id,
7303 nsr_id=nsr_id,
7304 )
7305 if exc:
7306 db_nslcmop_update[
7307 "detailed-status"
7308 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7309 nslcmop_operation_state = "FAILED"
7310 if db_nsr:
7311 db_nsr_update["operational-status"] = old_operational_status
7312 db_nsr_update["config-status"] = old_config_status
7313 db_nsr_update["detailed-status"] = ""
7314 if scale_process:
7315 if "VCA" in scale_process:
7316 db_nsr_update["config-status"] = "failed"
7317 if "RO" in scale_process:
7318 db_nsr_update["operational-status"] = "failed"
7319 db_nsr_update[
7320 "detailed-status"
7321 ] = "FAILED scaling nslcmop={} {}: {}".format(
7322 nslcmop_id, step, exc
7323 )
7324 else:
7325 error_description_nslcmop = None
7326 nslcmop_operation_state = "COMPLETED"
7327 db_nslcmop_update["detailed-status"] = "Done"
7328
7329 self._write_op_status(
7330 op_id=nslcmop_id,
7331 stage="",
7332 error_message=error_description_nslcmop,
7333 operation_state=nslcmop_operation_state,
7334 other_update=db_nslcmop_update,
7335 )
7336 if db_nsr:
7337 self._write_ns_status(
7338 nsr_id=nsr_id,
7339 ns_state=None,
7340 current_operation="IDLE",
7341 current_operation_id=None,
7342 other_update=db_nsr_update,
7343 )
7344
7345 if nslcmop_operation_state:
7346 try:
7347 msg = {
7348 "nsr_id": nsr_id,
7349 "nslcmop_id": nslcmop_id,
7350 "operationState": nslcmop_operation_state,
7351 }
7352 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7353 except Exception as e:
7354 self.logger.error(
7355 logging_text + "kafka_write notification Exception {}".format(e)
7356 )
7357 self.logger.debug(logging_text + "Exit")
7358 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7359
7360 async def _scale_kdu(
7361 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7362 ):
7363 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7364 for kdu_name in _scaling_info:
7365 for kdu_scaling_info in _scaling_info[kdu_name]:
7366 deployed_kdu, index = get_deployed_kdu(
7367 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7368 )
7369 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7370 kdu_instance = deployed_kdu["kdu-instance"]
7371 kdu_model = deployed_kdu.get("kdu-model")
7372 scale = int(kdu_scaling_info["scale"])
7373 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7374
7375 db_dict = {
7376 "collection": "nsrs",
7377 "filter": {"_id": nsr_id},
7378 "path": "_admin.deployed.K8s.{}".format(index),
7379 }
7380
7381 step = "scaling application {}".format(
7382 kdu_scaling_info["resource-name"]
7383 )
7384 self.logger.debug(logging_text + step)
7385
7386 if kdu_scaling_info["type"] == "delete":
7387 kdu_config = get_configuration(db_vnfd, kdu_name)
7388 if (
7389 kdu_config
7390 and kdu_config.get("terminate-config-primitive")
7391 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7392 ):
7393 terminate_config_primitive_list = kdu_config.get(
7394 "terminate-config-primitive"
7395 )
7396 terminate_config_primitive_list.sort(
7397 key=lambda val: int(val["seq"])
7398 )
7399
7400 for (
7401 terminate_config_primitive
7402 ) in terminate_config_primitive_list:
7403 primitive_params_ = self._map_primitive_params(
7404 terminate_config_primitive, {}, {}
7405 )
7406 step = "execute terminate config primitive"
7407 self.logger.debug(logging_text + step)
7408 await asyncio.wait_for(
7409 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7410 cluster_uuid=cluster_uuid,
7411 kdu_instance=kdu_instance,
7412 primitive_name=terminate_config_primitive["name"],
7413 params=primitive_params_,
7414 db_dict=db_dict,
7415 total_timeout=self.timeout.primitive,
7416 vca_id=vca_id,
7417 ),
7418 timeout=self.timeout.primitive
7419 * self.timeout.primitive_outer_factor,
7420 )
7421
7422 await asyncio.wait_for(
7423 self.k8scluster_map[k8s_cluster_type].scale(
7424 kdu_instance=kdu_instance,
7425 scale=scale,
7426 resource_name=kdu_scaling_info["resource-name"],
7427 total_timeout=self.timeout.scale_on_error,
7428 vca_id=vca_id,
7429 cluster_uuid=cluster_uuid,
7430 kdu_model=kdu_model,
7431 atomic=True,
7432 db_dict=db_dict,
7433 ),
7434 timeout=self.timeout.scale_on_error
7435 * self.timeout.scale_on_error_outer_factor,
7436 )
7437
7438 if kdu_scaling_info["type"] == "create":
7439 kdu_config = get_configuration(db_vnfd, kdu_name)
7440 if (
7441 kdu_config
7442 and kdu_config.get("initial-config-primitive")
7443 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7444 ):
7445 initial_config_primitive_list = kdu_config.get(
7446 "initial-config-primitive"
7447 )
7448 initial_config_primitive_list.sort(
7449 key=lambda val: int(val["seq"])
7450 )
7451
7452 for initial_config_primitive in initial_config_primitive_list:
7453 primitive_params_ = self._map_primitive_params(
7454 initial_config_primitive, {}, {}
7455 )
7456 step = "execute initial config primitive"
7457 self.logger.debug(logging_text + step)
7458 await asyncio.wait_for(
7459 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7460 cluster_uuid=cluster_uuid,
7461 kdu_instance=kdu_instance,
7462 primitive_name=initial_config_primitive["name"],
7463 params=primitive_params_,
7464 db_dict=db_dict,
7465 vca_id=vca_id,
7466 ),
7467 timeout=600,
7468 )
7469
    async def _scale_ng_ro(
        self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
    ):
        """Apply a VDU scaling (create/delete) through NG-RO.

        Marks the affected vdur entries in db_vnfr, re-runs the NG-RO
        instantiation to reconcile the NS with the new VDU set, and finally
        removes the vdur entries of deleted VDUs.

        :param logging_text: prefix for log messages
        :param db_nsr: NS record (dict) from the "nsrs" collection
        :param db_nslcmop: operation record (dict) from "nslcmops"
        :param db_vnfr: VNF record (dict) of the VNF being scaled
        :param vdu_scaling_info: dict with optional "vdu-create"/"vdu-delete" keys
        :param stage: progress list updated by the callees for op-status reporting
        """
        nsr_id = db_nslcmop["nsInstanceId"]
        db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
        db_vnfrs = {}

        # read from db: vnfd's for every vnf
        db_vnfds = []

        # for each vnf in ns, read vnfd
        for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
            db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
            vnfd_id = vnfr["vnfd-id"]  # vnfd uuid for this vnf
            # if we haven't this vnfd, read it from db
            # NOTE(review): this compares the descriptor field "id" against the
            # DB uuid in "vnfd-id"; if those differ the cache never hits and the
            # vnfd is re-read every iteration - confirm whether "_id" was intended.
            if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
                # read from db
                vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                db_vnfds.append(vnfd)
        n2vc_key = self.n2vc.get_public_key()
        n2vc_key_list = [n2vc_key]
        # mark the vdur entries to create/delete before calling RO
        self.scale_vnfr(
            db_vnfr,
            vdu_scaling_info.get("vdu-create"),
            vdu_scaling_info.get("vdu-delete"),
            mark_delete=True,
        )
        # db_vnfr has been updated, update db_vnfrs to use it
        db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
        await self._instantiate_ng_ro(
            logging_text,
            nsr_id,
            db_nsd,
            db_nsr,
            db_nslcmop,
            db_vnfrs,
            db_vnfds,
            n2vc_key_list,
            stage=stage,
            start_deploy=time(),
            timeout_ns_deploy=self.timeout.ns_deploy,
        )
        if vdu_scaling_info.get("vdu-delete"):
            # now that RO has removed the VMs, drop the marked vdur entries
            self.scale_vnfr(
                db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
            )
7516
7517 async def extract_prometheus_scrape_jobs(
7518 self,
7519 ee_id: str,
7520 artifact_path: str,
7521 ee_config_descriptor: dict,
7522 vnfr_id: str,
7523 nsr_id: str,
7524 target_ip: str,
7525 vnf_member_index: str = "",
7526 vdu_id: str = "",
7527 vdu_index: int = None,
7528 kdu_name: str = "",
7529 kdu_index: int = None,
7530 ) -> dict:
7531 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7532 This method will wait until the corresponding VDU or KDU is fully instantiated
7533
7534 Args:
7535 ee_id (str): Execution Environment ID
7536 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7537 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7538 vnfr_id (str): VNFR ID where this EE applies
7539 nsr_id (str): NSR ID where this EE applies
7540 target_ip (str): VDU/KDU instance IP address
7541 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7542 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7543 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7544 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7545 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7546
7547 Raises:
7548 LcmException: When the VDU or KDU instance was not found in an hour
7549
7550 Returns:
7551 _type_: Prometheus jobs
7552 """
7553 self.logger.debug(f"KDU: {kdu_name}; KDU INDEX: {kdu_index}")
7554 # look if exist a file called 'prometheus*.j2' and
7555 artifact_content = self.fs.dir_ls(artifact_path)
7556 job_file = next(
7557 (
7558 f
7559 for f in artifact_content
7560 if f.startswith("prometheus") and f.endswith(".j2")
7561 ),
7562 None,
7563 )
7564 if not job_file:
7565 return
7566 with self.fs.file_open((artifact_path, job_file), "r") as f:
7567 job_data = f.read()
7568
7569 vdur_name = ""
7570 kdur_name = ""
7571 for r in range(360):
7572 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7573 if vdu_id and vdu_index is not None:
7574 vdur = next(
7575 (
7576 x
7577 for x in get_iterable(db_vnfr, "vdur")
7578 if (
7579 x.get("vdu-id-ref") == vdu_id
7580 and x.get("count-index") == vdu_index
7581 )
7582 ),
7583 {},
7584 )
7585 if vdur.get("name"):
7586 vdur_name = vdur.get("name")
7587 break
7588 if kdu_name and kdu_index is not None:
7589 kdur = next(
7590 (
7591 x
7592 for x in get_iterable(db_vnfr, "kdur")
7593 if (
7594 x.get("kdu-name") == kdu_name
7595 and x.get("count-index") == kdu_index
7596 )
7597 ),
7598 {},
7599 )
7600 if kdur.get("name"):
7601 kdur_name = kdur.get("name")
7602 break
7603
7604 await asyncio.sleep(10, loop=self.loop)
7605 else:
7606 if vdu_id and vdu_index is not None:
7607 raise LcmException(
7608 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7609 )
7610 if kdu_name and kdu_index is not None:
7611 raise LcmException(
7612 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7613 )
7614
7615 # TODO get_service
7616 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7617 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7618 host_port = "80"
7619 vnfr_id = vnfr_id.replace("-", "")
7620 variables = {
7621 "JOB_NAME": vnfr_id,
7622 "TARGET_IP": target_ip,
7623 "EXPORTER_POD_IP": host_name,
7624 "EXPORTER_POD_PORT": host_port,
7625 "NSR_ID": nsr_id,
7626 "VNF_MEMBER_INDEX": vnf_member_index,
7627 "VDUR_NAME": vdur_name,
7628 "KDUR_NAME": kdur_name,
7629 }
7630 job_list = parse_job(job_data, variables)
7631 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7632 for job in job_list:
7633 if (
7634 not isinstance(job.get("job_name"), str)
7635 or vnfr_id not in job["job_name"]
7636 ):
7637 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7638 job["nsr_id"] = nsr_id
7639 job["vnfr_id"] = vnfr_id
7640 return job_list
7641
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """Start, stop or rebuild one VDU instance of a VNF through NG-RO.

        :param nsr_id: NS instance id
        :param nslcmop_id: operation id, used for logging and op-status updates
        :param vnf_id: "_id" of the target vnfr in the DB
        :param additional_param: dict with "vdu_id" and "count-index" selecting
            the VDU instance to operate on
        :param operation_type: RO operation name passed as-is to RO.operate
        :return: ("COMPLETED", "Done") on success,
            ("FAILED", <error detail>) on any error
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # reference time used by _wait_ng_ro to compute the operation deadline
        start_deploy = time()
        try:
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            # all instances (vdur) of the requested VDU, then pick by count-index
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # first key of vim_info identifies the target VIM
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # poll RO until the action finishes or the "operate" timeout expires
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # reached only on error: exc was set by one of the handlers above
        return "FAILED", "Error in operate VNF {}".format(exc)
7728
7729 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7730 """
7731 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7732
7733 :param: vim_account_id: VIM Account ID
7734
7735 :return: (cloud_name, cloud_credential)
7736 """
7737 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7738 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7739
7740 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7741 """
7742 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7743
7744 :param: vim_account_id: VIM Account ID
7745
7746 :return: (cloud_name, cloud_credential)
7747 """
7748 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7749 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7750
    async def migrate(self, nsr_id, nslcmop_id):
        """
        Migrate VNFs and VDUs instances in a NS

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here; if another LCM instance owns it, bail out
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} migrate ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        # fields to persist in the nslcmops record on exit
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is populated on success below but never
        # written to the DB in this method - confirm whether it should be
        # persisted (e.g. via update_db_2 or _write_ns_status other_update).
        db_nsr_update = {}
        target = {}
        exc = None
        # reference time used by _wait_ng_ro to compute the operation deadline
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="MIGRATING",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            migrate_params = db_nslcmop.get("operationParams")

            # the RO request body is the operation parameters as-is
            target = {}
            target.update(migrate_params)
            desc = await self.RO.migrate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # poll RO until the migrate action finishes or the timeout expires
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.migrate,
                operation="migrate",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # always release the "current operation" indication on the NS record
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                # notify subscribers (e.g. NBI) about the operation result;
                # a failure to publish is logged but does not fail the operation
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7853
7854 async def heal(self, nsr_id, nslcmop_id):
7855 """
7856 Heal NS
7857
7858 :param nsr_id: ns instance to heal
7859 :param nslcmop_id: operation to run
7860 :return:
7861 """
7862
7863 # Try to lock HA task here
7864 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7865 if not task_is_locked_by_me:
7866 return
7867
7868 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7869 stage = ["", "", ""]
7870 tasks_dict_info = {}
7871 # ^ stage, step, VIM progress
7872 self.logger.debug(logging_text + "Enter")
7873 # get all needed from database
7874 db_nsr = None
7875 db_nslcmop_update = {}
7876 db_nsr_update = {}
7877 db_vnfrs = {} # vnf's info indexed by _id
7878 exc = None
7879 old_operational_status = ""
7880 old_config_status = ""
7881 nsi_id = None
7882 try:
7883 # wait for any previous tasks in process
7884 step = "Waiting for previous operations to terminate"
7885 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7886 self._write_ns_status(
7887 nsr_id=nsr_id,
7888 ns_state=None,
7889 current_operation="HEALING",
7890 current_operation_id=nslcmop_id,
7891 )
7892
7893 step = "Getting nslcmop from database"
7894 self.logger.debug(
7895 step + " after having waited for previous tasks to be completed"
7896 )
7897 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7898
7899 step = "Getting nsr from database"
7900 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7901 old_operational_status = db_nsr["operational-status"]
7902 old_config_status = db_nsr["config-status"]
7903
7904 db_nsr_update = {
7905 "_admin.deployed.RO.operational-status": "healing",
7906 }
7907 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7908
7909 step = "Sending heal order to VIM"
7910 await self.heal_RO(
7911 logging_text=logging_text,
7912 nsr_id=nsr_id,
7913 db_nslcmop=db_nslcmop,
7914 stage=stage,
7915 )
7916 # VCA tasks
7917 # read from db: nsd
7918 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7919 self.logger.debug(logging_text + stage[1])
7920 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7921 self.fs.sync(db_nsr["nsd-id"])
7922 db_nsr["nsd"] = nsd
7923 # read from db: vnfr's of this ns
7924 step = "Getting vnfrs from db"
7925 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7926 for vnfr in db_vnfrs_list:
7927 db_vnfrs[vnfr["_id"]] = vnfr
7928 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7929
7930 # Check for each target VNF
7931 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7932 for target_vnf in target_list:
7933 # Find this VNF in the list from DB
7934 vnfr_id = target_vnf.get("vnfInstanceId", None)
7935 if vnfr_id:
7936 db_vnfr = db_vnfrs[vnfr_id]
7937 vnfd_id = db_vnfr.get("vnfd-id")
7938 vnfd_ref = db_vnfr.get("vnfd-ref")
7939 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7940 base_folder = vnfd["_admin"]["storage"]
7941 vdu_id = None
7942 vdu_index = 0
7943 vdu_name = None
7944 kdu_name = None
7945 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7946 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7947
7948 # Check each target VDU and deploy N2VC
7949 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7950 "vdu", []
7951 )
7952 if not target_vdu_list:
7953 # Codigo nuevo para crear diccionario
7954 target_vdu_list = []
7955 for existing_vdu in db_vnfr.get("vdur"):
7956 vdu_name = existing_vdu.get("vdu-name", None)
7957 vdu_index = existing_vdu.get("count-index", 0)
7958 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7959 "run-day1", False
7960 )
7961 vdu_to_be_healed = {
7962 "vdu-id": vdu_name,
7963 "count-index": vdu_index,
7964 "run-day1": vdu_run_day1,
7965 }
7966 target_vdu_list.append(vdu_to_be_healed)
7967 for target_vdu in target_vdu_list:
7968 deploy_params_vdu = target_vdu
7969 # Set run-day1 vnf level value if not vdu level value exists
7970 if not deploy_params_vdu.get("run-day1") and target_vnf[
7971 "additionalParams"
7972 ].get("run-day1"):
7973 deploy_params_vdu["run-day1"] = target_vnf[
7974 "additionalParams"
7975 ].get("run-day1")
7976 vdu_name = target_vdu.get("vdu-id", None)
7977 # TODO: Get vdu_id from vdud.
7978 vdu_id = vdu_name
7979 # For multi instance VDU count-index is mandatory
7980 # For single session VDU count-indes is 0
7981 vdu_index = target_vdu.get("count-index", 0)
7982
7983 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7984 stage[1] = "Deploying Execution Environments."
7985 self.logger.debug(logging_text + stage[1])
7986
7987 # VNF Level charm. Normal case when proxy charms.
7988 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
7989 descriptor_config = get_configuration(vnfd, vnfd_ref)
7990 if descriptor_config:
7991 # Continue if healed machine is management machine
7992 vnf_ip_address = db_vnfr.get("ip-address")
7993 target_instance = None
7994 for instance in db_vnfr.get("vdur", None):
7995 if (
7996 instance["vdu-name"] == vdu_name
7997 and instance["count-index"] == vdu_index
7998 ):
7999 target_instance = instance
8000 break
8001 if vnf_ip_address == target_instance.get("ip-address"):
8002 self._heal_n2vc(
8003 logging_text=logging_text
8004 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8005 member_vnf_index, vdu_name, vdu_index
8006 ),
8007 db_nsr=db_nsr,
8008 db_vnfr=db_vnfr,
8009 nslcmop_id=nslcmop_id,
8010 nsr_id=nsr_id,
8011 nsi_id=nsi_id,
8012 vnfd_id=vnfd_ref,
8013 vdu_id=None,
8014 kdu_name=None,
8015 member_vnf_index=member_vnf_index,
8016 vdu_index=0,
8017 vdu_name=None,
8018 deploy_params=deploy_params_vdu,
8019 descriptor_config=descriptor_config,
8020 base_folder=base_folder,
8021 task_instantiation_info=tasks_dict_info,
8022 stage=stage,
8023 )
8024
8025 # VDU Level charm. Normal case with native charms.
8026 descriptor_config = get_configuration(vnfd, vdu_name)
8027 if descriptor_config:
8028 self._heal_n2vc(
8029 logging_text=logging_text
8030 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8031 member_vnf_index, vdu_name, vdu_index
8032 ),
8033 db_nsr=db_nsr,
8034 db_vnfr=db_vnfr,
8035 nslcmop_id=nslcmop_id,
8036 nsr_id=nsr_id,
8037 nsi_id=nsi_id,
8038 vnfd_id=vnfd_ref,
8039 vdu_id=vdu_id,
8040 kdu_name=kdu_name,
8041 member_vnf_index=member_vnf_index,
8042 vdu_index=vdu_index,
8043 vdu_name=vdu_name,
8044 deploy_params=deploy_params_vdu,
8045 descriptor_config=descriptor_config,
8046 base_folder=base_folder,
8047 task_instantiation_info=tasks_dict_info,
8048 stage=stage,
8049 )
8050
8051 except (
8052 ROclient.ROClientException,
8053 DbException,
8054 LcmException,
8055 NgRoException,
8056 ) as e:
8057 self.logger.error(logging_text + "Exit Exception {}".format(e))
8058 exc = e
8059 except asyncio.CancelledError:
8060 self.logger.error(
8061 logging_text + "Cancelled Exception while '{}'".format(step)
8062 )
8063 exc = "Operation was cancelled"
8064 except Exception as e:
8065 exc = traceback.format_exc()
8066 self.logger.critical(
8067 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
8068 exc_info=True,
8069 )
8070 finally:
8071 if tasks_dict_info:
8072 stage[1] = "Waiting for healing pending tasks."
8073 self.logger.debug(logging_text + stage[1])
8074 exc = await self._wait_for_tasks(
8075 logging_text,
8076 tasks_dict_info,
8077 self.timeout.ns_deploy,
8078 stage,
8079 nslcmop_id,
8080 nsr_id=nsr_id,
8081 )
8082 if exc:
8083 db_nslcmop_update[
8084 "detailed-status"
8085 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
8086 nslcmop_operation_state = "FAILED"
8087 if db_nsr:
8088 db_nsr_update["operational-status"] = old_operational_status
8089 db_nsr_update["config-status"] = old_config_status
8090 db_nsr_update[
8091 "detailed-status"
8092 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
8093 for task, task_name in tasks_dict_info.items():
8094 if not task.done() or task.cancelled() or task.exception():
8095 if task_name.startswith(self.task_name_deploy_vca):
8096 # A N2VC task is pending
8097 db_nsr_update["config-status"] = "failed"
8098 else:
8099 # RO task is pending
8100 db_nsr_update["operational-status"] = "failed"
8101 else:
8102 error_description_nslcmop = None
8103 nslcmop_operation_state = "COMPLETED"
8104 db_nslcmop_update["detailed-status"] = "Done"
8105 db_nsr_update["detailed-status"] = "Done"
8106 db_nsr_update["operational-status"] = "running"
8107 db_nsr_update["config-status"] = "configured"
8108
8109 self._write_op_status(
8110 op_id=nslcmop_id,
8111 stage="",
8112 error_message=error_description_nslcmop,
8113 operation_state=nslcmop_operation_state,
8114 other_update=db_nslcmop_update,
8115 )
8116 if db_nsr:
8117 self._write_ns_status(
8118 nsr_id=nsr_id,
8119 ns_state=None,
8120 current_operation="IDLE",
8121 current_operation_id=None,
8122 other_update=db_nsr_update,
8123 )
8124
8125 if nslcmop_operation_state:
8126 try:
8127 msg = {
8128 "nsr_id": nsr_id,
8129 "nslcmop_id": nslcmop_id,
8130 "operationState": nslcmop_operation_state,
8131 }
8132 await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
8133 except Exception as e:
8134 self.logger.error(
8135 logging_text + "kafka_write notification Exception {}".format(e)
8136 )
8137 self.logger.debug(logging_text + "Exit")
8138 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8139
8140 async def heal_RO(
8141 self,
8142 logging_text,
8143 nsr_id,
8144 db_nslcmop,
8145 stage,
8146 ):
8147 """
8148 Heal at RO
8149 :param logging_text: preffix text to use at logging
8150 :param nsr_id: nsr identity
8151 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8152 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8153 :return: None or exception
8154 """
8155
8156 def get_vim_account(vim_account_id):
8157 nonlocal db_vims
8158 if vim_account_id in db_vims:
8159 return db_vims[vim_account_id]
8160 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8161 db_vims[vim_account_id] = db_vim
8162 return db_vim
8163
8164 try:
8165 start_heal = time()
8166 ns_params = db_nslcmop.get("operationParams")
8167 if ns_params and ns_params.get("timeout_ns_heal"):
8168 timeout_ns_heal = ns_params["timeout_ns_heal"]
8169 else:
8170 timeout_ns_heal = self.timeout.ns_heal
8171
8172 db_vims = {}
8173
8174 nslcmop_id = db_nslcmop["_id"]
8175 target = {
8176 "action_id": nslcmop_id,
8177 }
8178 self.logger.warning(
8179 "db_nslcmop={} and timeout_ns_heal={}".format(
8180 db_nslcmop, timeout_ns_heal
8181 )
8182 )
8183 target.update(db_nslcmop.get("operationParams", {}))
8184
8185 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8186 desc = await self.RO.recreate(nsr_id, target)
8187 self.logger.debug("RO return > {}".format(desc))
8188 action_id = desc["action_id"]
8189 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8190 await self._wait_ng_ro(
8191 nsr_id,
8192 action_id,
8193 nslcmop_id,
8194 start_heal,
8195 timeout_ns_heal,
8196 stage,
8197 operation="healing",
8198 )
8199
8200 # Updating NSR
8201 db_nsr_update = {
8202 "_admin.deployed.RO.operational-status": "running",
8203 "detailed-status": " ".join(stage),
8204 }
8205 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8206 self._write_op_status(nslcmop_id, stage)
8207 self.logger.debug(
8208 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8209 )
8210
8211 except Exception as e:
8212 stage[2] = "ERROR healing at VIM"
8213 # self.set_vnfr_at_error(db_vnfrs, str(e))
8214 self.logger.error(
8215 "Error healing at VIM {}".format(e),
8216 exc_info=not isinstance(
8217 e,
8218 (
8219 ROclient.ROClientException,
8220 LcmException,
8221 DbException,
8222 NgRoException,
8223 ),
8224 ),
8225 )
8226 raise
8227
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """
        Schedule one heal_N2VC asyncio task per execution environment (charm)
        declared in descriptor_config for the element being healed.

        For each charm, the matching entry in <nsrs>._admin.deployed.VCA is
        looked up; if none exists a new one is created and persisted. The
        launched tasks are registered in lcm_tasks and recorded in
        task_instantiation_info (dict task -> task name, updated in place) so
        the caller can wait for them.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            # NOTE(review): this guard is always true — this branch is only
            # reached when "execution-environment-list" is absent
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # derive the VCA type from the descriptor: juju charms may be
            # lxc/k8s proxy or native; helm charts map to helm v2/v3
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # find an existing VCA entry matching this element and EE; the
            # for/else creates a new entry when the loop ends without break
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory copy consistent with the db write above
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8389
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Heal one VCA execution environment (charm) of a VDU/VNF/NS element.

        For native charms: waits for the VM, re-registers the execution
        environment in the VCA and re-installs the charm software. For proxy
        charms: waits for RO to finish the healing operation and re-injects the
        SSH public key into the VM when required. If deploy_params requests it
        (run-day1), the initial config primitives are executed again.

        Progress is persisted in <nsrs>.configurationStatus.<vca_index>; on any
        failure the status is set to BROKEN and an LcmException is raised
        (chained to the original exception).
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        # location passed to N2VC so it can write status under this VCA entry
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace from NS down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # pass the 'config' primitive (if declared) as install-time config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            # mark this VCA as broken and let the caller record the failure
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8799
8800 async def _wait_heal_ro(
8801 self,
8802 nsr_id,
8803 timeout=600,
8804 ):
8805 start_time = time()
8806 while time() <= start_time + timeout:
8807 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8808 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8809 "operational-status"
8810 ]
8811 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8812 if operational_status_ro != "healing":
8813 break
8814 await asyncio.sleep(15, loop=self.loop)
8815 else: # timeout_ns_deploy
8816 raise NgRoException("Timeout waiting ns to deploy")
8817
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of vertical scale operation

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is populated below but never written to
        # the nsrs collection in this method — confirm whether that is intended
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            target = {}
            target.update(operationParams)
            # delegate the scaling to RO and wait for the action to complete
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.verticalscale,
                operation="verticalscale",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            # NOTE(review): error_message is always "" even when exc is set —
            # heal() passes the failure description here; confirm if this
            # should do the same
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                # notify operation result through kafka (best-effort)
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")