Feature 11002: Deprecate helmv2
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmBase,
57 deep_get,
58 get_iterable,
59 populate_dict,
60 check_juju_bundle_existence,
61 get_charm_artifact_path,
62 get_ee_id_parts,
63 vld_to_ro_ip_profile,
64 )
65 from osm_lcm.data_utils.nsd import (
66 get_ns_configuration_relation_list,
67 get_vnf_profile,
68 get_vnf_profiles,
69 )
70 from osm_lcm.data_utils.vnfd import (
71 get_kdu,
72 get_kdu_services,
73 get_relation_list,
74 get_vdu_list,
75 get_vdu_profile,
76 get_ee_sorted_initial_config_primitive_list,
77 get_ee_sorted_terminate_config_primitive_list,
78 get_kdu_list,
79 get_virtual_link_profiles,
80 get_vdu,
81 get_configuration,
82 get_vdu_index,
83 get_scaling_aspect,
84 get_number_of_instances,
85 get_juju_ee_ref,
86 get_kdu_resource_profile,
87 find_software_version,
88 check_helm_ee_in_ns,
89 )
90 from osm_lcm.data_utils.list_utils import find_in_list
91 from osm_lcm.data_utils.vnfr import (
92 get_osm_params,
93 get_vdur_index,
94 get_kdur,
95 get_volumes_from_instantiation_params,
96 )
97 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
98 from osm_lcm.data_utils.database.vim_account import VimAccountDB
99 from n2vc.definitions import RelationEndpoint
100 from n2vc.k8s_helm3_conn import K8sHelm3Connector
101 from n2vc.k8s_juju_conn import K8sJujuConnector
102
103 from osm_common.dbbase import DbException
104 from osm_common.fsbase import FsException
105
106 from osm_lcm.data_utils.database.database import Database
107 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
108 from osm_lcm.data_utils.wim import (
109 get_sdn_ports,
110 get_target_wim_attrs,
111 select_feasible_wim_account,
112 )
113
114 from n2vc.n2vc_juju_conn import N2VCJujuConnector
115 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
116
117 from osm_lcm.lcm_helm_conn import LCMHelmConn
118 from osm_lcm.osm_config import OsmConfigBuilder
119 from osm_lcm.prometheus import parse_job
120
121 from copy import copy, deepcopy
122 from time import time
123 from uuid import uuid4
124
125 from random import SystemRandom
126
127 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
128
129
130 class NsLcm(LcmBase):
131 SUBOPERATION_STATUS_NOT_FOUND = -1
132 SUBOPERATION_STATUS_NEW = -2
133 SUBOPERATION_STATUS_SKIP = -3
134 EE_TLS_NAME = "ee-tls"
135 task_name_deploy_vca = "Deploying VCA"
136 rel_operation_types = {
137 "GE": ">=",
138 "LE": "<=",
139 "GT": ">",
140 "LT": "<",
141 "EQ": "==",
142 "NE": "!=",
143 }
144
145 def __init__(self, msg, lcm_tasks, config: LcmCfg):
146 """
147 Init, Connect to database, filesystem storage, and messaging
148 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
149 :return: None
150 """
151 super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))
152
153 self.db = Database().instance.db
154 self.fs = Filesystem().instance.fs
155 self.lcm_tasks = lcm_tasks
156 self.timeout = config.timeout
157 self.ro_config = config.RO
158 self.vca_config = config.VCA
159
160 # create N2VC connector
161 self.n2vc = N2VCJujuConnector(
162 log=self.logger,
163 on_update_db=self._on_update_n2vc_db,
164 fs=self.fs,
165 db=self.db,
166 )
167
168 self.conn_helm_ee = LCMHelmConn(
169 log=self.logger,
170 vca_config=self.vca_config,
171 on_update_db=self._on_update_n2vc_db,
172 )
173
174 self.k8sclusterhelm3 = K8sHelm3Connector(
175 kubectl_command=self.vca_config.kubectlpath,
176 helm_command=self.vca_config.helm3path,
177 fs=self.fs,
178 log=self.logger,
179 db=self.db,
180 on_update_db=None,
181 )
182
183 self.k8sclusterjuju = K8sJujuConnector(
184 kubectl_command=self.vca_config.kubectlpath,
185 juju_command=self.vca_config.jujupath,
186 log=self.logger,
187 on_update_db=self._on_update_k8s_db,
188 fs=self.fs,
189 db=self.db,
190 )
191
192 self.k8scluster_map = {
193 "helm-chart-v3": self.k8sclusterhelm3,
194 "chart": self.k8sclusterhelm3,
195 "juju-bundle": self.k8sclusterjuju,
196 "juju": self.k8sclusterjuju,
197 }
198
199 self.vca_map = {
200 "lxc_proxy_charm": self.n2vc,
201 "native_charm": self.n2vc,
202 "k8s_proxy_charm": self.n2vc,
203 "helm": self.conn_helm_ee,
204 "helm-v3": self.conn_helm_ee,
205 }
206
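# Note on the two maps above (informal annotation; helm v2 support is being
# deprecated by Feature 11002): "chart" and "helm-chart-v3" both resolve to the
# Helm 3 connector, and "helm" and "helm-v3" both resolve to the helm
# execution-environment connector; the short keys are presumably kept as
# aliases for descriptors that predate the v3 naming.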
207 # create RO client
208 self.RO = NgRoClient(**self.ro_config.to_dict())
209
210 self.op_status_map = {
211 "instantiation": self.RO.status,
212 "termination": self.RO.status,
213 "migrate": self.RO.status,
214 "healing": self.RO.recreate_status,
215 "verticalscale": self.RO.status,
216 "start_stop_rebuild": self.RO.status,
217 }
218
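# Dispatch note (informal annotation, not in the original source): every
# operation type in op_status_map polls NG-RO through RO.status except
# "healing", which uses RO.recreate_status; _wait_ng_ro selects the right
# coroutine through this map.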
219 @staticmethod
220 def increment_ip_mac(ip_mac, vm_index=1):
221 if not isinstance(ip_mac, str):
222 return ip_mac
223 try:
224 # try with ipv4 look for last dot
225 i = ip_mac.rfind(".")
226 if i > 0:
227 i += 1
228 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
229 # try with ipv6 or mac look for last colon. Operate in hex
230 i = ip_mac.rfind(":")
231 if i > 0:
232 i += 1
233 # format in hex, len can be 2 for mac or 4 for ipv6
234 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
235 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
236 )
237 except Exception:
238 pass
239 return None
240
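# Illustrative behaviour of increment_ip_mac (hypothetical values, added here
# for documentation only):
#   NsLcm.increment_ip_mac("10.0.0.5", vm_index=2)        -> "10.0.0.7"
#   NsLcm.increment_ip_mac("fa:16:3e:00:00:0a")           -> "fa:16:3e:00:00:0b"
#   NsLcm.increment_ip_mac("2001:db8::0010", vm_index=1)  -> "2001:db8::0011"
#   NsLcm.increment_ip_mac(1234)                          -> 1234 (non-strings pass through)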
241 async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
242 # remove last dot from path (if exists)
243 if path.endswith("."):
244 path = path[:-1]
245
246 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
247 # .format(table, filter, path, updated_data))
248 try:
249 nsr_id = filter.get("_id")
250
251 # read ns record from database
252 nsr = self.db.get_one(table="nsrs", q_filter=filter)
253 current_ns_status = nsr.get("nsState")
254
255 # get vca status for NS
256 status_dict = await self.n2vc.get_status(
257 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
258 )
259
260 # vcaStatus
261 db_dict = dict()
262 db_dict["vcaStatus"] = status_dict
263
264 # update configurationStatus for this VCA
265 try:
266 vca_index = int(path[path.rfind(".") + 1 :])
267
268 vca_list = deep_get(
269 target_dict=nsr, key_list=("_admin", "deployed", "VCA")
270 )
271 vca_status = vca_list[vca_index].get("status")
272
273 configuration_status_list = nsr.get("configurationStatus")
274 config_status = configuration_status_list[vca_index].get("status")
275
276 if config_status == "BROKEN" and vca_status != "failed":
277 db_dict["configurationStatus"][vca_index] = "READY"
278 elif config_status != "BROKEN" and vca_status == "failed":
279 db_dict["configurationStatus"][vca_index] = "BROKEN"
280 except Exception as e:
281 # do not update configurationStatus
282 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
283
284 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
285 # if nsState = 'DEGRADED' check if all is OK
286 is_degraded = False
287 if current_ns_status in ("READY", "DEGRADED"):
288 error_description = ""
289 # check machines
290 if status_dict.get("machines"):
291 for machine_id in status_dict.get("machines"):
292 machine = status_dict.get("machines").get(machine_id)
293 # check machine agent-status
294 if machine.get("agent-status"):
295 s = machine.get("agent-status").get("status")
296 if s != "started":
297 is_degraded = True
298 error_description += (
299 "machine {} agent-status={} ; ".format(
300 machine_id, s
301 )
302 )
303 # check machine instance status
304 if machine.get("instance-status"):
305 s = machine.get("instance-status").get("status")
306 if s != "running":
307 is_degraded = True
308 error_description += (
309 "machine {} instance-status={} ; ".format(
310 machine_id, s
311 )
312 )
313 # check applications
314 if status_dict.get("applications"):
315 for app_id in status_dict.get("applications"):
316 app = status_dict.get("applications").get(app_id)
317 # check application status
318 if app.get("status"):
319 s = app.get("status").get("status")
320 if s != "active":
321 is_degraded = True
322 error_description += (
323 "application {} status={} ; ".format(app_id, s)
324 )
325
326 if error_description:
327 db_dict["errorDescription"] = error_description
328 if current_ns_status == "READY" and is_degraded:
329 db_dict["nsState"] = "DEGRADED"
330 if current_ns_status == "DEGRADED" and not is_degraded:
331 db_dict["nsState"] = "READY"
332
333 # write to database
334 self.update_db_2("nsrs", nsr_id, db_dict)
335
336 except (asyncio.CancelledError, asyncio.TimeoutError):
337 raise
338 except Exception as e:
339 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
340
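# Degradation rule applied above (informal sketch, added for documentation):
#   nsState "READY"    + any unhealthy machine/application -> nsState "DEGRADED"
#   nsState "DEGRADED" + everything healthy again          -> nsState "READY"
# where "unhealthy" means agent-status != "started", instance-status !=
# "running", or application status != "active" in the Juju status document.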
341 async def _on_update_k8s_db(
342 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
343 ):
344 """
345 Updating vca status in NSR record
346 :param cluster_uuid: UUID of a k8s cluster
347 :param kdu_instance: The unique name of the KDU instance
348 :param filter: database filter used to obtain the nsr_id
349 :param cluster_type: The cluster type (juju, k8s)
350 :return: none
351 """
352
353 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
354 # .format(cluster_uuid, kdu_instance, filter))
355
356 nsr_id = filter.get("_id")
357 try:
358 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
359 cluster_uuid=cluster_uuid,
360 kdu_instance=kdu_instance,
361 yaml_format=False,
362 complete_status=True,
363 vca_id=vca_id,
364 )
365
366 # vcaStatus
367 db_dict = dict()
368 db_dict["vcaStatus"] = {nsr_id: vca_status}
369
370 self.logger.debug(
371 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
372 )
373
374 # write to database
375 self.update_db_2("nsrs", nsr_id, db_dict)
376 except (asyncio.CancelledError, asyncio.TimeoutError):
377 raise
378 except Exception as e:
379 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
380
381 @staticmethod
382 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
383 try:
384 env = Environment(
385 undefined=StrictUndefined,
386 autoescape=select_autoescape(default_for_string=True, default=True),
387 )
388 template = env.from_string(cloud_init_text)
389 return template.render(additional_params or {})
390 except UndefinedError as e:
391 raise LcmException(
392 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
393 "file, must be provided in the instantiation parameters inside the "
394 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
395 )
396 except (TemplateError, TemplateNotFound) as e:
397 raise LcmException(
398 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
399 vnfd_id, vdu_id, e
400 )
401 )
402
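# Illustrative rendering performed by _parse_cloud_init (hypothetical values):
#   cloud_init_text   = "#cloud-config\nhostname: {{ hostname }}"
#   additional_params = {"hostname": "vnf1-vdu1"}
#   _parse_cloud_init(cloud_init_text, additional_params, "myvnfd", "myvdu")
#     -> "#cloud-config\nhostname: vnf1-vdu1"
# With StrictUndefined, a missing "hostname" key raises UndefinedError, which
# is converted into an LcmException pointing at additionalParamsForVnf/Vdu.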
403 def _get_vdu_cloud_init_content(self, vdu, vnfd):
404 cloud_init_content = cloud_init_file = None
405 try:
406 if vdu.get("cloud-init-file"):
407 base_folder = vnfd["_admin"]["storage"]
408 if base_folder["pkg-dir"]:
409 cloud_init_file = "{}/{}/cloud_init/{}".format(
410 base_folder["folder"],
411 base_folder["pkg-dir"],
412 vdu["cloud-init-file"],
413 )
414 else:
415 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
416 base_folder["folder"],
417 vdu["cloud-init-file"],
418 )
419 with self.fs.file_open(cloud_init_file, "r") as ci_file:
420 cloud_init_content = ci_file.read()
421 elif vdu.get("cloud-init"):
422 cloud_init_content = vdu["cloud-init"]
423
424 return cloud_init_content
425 except FsException as e:
426 raise LcmException(
427 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
428 vnfd["id"], vdu["id"], cloud_init_file, e
429 )
430 )
431
432 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
433 vdur = next(
434 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
435 )
436 additional_params = vdur.get("additionalParams")
437 return parse_yaml_strings(additional_params)
438
439 @staticmethod
440 def ip_profile_2_RO(ip_profile):
441 RO_ip_profile = deepcopy(ip_profile)
442 if "dns-server" in RO_ip_profile:
443 if isinstance(RO_ip_profile["dns-server"], list):
444 RO_ip_profile["dns-address"] = []
445 for ds in RO_ip_profile.pop("dns-server"):
446 RO_ip_profile["dns-address"].append(ds["address"])
447 else:
448 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
449 if RO_ip_profile.get("ip-version") == "ipv4":
450 RO_ip_profile["ip-version"] = "IPv4"
451 if RO_ip_profile.get("ip-version") == "ipv6":
452 RO_ip_profile["ip-version"] = "IPv6"
453 if "dhcp-params" in RO_ip_profile:
454 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
455 return RO_ip_profile
456
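# Illustrative mapping performed by ip_profile_2_RO (hypothetical values):
#   in:  {"ip-version": "ipv4",
#         "dns-server": [{"address": "8.8.8.8"}],
#         "dhcp-params": {"enabled": True}}
#   out: {"ip-version": "IPv4",
#         "dns-address": ["8.8.8.8"],
#         "dhcp": {"enabled": True}}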
457 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
458 db_vdu_push_list = []
459 template_vdur = []
460 db_update = {"_admin.modified": time()}
461 if vdu_create:
462 for vdu_id, vdu_count in vdu_create.items():
463 vdur = next(
464 (
465 vdur
466 for vdur in reversed(db_vnfr["vdur"])
467 if vdur["vdu-id-ref"] == vdu_id
468 ),
469 None,
470 )
471 if not vdur:
472 # Read the template saved in the db:
473 self.logger.debug(
474 "No vdur in the database. Using the vdur-template to scale"
475 )
476 vdur_template = db_vnfr.get("vdur-template")
477 if not vdur_template:
478 raise LcmException(
479 "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
480 vdu_id
481 )
482 )
483 vdur = vdur_template[0]
484 # Delete a template from the database after using it
485 self.db.set_one(
486 "vnfrs",
487 {"_id": db_vnfr["_id"]},
488 None,
489 pull={"vdur-template": {"_id": vdur["_id"]}},
490 )
491 for count in range(vdu_count):
492 vdur_copy = deepcopy(vdur)
493 vdur_copy["status"] = "BUILD"
494 vdur_copy["status-detailed"] = None
495 vdur_copy["ip-address"] = None
496 vdur_copy["_id"] = str(uuid4())
497 vdur_copy["count-index"] += count + 1
498 vdur_copy["id"] = "{}-{}".format(
499 vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
500 )
501 vdur_copy.pop("vim_info", None)
502 for iface in vdur_copy["interfaces"]:
503 if iface.get("fixed-ip"):
504 iface["ip-address"] = self.increment_ip_mac(
505 iface["ip-address"], count + 1
506 )
507 else:
508 iface.pop("ip-address", None)
509 if iface.get("fixed-mac"):
510 iface["mac-address"] = self.increment_ip_mac(
511 iface["mac-address"], count + 1
512 )
513 else:
514 iface.pop("mac-address", None)
515 if db_vnfr["vdur"]:
516 iface.pop(
517 "mgmt_vnf", None
518 ) # only the first vdu can be management of the vnf
519 db_vdu_push_list.append(vdur_copy)
520 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
521 if vdu_delete:
522 if len(db_vnfr["vdur"]) == 1:
523 # The scale will move to 0 instances
524 self.logger.debug(
525 "Scaling to 0 !, creating the template with the last vdur"
526 )
527 template_vdur = [db_vnfr["vdur"][0]]
528 for vdu_id, vdu_count in vdu_delete.items():
529 if mark_delete:
530 indexes_to_delete = [
531 iv[0]
532 for iv in enumerate(db_vnfr["vdur"])
533 if iv[1]["vdu-id-ref"] == vdu_id
534 ]
535 db_update.update(
536 {
537 "vdur.{}.status".format(i): "DELETING"
538 for i in indexes_to_delete[-vdu_count:]
539 }
540 )
541 else:
542 # it must be deleted one by one because common.db does not allow otherwise
543 vdus_to_delete = [
544 v
545 for v in reversed(db_vnfr["vdur"])
546 if v["vdu-id-ref"] == vdu_id
547 ]
548 for vdu in vdus_to_delete[:vdu_count]:
549 self.db.set_one(
550 "vnfrs",
551 {"_id": db_vnfr["_id"]},
552 None,
553 pull={"vdur": {"_id": vdu["_id"]}},
554 )
555 db_push = {}
556 if db_vdu_push_list:
557 db_push["vdur"] = db_vdu_push_list
558 if template_vdur:
559 db_push["vdur-template"] = template_vdur
560 if not db_push:
561 db_push = None
562 db_vnfr["vdur-template"] = template_vdur
563 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
564 # modify passed dictionary db_vnfr
565 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
566 db_vnfr["vdur"] = db_vnfr_["vdur"]
567
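# Scale-out copy semantics used above (informal sketch, hypothetical values):
#   base vdur {"vdu-id-ref": "mgmtVM", "count-index": 0, "id": "mgmtVM-0"} with
#   vdu_create = {"mgmtVM": 2} yields two copies: count-index 1 / id "mgmtVM-1"
#   and count-index 2 / id "mgmtVM-2". Each copy gets status "BUILD", a fresh
#   uuid4 _id, fixed-ip/fixed-mac interface addresses advanced through
#   increment_ip_mac, and non-fixed addresses dropped.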
568 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
569 """
570 Updates database nsr with the RO info for the created vld
571 :param ns_update_nsr: dictionary to be filled with the updated info
572 :param db_nsr: content of db_nsr. This is also modified
573 :param nsr_desc_RO: nsr descriptor from RO
574 :return: Nothing, LcmException is raised on errors
575 """
576
577 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
578 for net_RO in get_iterable(nsr_desc_RO, "nets"):
579 if vld["id"] != net_RO.get("ns_net_osm_id"):
580 continue
581 vld["vim-id"] = net_RO.get("vim_net_id")
582 vld["name"] = net_RO.get("vim_name")
583 vld["status"] = net_RO.get("status")
584 vld["status-detailed"] = net_RO.get("error_msg")
585 ns_update_nsr["vld.{}".format(vld_index)] = vld
586 break
587 else:
588 raise LcmException(
589 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
590 )
591
592 def set_vnfr_at_error(self, db_vnfrs, error_text):
593 try:
594 for db_vnfr in db_vnfrs.values():
595 vnfr_update = {"status": "ERROR"}
596 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
597 if "status" not in vdur:
598 vdur["status"] = "ERROR"
599 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
600 if error_text:
601 vdur["status-detailed"] = str(error_text)
602 vnfr_update[
603 "vdur.{}.status-detailed".format(vdu_index)
604 ] = "ERROR"
605 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
606 except DbException as e:
607 self.logger.error("Cannot update vnf. {}".format(e))
608
609 def _get_ns_config_info(self, nsr_id):
610 """
611 Generates a mapping between vnf,vdu elements and the N2VC id
612 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
613 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
614 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
615 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
616 """
617 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
618 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
619 mapping = {}
620 ns_config_info = {"osm-config-mapping": mapping}
621 for vca in vca_deployed_list:
622 if not vca["member-vnf-index"]:
623 continue
624 if not vca["vdu_id"]:
625 mapping[vca["member-vnf-index"]] = vca["application"]
626 else:
627 mapping[
628 "{}.{}.{}".format(
629 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
630 )
631 ] = vca["application"]
632 return ns_config_info
633
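# Example of the structure returned by _get_ns_config_info (hypothetical names):
#   {"osm-config-mapping": {
#       "1": "app-vnf-a1b2c3",            # vnf-level charm
#       "1.mgmtVM.0": "app-vdu-d4e5f6"}}  # vdu-level charm, replica 0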
634 async def _instantiate_ng_ro(
635 self,
636 logging_text,
637 nsr_id,
638 nsd,
639 db_nsr,
640 db_nslcmop,
641 db_vnfrs,
642 db_vnfds,
643 n2vc_key_list,
644 stage,
645 start_deploy,
646 timeout_ns_deploy,
647 ):
648 db_vims = {}
649
650 def get_vim_account(vim_account_id):
651 nonlocal db_vims
652 if vim_account_id in db_vims:
653 return db_vims[vim_account_id]
654 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
655 db_vims[vim_account_id] = db_vim
656 return db_vim
657
658 # modify target_vld info with instantiation parameters
659 def parse_vld_instantiation_params(
660 target_vim, target_vld, vld_params, target_sdn
661 ):
662 if vld_params.get("ip-profile"):
663 target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
664 vld_params["ip-profile"]
665 )
666 if vld_params.get("provider-network"):
667 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
668 "provider-network"
669 ]
670 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
671 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
672 "provider-network"
673 ]["sdn-ports"]
674
675 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
676 # if wim_account_id is specified in vld_params, validate if it is feasible.
677 wim_account_id, db_wim = select_feasible_wim_account(
678 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
679 )
680
681 if wim_account_id:
682 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
683 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
684 # update vld_params with correct WIM account Id
685 vld_params["wimAccountId"] = wim_account_id
686
687 target_wim = "wim:{}".format(wim_account_id)
688 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
689 sdn_ports = get_sdn_ports(vld_params, db_wim)
690 if len(sdn_ports) > 0:
691 target_vld["vim_info"][target_wim] = target_wim_attrs
692 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
693
694 self.logger.debug(
695 "Target VLD with WIM data: {:s}".format(str(target_vld))
696 )
697
698 for param in ("vim-network-name", "vim-network-id"):
699 if vld_params.get(param):
700 if isinstance(vld_params[param], dict):
701 for vim, vim_net in vld_params[param].items():
702 other_target_vim = "vim:" + vim
703 populate_dict(
704 target_vld["vim_info"],
705 (other_target_vim, param.replace("-", "_")),
706 vim_net,
707 )
708 else: # isinstance str
709 target_vld["vim_info"][target_vim][
710 param.replace("-", "_")
711 ] = vld_params[param]
712 if vld_params.get("common_id"):
713 target_vld["common_id"] = vld_params.get("common_id")
714
715 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
716 def update_ns_vld_target(target, ns_params):
717 for vnf_params in ns_params.get("vnf", ()):
718 if vnf_params.get("vimAccountId"):
719 target_vnf = next(
720 (
721 vnfr
722 for vnfr in db_vnfrs.values()
723 if vnf_params["member-vnf-index"]
724 == vnfr["member-vnf-index-ref"]
725 ),
726 None,
727 )
728 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
729 if not vdur:
730 continue
731 for a_index, a_vld in enumerate(target["ns"]["vld"]):
732 target_vld = find_in_list(
733 get_iterable(vdur, "interfaces"),
734 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
735 )
736
737 vld_params = find_in_list(
738 get_iterable(ns_params, "vld"),
739 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
740 )
741 if target_vld:
742 if vnf_params.get("vimAccountId") not in a_vld.get(
743 "vim_info", {}
744 ):
745 target_vim_network_list = [
746 v for _, v in a_vld.get("vim_info").items()
747 ]
748 target_vim_network_name = next(
749 (
750 item.get("vim_network_name", "")
751 for item in target_vim_network_list
752 ),
753 "",
754 )
755
756 target["ns"]["vld"][a_index].get("vim_info").update(
757 {
758 "vim:{}".format(vnf_params["vimAccountId"]): {
759 "vim_network_name": target_vim_network_name,
760 }
761 }
762 )
763
764 if vld_params:
765 for param in ("vim-network-name", "vim-network-id"):
766 if vld_params.get(param) and isinstance(
767 vld_params[param], dict
768 ):
769 for vim, vim_net in vld_params[
770 param
771 ].items():
772 other_target_vim = "vim:" + vim
773 populate_dict(
774 target["ns"]["vld"][a_index].get(
775 "vim_info"
776 ),
777 (
778 other_target_vim,
779 param.replace("-", "_"),
780 ),
781 vim_net,
782 )
783
784 nslcmop_id = db_nslcmop["_id"]
785 target = {
786 "name": db_nsr["name"],
787 "ns": {"vld": []},
788 "vnf": [],
789 "image": deepcopy(db_nsr["image"]),
790 "flavor": deepcopy(db_nsr["flavor"]),
791 "action_id": nslcmop_id,
792 "cloud_init_content": {},
793 }
794 for image in target["image"]:
795 image["vim_info"] = {}
796 for flavor in target["flavor"]:
797 flavor["vim_info"] = {}
798 if db_nsr.get("shared-volumes"):
799 target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
800 for shared_volumes in target["shared-volumes"]:
801 shared_volumes["vim_info"] = {}
802 if db_nsr.get("affinity-or-anti-affinity-group"):
803 target["affinity-or-anti-affinity-group"] = deepcopy(
804 db_nsr["affinity-or-anti-affinity-group"]
805 )
806 for affinity_or_anti_affinity_group in target[
807 "affinity-or-anti-affinity-group"
808 ]:
809 affinity_or_anti_affinity_group["vim_info"] = {}
810
811 if db_nslcmop.get("lcmOperationType") != "instantiate":
812 # get parameters of instantiation:
813 db_nslcmop_instantiate = self.db.get_list(
814 "nslcmops",
815 {
816 "nsInstanceId": db_nslcmop["nsInstanceId"],
817 "lcmOperationType": "instantiate",
818 },
819 )[-1]
820 ns_params = db_nslcmop_instantiate.get("operationParams")
821 else:
822 ns_params = db_nslcmop.get("operationParams")
823 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
824 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
825
826 cp2target = {}
827 for vld_index, vld in enumerate(db_nsr.get("vld")):
828 target_vim = "vim:{}".format(ns_params["vimAccountId"])
829 target_vld = {
830 "id": vld["id"],
831 "name": vld["name"],
832 "mgmt-network": vld.get("mgmt-network", False),
833 "type": vld.get("type"),
834 "vim_info": {
835 target_vim: {
836 "vim_network_name": vld.get("vim-network-name"),
837 "vim_account_id": ns_params["vimAccountId"],
838 }
839 },
840 }
841 # check if this network needs SDN assist
842 if vld.get("pci-interfaces"):
843 db_vim = get_vim_account(ns_params["vimAccountId"])
844 if vim_config := db_vim.get("config"):
845 if sdnc_id := vim_config.get("sdn-controller"):
846 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
847 target_sdn = "sdn:{}".format(sdnc_id)
848 target_vld["vim_info"][target_sdn] = {
849 "sdn": True,
850 "target_vim": target_vim,
851 "vlds": [sdn_vld],
852 "type": vld.get("type"),
853 }
854
855 nsd_vnf_profiles = get_vnf_profiles(nsd)
856 for nsd_vnf_profile in nsd_vnf_profiles:
857 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
858 if cp["virtual-link-profile-id"] == vld["id"]:
859 cp2target[
860 "member_vnf:{}.{}".format(
861 cp["constituent-cpd-id"][0][
862 "constituent-base-element-id"
863 ],
864 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
865 )
866 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
867
868 # check at nsd descriptor, if there is an ip-profile
869 vld_params = {}
870 nsd_vlp = find_in_list(
871 get_virtual_link_profiles(nsd),
872 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
873 == vld["id"],
874 )
875 if (
876 nsd_vlp
877 and nsd_vlp.get("virtual-link-protocol-data")
878 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
879 ):
880 vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
881 "l3-protocol-data"
882 ]
883
884 # update vld_params with instantiation params
885 vld_instantiation_params = find_in_list(
886 get_iterable(ns_params, "vld"),
887 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
888 )
889 if vld_instantiation_params:
890 vld_params.update(vld_instantiation_params)
891 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
892 target["ns"]["vld"].append(target_vld)
893 # Update the target ns_vld if the vnf vim_account is overridden by instantiation params
894 update_ns_vld_target(target, ns_params)
895
896 for vnfr in db_vnfrs.values():
897 vnfd = find_in_list(
898 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
899 )
900 vnf_params = find_in_list(
901 get_iterable(ns_params, "vnf"),
902 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
903 )
904 target_vnf = deepcopy(vnfr)
905 target_vim = "vim:{}".format(vnfr["vim-account-id"])
906 for vld in target_vnf.get("vld", ()):
907 # check if connected to a ns.vld, to fill target
908 vnf_cp = find_in_list(
909 vnfd.get("int-virtual-link-desc", ()),
910 lambda cpd: cpd.get("id") == vld["id"],
911 )
912 if vnf_cp:
913 ns_cp = "member_vnf:{}.{}".format(
914 vnfr["member-vnf-index-ref"], vnf_cp["id"]
915 )
916 if cp2target.get(ns_cp):
917 vld["target"] = cp2target[ns_cp]
918
919 vld["vim_info"] = {
920 target_vim: {"vim_network_name": vld.get("vim-network-name")}
921 }
922 # check if this network needs SDN assist
923 target_sdn = None
924 if vld.get("pci-interfaces"):
925 db_vim = get_vim_account(vnfr["vim-account-id"])
926 sdnc_id = db_vim["config"].get("sdn-controller")
927 if sdnc_id:
928 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
929 target_sdn = "sdn:{}".format(sdnc_id)
930 vld["vim_info"][target_sdn] = {
931 "sdn": True,
932 "target_vim": target_vim,
933 "vlds": [sdn_vld],
934 "type": vld.get("type"),
935 }
936
937 # check at vnfd descriptor, if there is an ip-profile
938 vld_params = {}
939 vnfd_vlp = find_in_list(
940 get_virtual_link_profiles(vnfd),
941 lambda a_link_profile: a_link_profile["id"] == vld["id"],
942 )
943 if (
944 vnfd_vlp
945 and vnfd_vlp.get("virtual-link-protocol-data")
946 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
947 ):
948 vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
949 "l3-protocol-data"
950 ]
951 # update vld_params with instantiation params
952 if vnf_params:
953 vld_instantiation_params = find_in_list(
954 get_iterable(vnf_params, "internal-vld"),
955 lambda i_vld: i_vld["name"] == vld["id"],
956 )
957 if vld_instantiation_params:
958 vld_params.update(vld_instantiation_params)
959 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
960
961 vdur_list = []
962 for vdur in target_vnf.get("vdur", ()):
963 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
964 continue # This vdu must not be created
965 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
966
967 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
968
969 if ssh_keys_all:
970 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
971 vnf_configuration = get_configuration(vnfd, vnfd["id"])
972 if (
973 vdu_configuration
974 and vdu_configuration.get("config-access")
975 and vdu_configuration.get("config-access").get("ssh-access")
976 ):
977 vdur["ssh-keys"] = ssh_keys_all
978 vdur["ssh-access-required"] = vdu_configuration[
979 "config-access"
980 ]["ssh-access"]["required"]
981 elif (
982 vnf_configuration
983 and vnf_configuration.get("config-access")
984 and vnf_configuration.get("config-access").get("ssh-access")
985 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
986 ):
987 vdur["ssh-keys"] = ssh_keys_all
988 vdur["ssh-access-required"] = vnf_configuration[
989 "config-access"
990 ]["ssh-access"]["required"]
991 elif ssh_keys_instantiation and find_in_list(
992 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
993 ):
994 vdur["ssh-keys"] = ssh_keys_instantiation
995
996 self.logger.debug("NS > vdur > {}".format(vdur))
997
998 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
999 # cloud-init
1000 if vdud.get("cloud-init-file"):
1001 vdur["cloud-init"] = "{}:file:{}".format(
1002 vnfd["_id"], vdud.get("cloud-init-file")
1003 )
1004 # read the file and put its content at target.cloud_init_content, so ng_ro does not need the shared package filesystem
1005 if vdur["cloud-init"] not in target["cloud_init_content"]:
1006 base_folder = vnfd["_admin"]["storage"]
1007 if base_folder["pkg-dir"]:
1008 cloud_init_file = "{}/{}/cloud_init/{}".format(
1009 base_folder["folder"],
1010 base_folder["pkg-dir"],
1011 vdud.get("cloud-init-file"),
1012 )
1013 else:
1014 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1015 base_folder["folder"],
1016 vdud.get("cloud-init-file"),
1017 )
1018 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1019 target["cloud_init_content"][
1020 vdur["cloud-init"]
1021 ] = ci_file.read()
1022 elif vdud.get("cloud-init"):
1023 vdur["cloud-init"] = "{}:vdu:{}".format(
1024 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1025 )
1026 # put content at target.cloud_init_content, so ng_ro does not need to read the vnfd descriptor
1027 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1028 "cloud-init"
1029 ]
1030 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1031 deploy_params_vdu = self._format_additional_params(
1032 vdur.get("additionalParams") or {}
1033 )
1034 deploy_params_vdu["OSM"] = get_osm_params(
1035 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1036 )
1037 vdur["additionalParams"] = deploy_params_vdu
1038
1039 # flavor
1040 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1041 if target_vim not in ns_flavor["vim_info"]:
1042 ns_flavor["vim_info"][target_vim] = {}
1043
1044 # deal with images
1045 # in case alternative images are provided, check whether one of them applies
1046 # to this vim_type and, if so, switch the vdur to that alternative image
1047 ns_image_id = int(vdur["ns-image-id"])
1048 if vdur.get("alt-image-ids"):
1049 db_vim = get_vim_account(vnfr["vim-account-id"])
1050 vim_type = db_vim["vim_type"]
1051 for alt_image_id in vdur.get("alt-image-ids"):
1052 ns_alt_image = target["image"][int(alt_image_id)]
1053 if vim_type == ns_alt_image.get("vim-type"):
1054 # must use alternative image
1055 self.logger.debug(
1056 "use alternative image id: {}".format(alt_image_id)
1057 )
1058 ns_image_id = alt_image_id
1059 vdur["ns-image-id"] = ns_image_id
1060 break
1061 ns_image = target["image"][int(ns_image_id)]
1062 if target_vim not in ns_image["vim_info"]:
1063 ns_image["vim_info"][target_vim] = {}
1064
1065 # Affinity groups
1066 if vdur.get("affinity-or-anti-affinity-group-id"):
1067 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1068 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1069 if target_vim not in ns_ags["vim_info"]:
1070 ns_ags["vim_info"][target_vim] = {}
1071
1072 # shared-volumes
1073 if vdur.get("shared-volumes-id"):
1074 for sv_id in vdur["shared-volumes-id"]:
1075 ns_sv = find_in_list(
1076 target["shared-volumes"], lambda sv: sv_id in sv["id"]
1077 )
1078 if ns_sv:
1079 ns_sv["vim_info"][target_vim] = {}
1080
1081 vdur["vim_info"] = {target_vim: {}}
1082 # instantiation parameters
1083 if vnf_params:
1084 vdu_instantiation_params = find_in_list(
1085 get_iterable(vnf_params, "vdu"),
1086 lambda i_vdu: i_vdu["id"] == vdud["id"],
1087 )
1088 if vdu_instantiation_params:
1089 # Parse the vdu_volumes from the instantiation params
1090 vdu_volumes = get_volumes_from_instantiation_params(
1091 vdu_instantiation_params, vdud
1092 )
1093 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1094 vdur["additionalParams"]["OSM"][
1095 "vim_flavor_id"
1096 ] = vdu_instantiation_params.get("vim-flavor-id")
1097 vdur_list.append(vdur)
1098 target_vnf["vdur"] = vdur_list
1099 target["vnf"].append(target_vnf)
1100
1101 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1102 desc = await self.RO.deploy(nsr_id, target)
1103 self.logger.debug("RO return > {}".format(desc))
1104 action_id = desc["action_id"]
1105 await self._wait_ng_ro(
1106 nsr_id,
1107 action_id,
1108 nslcmop_id,
1109 start_deploy,
1110 timeout_ns_deploy,
1111 stage,
1112 operation="instantiation",
1113 )
1114
1115 # Updating NSR
1116 db_nsr_update = {
1117 "_admin.deployed.RO.operational-status": "running",
1118 "detailed-status": " ".join(stage),
1119 }
1120 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1121 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1122 self._write_op_status(nslcmop_id, stage)
1123 self.logger.debug(
1124 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1125 )
1126 return
1127
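# Shape of the "target" document sent to NG-RO by this method (abridged sketch,
# hypothetical identifiers; only keys built above are shown):
#   {"name": "<ns name>", "action_id": "<nslcmop id>",
#    "ns": {"vld": [{"id": "mgmtnet", "vim_info": {"vim:<vim id>": {...}}, ...}]},
#    "vnf": [{"_id": "<vnfr id>", "vdur": [...], "vld": [...], ...}],
#    "image": [...], "flavor": [...], "cloud_init_content": {...}}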
1128 async def _wait_ng_ro(
1129 self,
1130 nsr_id,
1131 action_id,
1132 nslcmop_id=None,
1133 start_time=None,
1134 timeout=600,
1135 stage=None,
1136 operation=None,
1137 ):
1138 detailed_status_old = None
1139 db_nsr_update = {}
1140 start_time = start_time or time()
1141 while time() <= start_time + timeout:
1142 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1143 self.logger.debug("Wait NG RO > {}".format(desc_status))
1144 if desc_status["status"] == "FAILED":
1145 raise NgRoException(desc_status["details"])
1146 elif desc_status["status"] == "BUILD":
1147 if stage:
1148 stage[2] = "VIM: ({})".format(desc_status["details"])
1149 elif desc_status["status"] == "DONE":
1150 if stage:
1151 stage[2] = "Deployed at VIM"
1152 break
1153 else:
1154 assert False, "RO operation status returned unknown value {}".format(
1155 desc_status["status"]
1156 )
1157 if stage and nslcmop_id and stage[2] != detailed_status_old:
1158 detailed_status_old = stage[2]
1159 db_nsr_update["detailed-status"] = " ".join(stage)
1160 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1161 self._write_op_status(nslcmop_id, stage)
1162 await asyncio.sleep(15)
1163 else: # timeout_ns_deploy
1164 raise NgRoException("Timeout waiting for ns to deploy")
1165
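# Note on the polling loop above (added for clarity): it relies on Python's
# while/else semantics -- the "else" branch runs only when the loop condition
# becomes false without a "break", i.e. on timeout; a DONE status breaks out
# first, so NgRoException("Timeout ...") is raised only when the time expires.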
1166 async def _terminate_ng_ro(
1167 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1168 ):
1169 db_nsr_update = {}
1170 failed_detail = []
1171 action_id = None
1172 start_deploy = time()
1173 try:
1174 target = {
1175 "ns": {"vld": []},
1176 "vnf": [],
1177 "image": [],
1178 "flavor": [],
1179 "action_id": nslcmop_id,
1180 }
1181 desc = await self.RO.deploy(nsr_id, target)
1182 action_id = desc["action_id"]
1183 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1184 self.logger.debug(
1185 logging_text
1186 + "ns terminate action at RO. action_id={}".format(action_id)
1187 )
1188
1189 # wait until done
1190 delete_timeout = 20 * 60 # 20 minutes
1191 await self._wait_ng_ro(
1192 nsr_id,
1193 action_id,
1194 nslcmop_id,
1195 start_deploy,
1196 delete_timeout,
1197 stage,
1198 operation="termination",
1199 )
1200 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1201 # delete all nsr
1202 await self.RO.delete(nsr_id)
1203 except NgRoException as e:
1204 if e.http_code == 404: # not found
1205 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1206 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1207 self.logger.debug(
1208 logging_text + "RO_action_id={} already deleted".format(action_id)
1209 )
1210 elif e.http_code == 409: # conflict
1211 failed_detail.append("delete conflict: {}".format(e))
1212 self.logger.debug(
1213 logging_text
1214 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1215 )
1216 else:
1217 failed_detail.append("delete error: {}".format(e))
1218 self.logger.error(
1219 logging_text
1220 + "RO_action_id={} delete error: {}".format(action_id, e)
1221 )
1222 except Exception as e:
1223 failed_detail.append("delete error: {}".format(e))
1224 self.logger.error(
1225 logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
1226 )
1227
1228 if failed_detail:
1229 stage[2] = "Error deleting from VIM"
1230 else:
1231 stage[2] = "Deleted from VIM"
1232 db_nsr_update["detailed-status"] = " ".join(stage)
1233 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1234 self._write_op_status(nslcmop_id, stage)
1235
1236 if failed_detail:
1237 raise LcmException("; ".join(failed_detail))
1238 return
1239
1240 async def instantiate_RO(
1241 self,
1242 logging_text,
1243 nsr_id,
1244 nsd,
1245 db_nsr,
1246 db_nslcmop,
1247 db_vnfrs,
1248 db_vnfds,
1249 n2vc_key_list,
1250 stage,
1251 ):
1252 """
1253 Instantiate at RO
1254 :param logging_text: prefix text to use for logging
1255 :param nsr_id: nsr identity
1256 :param nsd: database content of ns descriptor
1257 :param db_nsr: database content of ns record
1258 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1259 :param db_vnfrs:
1260 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1261 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1262 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1263 :return: None or exception
1264 """
1265 try:
1266 start_deploy = time()
1267 ns_params = db_nslcmop.get("operationParams")
1268 if ns_params and ns_params.get("timeout_ns_deploy"):
1269 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1270 else:
1271 timeout_ns_deploy = self.timeout.ns_deploy
1272
1273 # Check for and optionally request placement optimization. Database will be updated if placement activated
1274 stage[2] = "Waiting for Placement."
1275 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1276 # in case of placement, change ns_params[vimAccountId] if it is not present in any vnfr
1277 for vnfr in db_vnfrs.values():
1278 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1279 break
1280 else:
1281 ns_params["vimAccountId"] = vnfr["vim-account-id"]
1282
1283 return await self._instantiate_ng_ro(
1284 logging_text,
1285 nsr_id,
1286 nsd,
1287 db_nsr,
1288 db_nslcmop,
1289 db_vnfrs,
1290 db_vnfds,
1291 n2vc_key_list,
1292 stage,
1293 start_deploy,
1294 timeout_ns_deploy,
1295 )
1296 except Exception as e:
1297 stage[2] = "ERROR deploying at VIM"
1298 self.set_vnfr_at_error(db_vnfrs, str(e))
1299 self.logger.error(
1300 "Error deploying at VIM {}".format(e),
1301 exc_info=not isinstance(
1302 e,
1303 (
1304 ROclient.ROClientException,
1305 LcmException,
1306 DbException,
1307 NgRoException,
1308 ),
1309 ),
1310 )
1311 raise
1312
1313 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1314 """
1315 Wait for kdu to be up, get ip address
1316 :param logging_text: prefix used for logging
1317 :param nsr_id:
1318 :param vnfr_id:
1319 :param kdu_name:
1320 :return: IP address, K8s services
1321 """
1322
1323 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1324 nb_tries = 0
1325
1326 while nb_tries < 360:
1327 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1328 kdur = next(
1329 (
1330 x
1331 for x in get_iterable(db_vnfr, "kdur")
1332 if x.get("kdu-name") == kdu_name
1333 ),
1334 None,
1335 )
1336 if not kdur:
1337 raise LcmException(
1338 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1339 )
1340 if kdur.get("status"):
1341 if kdur["status"] in ("READY", "ENABLED"):
1342 return kdur.get("ip-address"), kdur.get("services")
1343 else:
1344 raise LcmException(
1345 "target KDU={} is in error state".format(kdu_name)
1346 )
1347
1348 await asyncio.sleep(10)
1349 nb_tries += 1
1350 raise LcmException("Timeout waiting for KDU={} to be instantiated".format(kdu_name))
1351
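# Typical await pattern for wait_kdu_up (illustrative, hypothetical names):
#   ip, services = await self.wait_kdu_up(logging_text, nsr_id, vnfr_id, "my-kdu")
# The method polls the vnfr every 10 s, up to 360 tries (~1 hour), until the
# kdur status is READY/ENABLED; it raises LcmException on error state or timeout.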
1352 async def wait_vm_up_insert_key_ro(
1353 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1354 ):
1355 """
1356 Wait for IP address at RO and, optionally, insert a public key in the virtual machine
1357 :param logging_text: prefix used for logging
1358 :param nsr_id:
1359 :param vnfr_id:
1360 :param vdu_id:
1361 :param vdu_index:
1362 :param pub_key: public ssh key to inject, None to skip
1363 :param user: user to apply the public ssh key
1364 :return: IP address
1365 """
1366
1367 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1368 ip_address = None
1369 target_vdu_id = None
1370 ro_retries = 0
1371
1372 while True:
1373 ro_retries += 1
1374 if ro_retries >= 360: # 1 hour
1375 raise LcmException(
1376 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1377 )
1378
1379 await asyncio.sleep(10)
1380
1381 # get ip address
1382 if not target_vdu_id:
1383 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1384
1385 if not vdu_id: # for the VNF case
1386 if db_vnfr.get("status") == "ERROR":
1387 raise LcmException(
1388 "Cannot inject ssh-key because target VNF is in error state"
1389 )
1390 ip_address = db_vnfr.get("ip-address")
1391 if not ip_address:
1392 continue
1393 vdur = next(
1394 (
1395 x
1396 for x in get_iterable(db_vnfr, "vdur")
1397 if x.get("ip-address") == ip_address
1398 ),
1399 None,
1400 )
1401 else: # VDU case
1402 vdur = next(
1403 (
1404 x
1405 for x in get_iterable(db_vnfr, "vdur")
1406 if x.get("vdu-id-ref") == vdu_id
1407 and x.get("count-index") == vdu_index
1408 ),
1409 None,
1410 )
1411
1412 if (
1413 not vdur and len(db_vnfr.get("vdur", ())) == 1
1414 ): # If only one, this should be the target vdu
1415 vdur = db_vnfr["vdur"][0]
1416 if not vdur:
1417 raise LcmException(
1418 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1419 vnfr_id, vdu_id, vdu_index
1420 )
1421 )
1422 # New generation RO stores information at "vim_info"
1423 ng_ro_status = None
1424 target_vim = None
1425 if vdur.get("vim_info"):
1426 target_vim = next(
1427 t for t in vdur["vim_info"]
1428 ) # there should be only one key
1429 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1430 if (
1431 vdur.get("pdu-type")
1432 or vdur.get("status") == "ACTIVE"
1433 or ng_ro_status == "ACTIVE"
1434 ):
1435 ip_address = vdur.get("ip-address")
1436 if not ip_address:
1437 continue
1438 target_vdu_id = vdur["vdu-id-ref"]
1439 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1440 raise LcmException(
1441 "Cannot inject ssh-key because target VM is in error state"
1442 )
1443
1444 if not target_vdu_id:
1445 continue
1446
1447 # inject public key into machine
1448 if pub_key and user:
1449 self.logger.debug(logging_text + "Inserting RO key")
1450 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1451 if vdur.get("pdu-type"):
1452 self.logger.error(logging_text + "Cannot inject ssh-key into a PDU")
1453 return ip_address
1454 try:
1455 target = {
1456 "action": {
1457 "action": "inject_ssh_key",
1458 "key": pub_key,
1459 "user": user,
1460 },
1461 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1462 }
1463 desc = await self.RO.deploy(nsr_id, target)
1464 action_id = desc["action_id"]
1465 await self._wait_ng_ro(
1466 nsr_id, action_id, timeout=600, operation="instantiation"
1467 )
1468 break
1469 except NgRoException as e:
1470 raise LcmException(
1471 "Reaching max tries injecting key. Error: {}".format(e)
1472 )
1473 else:
1474 break
1475
1476 return ip_address
1477
1478 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1479 """
1480 Wait until dependent VCA deployments have finished: NS waits for VNFs and VDUs; VNFs wait for VDUs
1481 """
1482 my_vca = vca_deployed_list[vca_index]
1483 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1484 # vdu or kdu: no dependencies
1485 return
1486 timeout = 300
1487 while timeout >= 0:
1488 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1489 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1490 configuration_status_list = db_nsr["configurationStatus"]
1491 for index, vca_deployed in enumerate(configuration_status_list):
1492 if index == vca_index:
1493 # myself
1494 continue
1495 if not my_vca.get("member-vnf-index") or (
1496 vca_deployed.get("member-vnf-index")
1497 == my_vca.get("member-vnf-index")
1498 ):
1499 internal_status = configuration_status_list[index].get("status")
1500 if internal_status == "READY":
1501 continue
1502 elif internal_status == "BROKEN":
1503 raise LcmException(
1504 "Configuration aborted because dependent charm/s has failed"
1505 )
1506 else:
1507 break
1508 else:
1509 # no dependencies, return
1510 return
1511 await asyncio.sleep(10)
1512 timeout -= 1
1513
1514 raise LcmException("Configuration aborted because dependent charm/s timeout")
1515
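# Dependency order enforced above (sketch): NS-level charms wait for every VNF
# and VDU charm to reach READY; VNF-level charms wait for charms of the same
# member-vnf-index; VDU/KDU charms have no dependencies and return immediately.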
1516 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1517 vca_id = None
1518 if db_vnfr:
1519 vca_id = deep_get(db_vnfr, ("vca-id",))
1520 elif db_nsr:
1521 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1522 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1523 return vca_id
1524
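# Resolution order implemented by get_vca_id (sketch): when a db_vnfr is given,
# its "vca-id" field is used (possibly None); otherwise the VCA id is looked up
# through the VIM account referenced in the NS instantiation parameters.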
1525 async def instantiate_N2VC(
1526 self,
1527 logging_text,
1528 vca_index,
1529 nsi_id,
1530 db_nsr,
1531 db_vnfr,
1532 vdu_id,
1533 kdu_name,
1534 vdu_index,
1535 kdu_index,
1536 config_descriptor,
1537 deploy_params,
1538 base_folder,
1539 nslcmop_id,
1540 stage,
1541 vca_type,
1542 vca_name,
1543 ee_config_descriptor,
1544 ):
1545 nsr_id = db_nsr["_id"]
1546 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1547 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1548 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1549 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1550 db_dict = {
1551 "collection": "nsrs",
1552 "filter": {"_id": nsr_id},
1553 "path": db_update_entry,
1554 }
1555 step = ""
1556 try:
1557 element_type = "NS"
1558 element_under_configuration = nsr_id
1559
1560 vnfr_id = None
1561 if db_vnfr:
1562 vnfr_id = db_vnfr["_id"]
1563 osm_config["osm"]["vnf_id"] = vnfr_id
1564
1565 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1566
1567 if vca_type == "native_charm":
1568 index_number = 0
1569 else:
1570 index_number = vdu_index or 0
1571
1572 if vnfr_id:
1573 element_type = "VNF"
1574 element_under_configuration = vnfr_id
1575 namespace += ".{}-{}".format(vnfr_id, index_number)
1576 if vdu_id:
1577 namespace += ".{}-{}".format(vdu_id, index_number)
1578 element_type = "VDU"
1579 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1580 osm_config["osm"]["vdu_id"] = vdu_id
1581 elif kdu_name:
1582 namespace += ".{}".format(kdu_name)
1583 element_type = "KDU"
1584 element_under_configuration = kdu_name
1585 osm_config["osm"]["kdu_name"] = kdu_name
1586
1587 # Get artifact path
1588 if base_folder["pkg-dir"]:
1589 artifact_path = "{}/{}/{}/{}".format(
1590 base_folder["folder"],
1591 base_folder["pkg-dir"],
1592 "charms"
1593 if vca_type
1594 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1595 else "helm-charts",
1596 vca_name,
1597 )
1598 else:
1599 artifact_path = "{}/Scripts/{}/{}/".format(
1600 base_folder["folder"],
1601 "charms"
1602 if vca_type
1603 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1604 else "helm-charts",
1605 vca_name,
1606 )
1607
1608 self.logger.debug("Artifact path > {}".format(artifact_path))
1609
1610 # get initial_config_primitive_list that applies to this element
1611 initial_config_primitive_list = config_descriptor.get(
1612 "initial-config-primitive"
1613 )
1614
1615 self.logger.debug(
1616 "Initial config primitive list > {}".format(
1617 initial_config_primitive_list
1618 )
1619 )
1620
1621 # add config if not present for NS charm
1622 ee_descriptor_id = ee_config_descriptor.get("id")
1623 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1624 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1625 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1626 )
1627
1628 self.logger.debug(
1629 "Initial config primitive list #2 > {}".format(
1630 initial_config_primitive_list
1631 )
1632 )
1633 # n2vc_redesign STEP 3.1
1634 # find old ee_id if exists
1635 ee_id = vca_deployed.get("ee_id")
1636
1637 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1638 # create or register execution environment in VCA
1639 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
1640 self._write_configuration_status(
1641 nsr_id=nsr_id,
1642 vca_index=vca_index,
1643 status="CREATING",
1644 element_under_configuration=element_under_configuration,
1645 element_type=element_type,
1646 )
1647
1648 step = "create execution environment"
1649 self.logger.debug(logging_text + step)
1650
1651 ee_id = None
1652 credentials = None
1653 if vca_type == "k8s_proxy_charm":
1654 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1655 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1656 namespace=namespace,
1657 artifact_path=artifact_path,
1658 db_dict=db_dict,
1659 vca_id=vca_id,
1660 )
1661 elif vca_type == "helm-v3":
1662 ee_id, credentials = await self.vca_map[
1663 vca_type
1664 ].create_execution_environment(
1665 namespace=nsr_id,
1666 reuse_ee_id=ee_id,
1667 db_dict=db_dict,
1668 config=osm_config,
1669 artifact_path=artifact_path,
1670 chart_model=vca_name,
1671 vca_type=vca_type,
1672 )
1673 else:
1674 ee_id, credentials = await self.vca_map[
1675 vca_type
1676 ].create_execution_environment(
1677 namespace=namespace,
1678 reuse_ee_id=ee_id,
1679 db_dict=db_dict,
1680 vca_id=vca_id,
1681 )
1682
1683 elif vca_type == "native_charm":
1684 step = "Waiting to VM being up and getting IP address"
1685 self.logger.debug(logging_text + step)
1686 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1687 logging_text,
1688 nsr_id,
1689 vnfr_id,
1690 vdu_id,
1691 vdu_index,
1692 user=None,
1693 pub_key=None,
1694 )
1695 credentials = {"hostname": rw_mgmt_ip}
1696 # get username
1697 username = deep_get(
1698 config_descriptor, ("config-access", "ssh-access", "default-user")
1699 )
1700 # TODO remove this when the IM changes regarding config-access:ssh-access:default-user
1701 # are merged. Meanwhile, get the username from initial-config-primitive
1702 if not username and initial_config_primitive_list:
1703 for config_primitive in initial_config_primitive_list:
1704 for param in config_primitive.get("parameter", ()):
1705 if param["name"] == "ssh-username":
1706 username = param["value"]
1707 break
1708 if not username:
1709 raise LcmException(
1710 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1711 "'config-access.ssh-access.default-user'"
1712 )
1713 credentials["username"] = username
1714 # n2vc_redesign STEP 3.2
1715
1716 self._write_configuration_status(
1717 nsr_id=nsr_id,
1718 vca_index=vca_index,
1719 status="REGISTERING",
1720 element_under_configuration=element_under_configuration,
1721 element_type=element_type,
1722 )
1723
1724 step = "register execution environment {}".format(credentials)
1725 self.logger.debug(logging_text + step)
1726 ee_id = await self.vca_map[vca_type].register_execution_environment(
1727 credentials=credentials,
1728 namespace=namespace,
1729 db_dict=db_dict,
1730 vca_id=vca_id,
1731 )
1732
1733 # for compatibility with MON/POL modules, they need the model and application name in the database
1734 # TODO ask MON/POL whether assuming the format "model_name.application_name" is still needed
1735 ee_id_parts = ee_id.split(".")
1736 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1737 if len(ee_id_parts) >= 2:
1738 model_name = ee_id_parts[0]
1739 application_name = ee_id_parts[1]
1740 db_nsr_update[db_update_entry + "model"] = model_name
1741 db_nsr_update[db_update_entry + "application"] = application_name
1742
1743 # n2vc_redesign STEP 3.3
1744 step = "Install configuration Software"
1745
1746 self._write_configuration_status(
1747 nsr_id=nsr_id,
1748 vca_index=vca_index,
1749 status="INSTALLING SW",
1750 element_under_configuration=element_under_configuration,
1751 element_type=element_type,
1752 other_update=db_nsr_update,
1753 )
1754
1755 # TODO check if already done
1756 self.logger.debug(logging_text + step)
1757 config = None
1758 if vca_type == "native_charm":
1759 config_primitive = next(
1760 (p for p in initial_config_primitive_list if p["name"] == "config"),
1761 None,
1762 )
1763 if config_primitive:
1764 config = self._map_primitive_params(
1765 config_primitive, {}, deploy_params
1766 )
1767 num_units = 1
1768 if vca_type == "lxc_proxy_charm":
1769 if element_type == "NS":
1770 num_units = db_nsr.get("config-units") or 1
1771 elif element_type == "VNF":
1772 num_units = db_vnfr.get("config-units") or 1
1773 elif element_type == "VDU":
1774 for v in db_vnfr["vdur"]:
1775 if vdu_id == v["vdu-id-ref"]:
1776 num_units = v.get("config-units") or 1
1777 break
1778 if vca_type != "k8s_proxy_charm":
1779 await self.vca_map[vca_type].install_configuration_sw(
1780 ee_id=ee_id,
1781 artifact_path=artifact_path,
1782 db_dict=db_dict,
1783 config=config,
1784 num_units=num_units,
1785 vca_id=vca_id,
1786 vca_type=vca_type,
1787 )
1788
1789 # write in db flag of configuration_sw already installed
1790 self.update_db_2(
1791 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1792 )
1793
1794 # add relations for this VCA (wait for other peers related with this VCA)
1795 is_relation_added = await self._add_vca_relations(
1796 logging_text=logging_text,
1797 nsr_id=nsr_id,
1798 vca_type=vca_type,
1799 vca_index=vca_index,
1800 )
1801
1802 if not is_relation_added:
1803 raise LcmException("Relations could not be added to VCA.")
1804
1805 # if SSH access is required, then get the execution environment SSH public key
1806 # if native charm, we have already waited for the VM to be up
1807 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
1808 pub_key = None
1809 user = None
1810 # self.logger.debug("get ssh key block")
1811 if deep_get(
1812 config_descriptor, ("config-access", "ssh-access", "required")
1813 ):
1814 # self.logger.debug("ssh key needed")
1815 # Needed to inject a ssh key
1816 user = deep_get(
1817 config_descriptor,
1818 ("config-access", "ssh-access", "default-user"),
1819 )
1820 step = "Install configuration Software, getting public ssh key"
1821 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1822 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1823 )
1824
1825 step = "Insert public key into VM user={} ssh_key={}".format(
1826 user, pub_key
1827 )
1828 else:
1829 # self.logger.debug("no need to get ssh key")
1830 step = "Waiting to VM being up and getting IP address"
1831 self.logger.debug(logging_text + step)
1832
1833 # default rw_mgmt_ip to None, avoiding the variable being left undefined
1834 rw_mgmt_ip = None
1835
1836 # n2vc_redesign STEP 5.1
1837 # wait for RO (ip-address) and insert pub_key into the VM
1838 if vnfr_id:
1839 if kdu_name:
1840 rw_mgmt_ip, services = await self.wait_kdu_up(
1841 logging_text, nsr_id, vnfr_id, kdu_name
1842 )
1843 vnfd = self.db.get_one(
1844 "vnfds_revisions",
1845 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
1846 )
1847 kdu = get_kdu(vnfd, kdu_name)
1848 kdu_services = [
1849 service["name"] for service in get_kdu_services(kdu)
1850 ]
1851 exposed_services = []
1852 for service in services:
1853 if any(s in service["name"] for s in kdu_services):
1854 exposed_services.append(service)
1855 await self.vca_map[vca_type].exec_primitive(
1856 ee_id=ee_id,
1857 primitive_name="config",
1858 params_dict={
1859 "osm-config": json.dumps(
1860 OsmConfigBuilder(
1861 k8s={"services": exposed_services}
1862 ).build()
1863 )
1864 },
1865 vca_id=vca_id,
1866 )
1867
1868 # This verification is needed in order to avoid trying to add a public key
1869 # to a VM when the VNF is actually a KNF (in the edge case where the user creates a VCA
1870 # for a KNF and not for its KDUs, the previous check gives False and the code
1871 # falls through to this block, so we still need to verify whether the VNF is a real VNF
1872 # or a KNF)
1873 elif db_vnfr.get("vdur"):
1874 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1875 logging_text,
1876 nsr_id,
1877 vnfr_id,
1878 vdu_id,
1879 vdu_index,
1880 user=user,
1881 pub_key=pub_key,
1882 )
1883
1884 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
1885
1886 # store rw_mgmt_ip in deploy params for later replacement
1887 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
1888
1889 # n2vc_redesign STEP 6 Execute initial config primitive
1890 step = "execute initial config primitive"
1891
1892 # wait for dependent primitives execution (NS -> VNF -> VDU)
1893 if initial_config_primitive_list:
1894 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
1895
1896 # stage, in function of element type: vdu, kdu, vnf or ns
1897 my_vca = vca_deployed_list[vca_index]
1898 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1899 # VDU or KDU
1900 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
1901 elif my_vca.get("member-vnf-index"):
1902 # VNF
1903 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
1904 else:
1905 # NS
1906 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
1907
1908 self._write_configuration_status(
1909 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
1910 )
1911
1912 self._write_op_status(op_id=nslcmop_id, stage=stage)
1913
1914 check_if_terminated_needed = True
1915 for initial_config_primitive in initial_config_primitive_list:
1916 # adding information on the vca_deployed if it is an NS execution environment
1917 if not vca_deployed["member-vnf-index"]:
1918 deploy_params["ns_config_info"] = json.dumps(
1919 self._get_ns_config_info(nsr_id)
1920 )
1921 # TODO check if already done
1922 primitive_params_ = self._map_primitive_params(
1923 initial_config_primitive, {}, deploy_params
1924 )
1925
1926 step = "execute primitive '{}' params '{}'".format(
1927 initial_config_primitive["name"], primitive_params_
1928 )
1929 self.logger.debug(logging_text + step)
1930 await self.vca_map[vca_type].exec_primitive(
1931 ee_id=ee_id,
1932 primitive_name=initial_config_primitive["name"],
1933 params_dict=primitive_params_,
1934 db_dict=db_dict,
1935 vca_id=vca_id,
1936 vca_type=vca_type,
1937 )
1938 # Once some primitive has been executed, check and record at db whether terminate primitives will need to run
1939 if check_if_terminated_needed:
1940 if config_descriptor.get("terminate-config-primitive"):
1941 self.update_db_2(
1942 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
1943 )
1944 check_if_terminated_needed = False
1945
1946 # TODO register in database that primitive is done
1947
1948 # STEP 7 Configure metrics
1949 if vca_type == "helm-v3":
1950 # TODO: review for those cases where the helm chart is a reference and
1951 # is not part of the NF package
1952 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
1953 ee_id=ee_id,
1954 artifact_path=artifact_path,
1955 ee_config_descriptor=ee_config_descriptor,
1956 vnfr_id=vnfr_id,
1957 nsr_id=nsr_id,
1958 target_ip=rw_mgmt_ip,
1959 element_type=element_type,
1960 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
1961 vdu_id=vdu_id,
1962 vdu_index=vdu_index,
1963 kdu_name=kdu_name,
1964 kdu_index=kdu_index,
1965 )
1966 if prometheus_jobs:
1967 self.update_db_2(
1968 "nsrs",
1969 nsr_id,
1970 {db_update_entry + "prometheus_jobs": prometheus_jobs},
1971 )
1972
1973 for job in prometheus_jobs:
1974 self.db.set_one(
1975 "prometheus_jobs",
1976 {"job_name": job["job_name"]},
1977 job,
1978 upsert=True,
1979 fail_on_empty=False,
1980 )
1981
1982 step = "instantiated at VCA"
1983 self.logger.debug(logging_text + step)
1984
1985 self._write_configuration_status(
1986 nsr_id=nsr_id, vca_index=vca_index, status="READY"
1987 )
1988
1989 except Exception as e: # TODO do not catch broad Exception; use N2VC exceptions
1990 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
1991 if not isinstance(
1992 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
1993 ):
1994 self.logger.error(
1995 "Exception while {} : {}".format(step, e), exc_info=True
1996 )
1997 self._write_configuration_status(
1998 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
1999 )
2000 raise LcmException("{}. {}".format(step, e)) from e
2001
2002 def _write_ns_status(
2003 self,
2004 nsr_id: str,
2005 ns_state: str,
2006 current_operation: str,
2007 current_operation_id: str,
2008 error_description: str = None,
2009 error_detail: str = None,
2010 other_update: dict = None,
2011 ):
2012 """
2013 Update db_nsr fields.
2014 :param nsr_id:
2015 :param ns_state:
2016 :param current_operation:
2017 :param current_operation_id:
2018 :param error_description:
2019 :param error_detail:
2020 :param other_update: other required changes at the database, if provided; will be cleared
2021 :return:
2022 """
2023 try:
2024 db_dict = other_update or {}
2025 db_dict[
2026 "_admin.nslcmop"
2027 ] = current_operation_id # for backward compatibility
2028 db_dict["_admin.current-operation"] = current_operation_id
2029 db_dict["_admin.operation-type"] = (
2030 current_operation if current_operation != "IDLE" else None
2031 )
2032 db_dict["currentOperation"] = current_operation
2033 db_dict["currentOperationID"] = current_operation_id
2034 db_dict["errorDescription"] = error_description
2035 db_dict["errorDetail"] = error_detail
2036
2037 if ns_state:
2038 db_dict["nsState"] = ns_state
2039 self.update_db_2("nsrs", nsr_id, db_dict)
2040 except DbException as e:
2041 self.logger.warning("Error writing NS status, ns={}: {}".format(nsr_id, e))
2042
2043 def _write_op_status(
2044 self,
2045 op_id: str,
2046 stage: list = None,
2047 error_message: str = None,
2048 queuePosition: int = 0,
2049 operation_state: str = None,
2050 other_update: dict = None,
2051 ):
2052 try:
2053 db_dict = other_update or {}
2054 db_dict["queuePosition"] = queuePosition
2055 if isinstance(stage, list):
2056 db_dict["stage"] = stage[0]
2057 db_dict["detailed-status"] = " ".join(stage)
2058 elif stage is not None:
2059 db_dict["stage"] = str(stage)
2060
2061 if error_message is not None:
2062 db_dict["errorMessage"] = error_message
2063 if operation_state is not None:
2064 db_dict["operationState"] = operation_state
2065 db_dict["statusEnteredTime"] = time()
2066 self.update_db_2("nslcmops", op_id, db_dict)
2067 except DbException as e:
2068 self.logger.warning(
2069 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2070 )
2071
2072 def _write_all_config_status(self, db_nsr: dict, status: str):
2073 try:
2074 nsr_id = db_nsr["_id"]
2075 # configurationStatus
2076 config_status = db_nsr.get("configurationStatus")
2077 if config_status:
2078 db_nsr_update = {
2079 "configurationStatus.{}.status".format(index): status
2080 for index, v in enumerate(config_status)
2081 if v
2082 }
2083 # update status
2084 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2085
2086 except DbException as e:
2087 self.logger.warning(
2088 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2089 )
2090
2091 def _write_configuration_status(
2092 self,
2093 nsr_id: str,
2094 vca_index: int,
2095 status: str = None,
2096 element_under_configuration: str = None,
2097 element_type: str = None,
2098 other_update: dict = None,
2099 ):
2100 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2101 # .format(vca_index, status))
2102
2103 try:
2104 db_path = "configurationStatus.{}.".format(vca_index)
2105 db_dict = other_update or {}
2106 if status:
2107 db_dict[db_path + "status"] = status
2108 if element_under_configuration:
2109 db_dict[
2110 db_path + "elementUnderConfiguration"
2111 ] = element_under_configuration
2112 if element_type:
2113 db_dict[db_path + "elementType"] = element_type
2114 self.update_db_2("nsrs", nsr_id, db_dict)
2115 except DbException as e:
2116 self.logger.warning(
2117 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2118 status, nsr_id, vca_index, e
2119 )
2120 )
2121
2122 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2123 """
2124 Checks and computes the placement (the VIM account where to deploy). If it is decided by an external tool, it
2125 sends the request via kafka and waits until the result is written at database (nslcmops _admin.pla).
2126 Database is used because the result can be obtained from a different LCM worker in case of HA.
2127 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2128 :param db_nslcmop: database content of nslcmop
2129 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2130 :return: True if some modification is done. Modifies database vnfrs and the db_vnfrs parameter with the
2131 computed 'vim-account-id'
2132 """
2133 modified = False
2134 nslcmop_id = db_nslcmop["_id"]
2135 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2136 if placement_engine == "PLA":
2137 self.logger.debug(
2138 logging_text + "Invoke and wait for placement optimization"
2139 )
2140 await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
2141 db_poll_interval = 5
2142 wait = db_poll_interval * 10
2143 pla_result = None
2144 while not pla_result and wait >= 0:
2145 await asyncio.sleep(db_poll_interval)
2146 wait -= db_poll_interval
2147 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2148 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2149
2150 if not pla_result:
2151 raise LcmException(
2152 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2153 )
2154
2155 for pla_vnf in pla_result["vnf"]:
2156 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2157 if not pla_vnf.get("vimAccountId") or not vnfr:
2158 continue
2159 modified = True
2160 self.db.set_one(
2161 "vnfrs",
2162 {"_id": vnfr["_id"]},
2163 {"vim-account-id": pla_vnf["vimAccountId"]},
2164 )
2165 # Modifies db_vnfrs
2166 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2167 return modified
2168
2169 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2170 alerts = []
2171 nsr_id = vnfr["nsr-id-ref"]
2172 df = vnfd.get("df", [{}])[0]
2173 # Checking for auto-healing configuration
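# Hypothetical df snippet with the keys consumed below (values are examples):
#   healing-aspect:
#     - healing-policy:
#         - vdu-id: "mgmt-vdu"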
2174 if "healing-aspect" in df:
2175 healing_aspects = df["healing-aspect"]
2176 for healing in healing_aspects:
2177 for healing_policy in healing.get("healing-policy", ()):
2178 vdu_id = healing_policy["vdu-id"]
2179 vdur = next(
2180 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2181 {},
2182 )
2183 if not vdur:
2184 continue
2185 metric_name = "vm_status"
2186 vdu_name = vdur.get("name")
2187 vnf_member_index = vnfr["member-vnf-index-ref"]
2188 uuid = str(uuid4())
2189 name = f"healing_{uuid}"
2190 action = healing_policy
2191 # action_on_recovery = healing.get("action-on-recovery")
2192 # cooldown_time = healing.get("cooldown-time")
2193 # day1 = healing.get("day1")
2194 alert = {
2195 "uuid": uuid,
2196 "name": name,
2197 "metric": metric_name,
2198 "tags": {
2199 "ns_id": nsr_id,
2200 "vnf_member_index": vnf_member_index,
2201 "vdu_name": vdu_name,
2202 },
2203 "alarm_status": "ok",
2204 "action_type": "healing",
2205 "action": action,
2206 }
2207 alerts.append(alert)
2208 return alerts
2209
2210 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2211 alerts = []
2212 nsr_id = vnfr["nsr-id-ref"]
2213 df = vnfd.get("df", [{}])[0]
2214 # Checking for auto-scaling configuration
2215 if "scaling-aspect" in df:
2216 scaling_aspects = df["scaling-aspect"]
2217 all_vnfd_monitoring_params = {}
2218 for ivld in vnfd.get("int-virtual-link-desc", ()):
2219 for mp in ivld.get("monitoring-parameters", ()):
2220 all_vnfd_monitoring_params[mp.get("id")] = mp
2221 for vdu in vnfd.get("vdu", ()):
2222 for mp in vdu.get("monitoring-parameter", ()):
2223 all_vnfd_monitoring_params[mp.get("id")] = mp
2224 for df in vnfd.get("df", ()):
2225 for mp in df.get("monitoring-parameter", ()):
2226 all_vnfd_monitoring_params[mp.get("id")] = mp
2227 for scaling_aspect in scaling_aspects:
2228 scaling_group_name = scaling_aspect.get("name", "")
2229 # Get monitored VDUs
2230 all_monitored_vdus = set()
2231 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2232 "deltas", ()
2233 ):
2234 for vdu_delta in delta.get("vdu-delta", ()):
2235 all_monitored_vdus.add(vdu_delta.get("id"))
2236 monitored_vdurs = list(
2237 filter(
2238 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2239 vnfr["vdur"],
2240 )
2241 )
2242 if not monitored_vdurs:
2243 self.logger.error(
2244 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2245 )
2246 continue
2247 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2248 if scaling_policy["scaling-type"] != "automatic":
2249 continue
2250 threshold_time = scaling_policy.get("threshold-time", "1")
2251 cooldown_time = scaling_policy.get("cooldown-time", "0")
2252 for scaling_criteria in scaling_policy["scaling-criteria"]:
2253 monitoring_param_ref = scaling_criteria.get(
2254 "vnf-monitoring-param-ref"
2255 )
2256 vnf_monitoring_param = all_vnfd_monitoring_params[
2257 monitoring_param_ref
2258 ]
2259 for vdur in monitored_vdurs:
2260 vdu_id = vdur["vdu-id-ref"]
2261 metric_name = vnf_monitoring_param.get("performance-metric")
2262 metric_name = f"osm_{metric_name}"
2263 vnf_member_index = vnfr["member-vnf-index-ref"]
2264 scalein_threshold = scaling_criteria.get(
2265 "scale-in-threshold"
2266 )
2267 scaleout_threshold = scaling_criteria.get(
2268 "scale-out-threshold"
2269 )
2270 # Looking for min/max-number-of-instances
2271 instances_min_number = 1
2272 instances_max_number = 1
2273 vdu_profile = df["vdu-profile"]
2274 if vdu_profile:
2275 profile = next(
2276 item for item in vdu_profile if item["id"] == vdu_id
2277 )
2278 instances_min_number = profile.get(
2279 "min-number-of-instances", 1
2280 )
2281 instances_max_number = profile.get(
2282 "max-number-of-instances", 1
2283 )
2284
2285 if scalein_threshold:
2286 uuid = str(uuid4())
2287 name = f"scalein_{uuid}"
2288 operation = scaling_criteria[
2289 "scale-in-relational-operation"
2290 ]
2291 rel_operator = self.rel_operation_types.get(
2292 operation, "<="
2293 )
2294 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2295 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
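# Rendered example with hypothetical values (metric "osm_cpu_utilization",
# ns "n1", member index "1", vdu "dataVdu", min 1 instance, threshold 20):
#   (count (osm_cpu_utilization{ns_id="n1", vnf_member_index="1", vdu_id="dataVdu"}) > 1)
#   and (avg(osm_cpu_utilization{ns_id="n1", vnf_member_index="1", vdu_id="dataVdu"}) <= 20)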
2296 labels = {
2297 "ns_id": nsr_id,
2298 "vnf_member_index": vnf_member_index,
2299 "vdu_id": vdu_id,
2300 }
2301 prom_cfg = {
2302 "alert": name,
2303 "expr": expression,
2304 "for": str(threshold_time) + "m",
2305 "labels": labels,
2306 }
2308 action = {
2309 "scaling-group": scaling_group_name,
2310 "cooldown-time": cooldown_time,
2311 }
2312 alert = {
2313 "uuid": uuid,
2314 "name": name,
2315 "metric": metric_name,
2316 "tags": {
2317 "ns_id": nsr_id,
2318 "vnf_member_index": vnf_member_index,
2319 "vdu_id": vdu_id,
2320 },
2321 "alarm_status": "ok",
2322 "action_type": "scale_in",
2323 "action": action,
2324 "prometheus_config": prom_cfg,
2325 }
2326 alerts.append(alert)
2327
2328 if scaleout_threshold:
2329 uuid = str(uuid4())
2330 name = f"scaleout_{uuid}"
2331 operation = scaling_criteria[
2332 "scale-out-relational-operation"
2333 ]
2334 rel_operator = self.rel_operation_types.get(
2335 operation, "<="
2336 )
2337 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2338 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2339 labels = {
2340 "ns_id": nsr_id,
2341 "vnf_member_index": vnf_member_index,
2342 "vdu_id": vdu_id,
2343 }
2344 prom_cfg = {
2345 "alert": name,
2346 "expr": expression,
2347 "for": str(threshold_time) + "m",
2348 "labels": labels,
2349 }
2351 action = {
2352 "scaling-group": scaling_group_name,
2353 "cooldown-time": cooldown_time,
2354 }
2355 alert = {
2356 "uuid": uuid,
2357 "name": name,
2358 "metric": metric_name,
2359 "tags": {
2360 "ns_id": nsr_id,
2361 "vnf_member_index": vnf_member_index,
2362 "vdu_id": vdu_id,
2363 },
2364 "alarm_status": "ok",
2365 "action_type": "scale_out",
2366 "action": action,
2367 "prometheus_config": prom_cfg,
2368 }
2369 alerts.append(alert)
2370 return alerts
2371
2372 def _gather_vnfr_alarm_alerts(self, vnfr, vnfd):
2373 alerts = []
2374 nsr_id = vnfr["nsr-id-ref"]
2375 vnf_member_index = vnfr["member-vnf-index-ref"]
2376
2377 # Checking for VNF alarm configuration
2378 for vdur in vnfr["vdur"]:
2379 vdu_id = vdur["vdu-id-ref"]
2380 vdu = next(filter(lambda vdu: vdu["id"] == vdu_id, vnfd["vdu"]))
2381 if "alarm" in vdu:
2382 # Get VDU monitoring params, since alerts are based on them
2383 vdu_monitoring_params = {}
2384 for mp in vdu.get("monitoring-parameter", []):
2385 vdu_monitoring_params[mp.get("id")] = mp
2386 if not vdu_monitoring_params:
2387 self.logger.error(
2388 "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
2389 )
2390 continue
2391 # Get alarms in the VDU
2392 alarm_descriptors = vdu["alarm"]
2393 # Create VDU alarms for each alarm in the VDU
2394 for alarm_descriptor in alarm_descriptors:
2395 # Check that the VDU alarm refers to a proper monitoring param
2396 alarm_monitoring_param = alarm_descriptor.get(
2397 "vnf-monitoring-param-ref", ""
2398 )
2399 vdu_specific_monitoring_param = vdu_monitoring_params.get(
2400 alarm_monitoring_param, {}
2401 )
2402 if not vdu_specific_monitoring_param:
2403 self.logger.error(
2404 "VDU alarm refers to a VDU monitoring param not present in the VDU"
2405 )
2406 continue
2407 metric_name = vdu_specific_monitoring_param.get(
2408 "performance-metric"
2409 )
2410 if not metric_name:
2411 self.logger.error(
2412 "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
2413 )
2414 continue
2415 # Set params of the alarm to be created in Prometheus
2416 metric_name = f"osm_{metric_name}"
2417 metric_threshold = alarm_descriptor.get("value")
2418 uuid = str(uuid4())
2419 alert_name = f"vdu_alarm_{uuid}"
2420 operation = alarm_descriptor["operation"]
2421 rel_operator = self.rel_operation_types.get(operation, "<=")
2422 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2423 expression = f"{metric_selector} {rel_operator} {metric_threshold}"
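# Rendered example with hypothetical values, assuming the descriptor operation
# maps to ">=" and the threshold is 90:
#   osm_average_memory_utilization{ns_id="n1", vnf_member_index="1", vdu_id="dataVdu"} >= 90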
2424 labels = {
2425 "ns_id": nsr_id,
2426 "vnf_member_index": vnf_member_index,
2427 "vdu_id": vdu_id,
2428 "vdu_name": "{{ $labels.vdu_name }}",
2429 }
2430 prom_cfg = {
2431 "alert": alert_name,
2432 "expr": expression,
2433 "for": "1m", # default value. Ideally, this should be related to an IM param, but there is not such param
2434 "labels": labels,
2435 }
2436 alarm_action = dict()
2437 for action_type in ["ok", "insufficient-data", "alarm"]:
2438 if (
2439 "actions" in alarm_descriptor
2440 and action_type in alarm_descriptor["actions"]
2441 ):
2442 alarm_action[action_type] = alarm_descriptor["actions"][
2443 action_type
2444 ]
2445 alert = {
2446 "uuid": uuid,
2447 "name": alert_name,
2448 "metric": metric_name,
2449 "tags": {
2450 "ns_id": nsr_id,
2451 "vnf_member_index": vnf_member_index,
2452 "vdu_id": vdu_id,
2453 },
2454 "alarm_status": "ok",
2455 "action_type": "vdu_alarm",
2456 "action": alarm_action,
2457 "prometheus_config": prom_cfg,
2458 }
2459 alerts.append(alert)
2460 return alerts
2461
2462 def update_nsrs_with_pla_result(self, params):
2463 try:
2464 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2465 self.update_db_2(
2466 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2467 )
2468 except Exception as e:
2469 self.logger.warning("Update failed for nslcmop_id={}: {}".format(nslcmop_id, e))
2470
2471 async def instantiate(self, nsr_id, nslcmop_id):
2472 """
2473 Instantiate an NS: deploy its KDUs, VMs and execution environments, then run Day-1 configuration.
2474 :param nsr_id: ns instance to deploy
2475 :param nslcmop_id: operation to run
2476 :return:
2477 """
2478
2479 # Try to lock HA task here
2480 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2481 if not task_is_locked_by_me:
2482 self.logger.debug(
2483 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2484 )
2485 return
2486
2487 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2488 self.logger.debug(logging_text + "Enter")
2489
2490 # get all needed from database
2491
2492 # database nsrs record
2493 db_nsr = None
2494
2495 # database nslcmops record
2496 db_nslcmop = None
2497
2498 # update operation on nsrs
2499 db_nsr_update = {}
2500 # update operation on nslcmops
2501 db_nslcmop_update = {}
2502
2503 timeout_ns_deploy = self.timeout.ns_deploy
2504
2505 nslcmop_operation_state = None
2506 db_vnfrs = {} # vnf's info indexed by member-index
2507 # n2vc_info = {}
2508 tasks_dict_info = {} # from task to info text
2509 exc = None
2510 error_list = []
2511 stage = [
2512 "Stage 1/5: preparation of the environment.",
2513 "Waiting for previous operations to terminate.",
2514 "",
2515 ]
2516 # ^ stage, step, VIM progress
2517 try:
2518 # wait for any previous tasks in process
2519 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2520
2521 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2522 stage[1] = "Reading from database."
2523 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2524 db_nsr_update["detailed-status"] = "creating"
2525 db_nsr_update["operational-status"] = "init"
2526 self._write_ns_status(
2527 nsr_id=nsr_id,
2528 ns_state="BUILDING",
2529 current_operation="INSTANTIATING",
2530 current_operation_id=nslcmop_id,
2531 other_update=db_nsr_update,
2532 )
2533 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2534
2535 # read from db: operation
2536 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2537 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2538 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2539 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2540 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2541 )
2542 ns_params = db_nslcmop.get("operationParams")
2543 if ns_params and ns_params.get("timeout_ns_deploy"):
2544 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2545
2546 # read from db: ns
2547 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2548 self.logger.debug(logging_text + stage[1])
2549 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2550 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2551 self.logger.debug(logging_text + stage[1])
2552 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2553 self.fs.sync(db_nsr["nsd-id"])
2554 db_nsr["nsd"] = nsd
2555 # nsr_name = db_nsr["name"] # TODO short-name??
2556
2557 # read from db: vnf's of this ns
2558 stage[1] = "Getting vnfrs from db."
2559 self.logger.debug(logging_text + stage[1])
2560 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2561
2562 # read from db: vnfd's for every vnf
2563 db_vnfds = [] # every vnfd data
2564
2565 # for each vnf in ns, read vnfd
2566 for vnfr in db_vnfrs_list:
2567 if vnfr.get("kdur"):
2568 kdur_list = []
2569 for kdur in vnfr["kdur"]:
2570 if kdur.get("additionalParams"):
2571 kdur["additionalParams"] = json.loads(
2572 kdur["additionalParams"]
2573 )
2574 kdur_list.append(kdur)
2575 vnfr["kdur"] = kdur_list
2576
2577 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2578 vnfd_id = vnfr["vnfd-id"]
2579 vnfd_ref = vnfr["vnfd-ref"]
2580 self.fs.sync(vnfd_id)
2581
2582 # if we don't have this vnfd yet, read it from db
2583 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["_id"] == vnfd_id):
2584 # read from db
2585 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2586 vnfd_id, vnfd_ref
2587 )
2588 self.logger.debug(logging_text + stage[1])
2589 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2590
2591 # store vnfd
2592 db_vnfds.append(vnfd)
2593
2594 # Get or generate the _admin.deployed.VCA list
2595 vca_deployed_list = None
2596 if db_nsr["_admin"].get("deployed"):
2597 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2598 if vca_deployed_list is None:
2599 vca_deployed_list = []
2600 configuration_status_list = []
2601 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2602 db_nsr_update["configurationStatus"] = configuration_status_list
2603 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2604 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2605 elif isinstance(vca_deployed_list, dict):
2606 # maintain backward compatibility. Change a dict to list at database
2607 vca_deployed_list = list(vca_deployed_list.values())
2608 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2609 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2610
2611 if not isinstance(
2612 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2613 ):
2614 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2615 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2616
2617 # set state to INSTANTIATED. Once instantiated, NBI will not delete it directly
2618 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2619 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2620 self.db.set_list(
2621 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2622 )
2623
2624 # n2vc_redesign STEP 2 Deploy Network Scenario
2625 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2626 self._write_op_status(op_id=nslcmop_id, stage=stage)
2627
2628 stage[1] = "Deploying KDUs."
2629 # self.logger.debug(logging_text + "Before deploy_kdus")
2630 # Call deploy_kdus in case the "vdu:kdu" param exists
2631 await self.deploy_kdus(
2632 logging_text=logging_text,
2633 nsr_id=nsr_id,
2634 nslcmop_id=nslcmop_id,
2635 db_vnfrs=db_vnfrs,
2636 db_vnfds=db_vnfds,
2637 task_instantiation_info=tasks_dict_info,
2638 )
2639
2640 stage[1] = "Getting VCA public key."
2641 # n2vc_redesign STEP 1 Get VCA public ssh-key
2642 # feature 1429. Add n2vc public key to needed VMs
2643 n2vc_key = self.n2vc.get_public_key()
2644 n2vc_key_list = [n2vc_key]
2645 if self.vca_config.public_key:
2646 n2vc_key_list.append(self.vca_config.public_key)
2647
2648 stage[1] = "Deploying NS at VIM."
2649 task_ro = asyncio.ensure_future(
2650 self.instantiate_RO(
2651 logging_text=logging_text,
2652 nsr_id=nsr_id,
2653 nsd=nsd,
2654 db_nsr=db_nsr,
2655 db_nslcmop=db_nslcmop,
2656 db_vnfrs=db_vnfrs,
2657 db_vnfds=db_vnfds,
2658 n2vc_key_list=n2vc_key_list,
2659 stage=stage,
2660 )
2661 )
2662 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2663 tasks_dict_info[task_ro] = "Deploying at VIM"
2664
2665 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2666 stage[1] = "Deploying Execution Environments."
2667 self.logger.debug(logging_text + stage[1])
2668
2669 # create namespace and certificate if any helm-based EE is present in the NS
2670 if check_helm_ee_in_ns(db_vnfds):
2671 await self.vca_map["helm-v3"].setup_ns_namespace(
2672 name=nsr_id,
2673 )
2674 # create TLS certificates
2675 await self.vca_map["helm-v3"].create_tls_certificate(
2676 secret_name=self.EE_TLS_NAME,
2677 dns_prefix="*",
2678 nsr_id=nsr_id,
2679 usage="server auth",
2680 namespace=nsr_id,
2681 )
2682
2683 nsi_id = None # TODO set nsi_id when this nsr belongs to an NSI
2684 for vnf_profile in get_vnf_profiles(nsd):
2685 vnfd_id = vnf_profile["vnfd-id"]
2686 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2687 member_vnf_index = str(vnf_profile["id"])
2688 db_vnfr = db_vnfrs[member_vnf_index]
2689 base_folder = vnfd["_admin"]["storage"]
2690 vdu_id = None
2691 vdu_index = 0
2692 vdu_name = None
2693 kdu_name = None
2694 kdu_index = None
2695
2696 # Get additional parameters
2697 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2698 if db_vnfr.get("additionalParamsForVnf"):
2699 deploy_params.update(
2700 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2701 )
2702
2703 descriptor_config = get_configuration(vnfd, vnfd["id"])
2704 if descriptor_config:
2705 self._deploy_n2vc(
2706 logging_text=logging_text
2707 + "member_vnf_index={} ".format(member_vnf_index),
2708 db_nsr=db_nsr,
2709 db_vnfr=db_vnfr,
2710 nslcmop_id=nslcmop_id,
2711 nsr_id=nsr_id,
2712 nsi_id=nsi_id,
2713 vnfd_id=vnfd_id,
2714 vdu_id=vdu_id,
2715 kdu_name=kdu_name,
2716 member_vnf_index=member_vnf_index,
2717 vdu_index=vdu_index,
2718 kdu_index=kdu_index,
2719 vdu_name=vdu_name,
2720 deploy_params=deploy_params,
2721 descriptor_config=descriptor_config,
2722 base_folder=base_folder,
2723 task_instantiation_info=tasks_dict_info,
2724 stage=stage,
2725 )
2726
2727 # Deploy charms for each VDU that supports one.
2728 for vdud in get_vdu_list(vnfd):
2729 vdu_id = vdud["id"]
2730 descriptor_config = get_configuration(vnfd, vdu_id)
2731 vdur = find_in_list(
2732 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2733 )
2734
2735 if vdur.get("additionalParams"):
2736 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2737 else:
2738 deploy_params_vdu = deploy_params
2739 deploy_params_vdu["OSM"] = get_osm_params(
2740 db_vnfr, vdu_id, vdu_count_index=0
2741 )
2742 vdud_count = get_number_of_instances(vnfd, vdu_id)
2743
2744 self.logger.debug("VDUD > {}".format(vdud))
2745 self.logger.debug(
2746 "Descriptor config > {}".format(descriptor_config)
2747 )
2748 if descriptor_config:
2749 vdu_name = None
2750 kdu_name = None
2751 kdu_index = None
2752 for vdu_index in range(vdud_count):
2753 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2754 self._deploy_n2vc(
2755 logging_text=logging_text
2756 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2757 member_vnf_index, vdu_id, vdu_index
2758 ),
2759 db_nsr=db_nsr,
2760 db_vnfr=db_vnfr,
2761 nslcmop_id=nslcmop_id,
2762 nsr_id=nsr_id,
2763 nsi_id=nsi_id,
2764 vnfd_id=vnfd_id,
2765 vdu_id=vdu_id,
2766 kdu_name=kdu_name,
2767 kdu_index=kdu_index,
2768 member_vnf_index=member_vnf_index,
2769 vdu_index=vdu_index,
2770 vdu_name=vdu_name,
2771 deploy_params=deploy_params_vdu,
2772 descriptor_config=descriptor_config,
2773 base_folder=base_folder,
2774 task_instantiation_info=tasks_dict_info,
2775 stage=stage,
2776 )
2777 for kdud in get_kdu_list(vnfd):
2778 kdu_name = kdud["name"]
2779 descriptor_config = get_configuration(vnfd, kdu_name)
2780 if descriptor_config:
2781 vdu_id = None
2782 vdu_index = 0
2783 vdu_name = None
2784 kdu_index, kdur = next(
2785 x
2786 for x in enumerate(db_vnfr["kdur"])
2787 if x[1]["kdu-name"] == kdu_name
2788 )
2789 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2790 if kdur.get("additionalParams"):
2791 deploy_params_kdu.update(
2792 parse_yaml_strings(kdur["additionalParams"].copy())
2793 )
2794
2795 self._deploy_n2vc(
2796 logging_text=logging_text,
2797 db_nsr=db_nsr,
2798 db_vnfr=db_vnfr,
2799 nslcmop_id=nslcmop_id,
2800 nsr_id=nsr_id,
2801 nsi_id=nsi_id,
2802 vnfd_id=vnfd_id,
2803 vdu_id=vdu_id,
2804 kdu_name=kdu_name,
2805 member_vnf_index=member_vnf_index,
2806 vdu_index=vdu_index,
2807 kdu_index=kdu_index,
2808 vdu_name=vdu_name,
2809 deploy_params=deploy_params_kdu,
2810 descriptor_config=descriptor_config,
2811 base_folder=base_folder,
2812 task_instantiation_info=tasks_dict_info,
2813 stage=stage,
2814 )
2815
2816 # Check if each VNF has an exporter for metric collection; if so, update the prometheus job records
2817 if "exporters-endpoints" in vnfd.get("df")[0]:
2818 exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
2819 self.logger.debug("exporter config :{}".format(exporter_config))
2820 artifact_path = "{}/{}/{}".format(
2821 base_folder["folder"],
2822 base_folder["pkg-dir"],
2823 "exporter-endpoint",
2824 )
2825 ee_id = None
2826 ee_config_descriptor = exporter_config
2827 vnfr_id = db_vnfr["id"]
2828 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2829 logging_text,
2830 nsr_id,
2831 vnfr_id,
2832 vdu_id=None,
2833 vdu_index=None,
2834 user=None,
2835 pub_key=None,
2836 )
2837 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
2838 self.logger.debug("Artifact_path:{}".format(artifact_path))
2839 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
2840 vdu_id_for_prom = None
2841 vdu_index_for_prom = None
2842 for x in get_iterable(db_vnfr, "vdur"):
2843 vdu_id_for_prom = x.get("vdu-id-ref")
2844 vdu_index_for_prom = x.get("count-index")
2845 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2846 ee_id=ee_id,
2847 artifact_path=artifact_path,
2848 ee_config_descriptor=ee_config_descriptor,
2849 vnfr_id=vnfr_id,
2850 nsr_id=nsr_id,
2851 target_ip=rw_mgmt_ip,
2852 element_type="VDU",
2853 vdu_id=vdu_id_for_prom,
2854 vdu_index=vdu_index_for_prom,
2855 )
2856
2857 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
2858 if prometheus_jobs:
2859 db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
2860 self.update_db_2(
2861 "nsrs",
2862 nsr_id,
2863 db_nsr_update,
2864 )
2865
2866 for job in prometheus_jobs:
2867 self.db.set_one(
2868 "prometheus_jobs",
2869 {"job_name": job["job_name"]},
2870 job,
2871 upsert=True,
2872 fail_on_empty=False,
2873 )
2874
2875 # Check if this NS has a charm configuration
2876 descriptor_config = nsd.get("ns-configuration")
2877 if descriptor_config and descriptor_config.get("juju"):
2878 vnfd_id = None
2879 db_vnfr = None
2880 member_vnf_index = None
2881 vdu_id = None
2882 kdu_name = None
2883 kdu_index = None
2884 vdu_index = 0
2885 vdu_name = None
2886
2887 # Get additional parameters
2888 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2889 if db_nsr.get("additionalParamsForNs"):
2890 deploy_params.update(
2891 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2892 )
2893 base_folder = nsd["_admin"]["storage"]
2894 self._deploy_n2vc(
2895 logging_text=logging_text,
2896 db_nsr=db_nsr,
2897 db_vnfr=db_vnfr,
2898 nslcmop_id=nslcmop_id,
2899 nsr_id=nsr_id,
2900 nsi_id=nsi_id,
2901 vnfd_id=vnfd_id,
2902 vdu_id=vdu_id,
2903 kdu_name=kdu_name,
2904 member_vnf_index=member_vnf_index,
2905 vdu_index=vdu_index,
2906 kdu_index=kdu_index,
2907 vdu_name=vdu_name,
2908 deploy_params=deploy_params,
2909 descriptor_config=descriptor_config,
2910 base_folder=base_folder,
2911 task_instantiation_info=tasks_dict_info,
2912 stage=stage,
2913 )
2914
2915 # the rest of the work will be done in the finally block
2916
2917 except (
2918 ROclient.ROClientException,
2919 DbException,
2920 LcmException,
2921 N2VCException,
2922 ) as e:
2923 self.logger.error(
2924 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2925 )
2926 exc = e
2927 except asyncio.CancelledError:
2928 self.logger.error(
2929 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2930 )
2931 exc = "Operation was cancelled"
2932 except Exception as e:
2933 exc = traceback.format_exc()
2934 self.logger.critical(
2935 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2936 exc_info=True,
2937 )
2938 finally:
2939 if exc:
2940 error_list.append(str(exc))
2941 try:
2942 # wait for pending tasks
2943 if tasks_dict_info:
2944 stage[1] = "Waiting for instantiate pending tasks."
2945 self.logger.debug(logging_text + stage[1])
2946 error_list += await self._wait_for_tasks(
2947 logging_text,
2948 tasks_dict_info,
2949 timeout_ns_deploy,
2950 stage,
2951 nslcmop_id,
2952 nsr_id=nsr_id,
2953 )
2954 stage[1] = stage[2] = ""
2955 except asyncio.CancelledError:
2956 error_list.append("Cancelled")
2957 # TODO cancel all tasks
2958 except Exception as exc:
2959 error_list.append(str(exc))
2960
2961 # update operation-status
2962 db_nsr_update["operational-status"] = "running"
2963 # let's begin with VCA 'configured' status (later we can change it)
2964 db_nsr_update["config-status"] = "configured"
2965 for task, task_name in tasks_dict_info.items():
2966 if not task.done() or task.cancelled() or task.exception():
2967 if task_name.startswith(self.task_name_deploy_vca):
2968 # A N2VC task is pending
2969 db_nsr_update["config-status"] = "failed"
2970 else:
2971 # RO or KDU task is pending
2972 db_nsr_update["operational-status"] = "failed"
2973
2974 # update status at database
2975 if error_list:
2976 error_detail = ". ".join(error_list)
2977 self.logger.error(logging_text + error_detail)
2978 error_description_nslcmop = "{} Detail: {}".format(
2979 stage[0], error_detail
2980 )
2981 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2982 nslcmop_id, stage[0]
2983 )
2984
2985 db_nsr_update["detailed-status"] = (
2986 error_description_nsr + " Detail: " + error_detail
2987 )
2988 db_nslcmop_update["detailed-status"] = error_detail
2989 nslcmop_operation_state = "FAILED"
2990 ns_state = "BROKEN"
2991 else:
2992 error_detail = None
2993 error_description_nsr = error_description_nslcmop = None
2994 ns_state = "READY"
2995 db_nsr_update["detailed-status"] = "Done"
2996 db_nslcmop_update["detailed-status"] = "Done"
2997 nslcmop_operation_state = "COMPLETED"
2998 # Gather auto-healing and auto-scaling alerts for each vnfr
2999 healing_alerts = []
3000 scaling_alerts = []
3001 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3002 vnfd = next(
3003 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3004 )
3005 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3006 for alert in healing_alerts:
3007 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3008 self.db.create("alerts", alert)
3009
3010 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3011 for alert in scaling_alerts:
3012 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3013 self.db.create("alerts", alert)
3014
3015 alarm_alerts = self._gather_vnfr_alarm_alerts(vnfr, vnfd)
3016 for alert in alarm_alerts:
3017 self.logger.info(f"Storing VNF alarm alert in MongoDB: {alert}")
3018 self.db.create("alerts", alert)
3019 if db_nsr:
3020 self._write_ns_status(
3021 nsr_id=nsr_id,
3022 ns_state=ns_state,
3023 current_operation="IDLE",
3024 current_operation_id=None,
3025 error_description=error_description_nsr,
3026 error_detail=error_detail,
3027 other_update=db_nsr_update,
3028 )
3029 self._write_op_status(
3030 op_id=nslcmop_id,
3031 stage="",
3032 error_message=error_description_nslcmop,
3033 operation_state=nslcmop_operation_state,
3034 other_update=db_nslcmop_update,
3035 )
3036
3037 if nslcmop_operation_state:
3038 try:
3039 await self.msg.aiowrite(
3040 "ns",
3041 "instantiated",
3042 {
3043 "nsr_id": nsr_id,
3044 "nslcmop_id": nslcmop_id,
3045 "operationState": nslcmop_operation_state,
3046 "startTime": db_nslcmop["startTime"],
3047 "links": db_nslcmop["links"],
3048 "operationParams": {
3049 "nsInstanceId": nsr_id,
3050 "nsdId": db_nsr["nsd-id"],
3051 },
3052 },
3053 )
3054 except Exception as e:
3055 self.logger.error(
3056 logging_text + "kafka_write notification Exception {}".format(e)
3057 )
3058
3059 self.logger.debug(logging_text + "Exit")
3060 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3061
3062 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
3063 if vnfd_id not in cached_vnfds:
3064 cached_vnfds[vnfd_id] = self.db.get_one(
3065 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3066 )
3067 return cached_vnfds[vnfd_id]
3068
3069 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3070 if vnf_profile_id not in cached_vnfrs:
3071 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3072 "vnfrs",
3073 {
3074 "member-vnf-index-ref": vnf_profile_id,
3075 "nsr-id-ref": nsr_id,
3076 },
3077 )
3078 return cached_vnfrs[vnf_profile_id]
3079
3080 def _is_deployed_vca_in_relation(
3081 self, vca: DeployedVCA, relation: Relation
3082 ) -> bool:
3083 found = False
3084 for endpoint in (relation.provider, relation.requirer):
3085 if endpoint["kdu-resource-profile-id"]:
3086 continue
3087 found = (
3088 vca.vnf_profile_id == endpoint.vnf_profile_id
3089 and vca.vdu_profile_id == endpoint.vdu_profile_id
3090 and vca.execution_environment_ref == endpoint.execution_environment_ref
3091 )
3092 if found:
3093 break
3094 return found
3095
3096 def _update_ee_relation_data_with_implicit_data(
3097 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
3098 ):
3099 ee_relation_data = safe_get_ee_relation(
3100 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
3101 )
3102 ee_relation_level = EELevel.get_level(ee_relation_data)
3103 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
3104 "execution-environment-ref"
3105 ]:
3106 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
3107 vnfd_id = vnf_profile["vnfd-id"]
3108 project = nsd["_admin"]["projects_read"][0]
3109 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3110 entity_id = (
3111 vnfd_id
3112 if ee_relation_level == EELevel.VNF
3113 else ee_relation_data["vdu-profile-id"]
3114 )
3115 ee = get_juju_ee_ref(db_vnfd, entity_id)
3116 if not ee:
3117 raise Exception(
3118 f"not execution environments found for ee_relation {ee_relation_data}"
3119 )
3120 ee_relation_data["execution-environment-ref"] = ee["id"]
3121 return ee_relation_data
3122
3123 def _get_ns_relations(
3124 self,
3125 nsr_id: str,
3126 nsd: Dict[str, Any],
3127 vca: DeployedVCA,
3128 cached_vnfds: Dict[str, Any],
3129 ) -> List[Relation]:
3130 relations = []
3131 db_ns_relations = get_ns_configuration_relation_list(nsd)
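# Two relation formats are handled below (structure taken from this loop;
# names/values are hypothetical):
#   - explicit: {"name": "r1", "provider": {...}, "requirer": {...}}
#   - legacy:   {"name": "r1", "entities": [{"id": "vnf1", "endpoint": "db"},
#                                           {"id": "vnf2", "endpoint": "db"}]}
# In the legacy format, an entity id equal to the NSD id denotes the NS charm itself.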
3132 for r in db_ns_relations:
3133 provider_dict = None
3134 requirer_dict = None
3135 if all(key in r for key in ("provider", "requirer")):
3136 provider_dict = r["provider"]
3137 requirer_dict = r["requirer"]
3138 elif "entities" in r:
3139 provider_id = r["entities"][0]["id"]
3140 provider_dict = {
3141 "nsr-id": nsr_id,
3142 "endpoint": r["entities"][0]["endpoint"],
3143 }
3144 if provider_id != nsd["id"]:
3145 provider_dict["vnf-profile-id"] = provider_id
3146 requirer_id = r["entities"][1]["id"]
3147 requirer_dict = {
3148 "nsr-id": nsr_id,
3149 "endpoint": r["entities"][1]["endpoint"],
3150 }
3151 if requirer_id != nsd["id"]:
3152 requirer_dict["vnf-profile-id"] = requirer_id
3153 else:
3154 raise Exception(
3155 "provider/requirer or entities must be included in the relation."
3156 )
3157 relation_provider = self._update_ee_relation_data_with_implicit_data(
3158 nsr_id, nsd, provider_dict, cached_vnfds
3159 )
3160 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3161 nsr_id, nsd, requirer_dict, cached_vnfds
3162 )
3163 provider = EERelation(relation_provider)
3164 requirer = EERelation(relation_requirer)
3165 relation = Relation(r["name"], provider, requirer)
3166 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3167 if vca_in_relation:
3168 relations.append(relation)
3169 return relations
3170
3171 def _get_vnf_relations(
3172 self,
3173 nsr_id: str,
3174 nsd: Dict[str, Any],
3175 vca: DeployedVCA,
3176 cached_vnfds: Dict[str, Any],
3177 ) -> List[Relation]:
3178 relations = []
3179 if vca.target_element == "ns":
3180 self.logger.debug("VCA is a NS charm, not a VNF.")
3181 return relations
3182 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3183 vnf_profile_id = vnf_profile["id"]
3184 vnfd_id = vnf_profile["vnfd-id"]
3185 project = nsd["_admin"]["projects_read"][0]
3186 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3187 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3188 for r in db_vnf_relations:
3189 provider_dict = None
3190 requirer_dict = None
3191 if all(key in r for key in ("provider", "requirer")):
3192 provider_dict = r["provider"]
3193 requirer_dict = r["requirer"]
3194 elif "entities" in r:
3195 provider_id = r["entities"][0]["id"]
3196 provider_dict = {
3197 "nsr-id": nsr_id,
3198 "vnf-profile-id": vnf_profile_id,
3199 "endpoint": r["entities"][0]["endpoint"],
3200 }
3201 if provider_id != vnfd_id:
3202 provider_dict["vdu-profile-id"] = provider_id
3203 requirer_id = r["entities"][1]["id"]
3204 requirer_dict = {
3205 "nsr-id": nsr_id,
3206 "vnf-profile-id": vnf_profile_id,
3207 "endpoint": r["entities"][1]["endpoint"],
3208 }
3209 if requirer_id != vnfd_id:
3210 requirer_dict["vdu-profile-id"] = requirer_id
3211 else:
3212 raise Exception(
3213 "provider/requirer or entities must be included in the relation."
3214 )
3215 relation_provider = self._update_ee_relation_data_with_implicit_data(
3216 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3217 )
3218 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3219 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3220 )
3221 provider = EERelation(relation_provider)
3222 requirer = EERelation(relation_requirer)
3223 relation = Relation(r["name"], provider, requirer)
3224 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3225 if vca_in_relation:
3226 relations.append(relation)
3227 return relations
3228
3229 def _get_kdu_resource_data(
3230 self,
3231 ee_relation: EERelation,
3232 db_nsr: Dict[str, Any],
3233 cached_vnfds: Dict[str, Any],
3234 ) -> DeployedK8sResource:
3235 nsd = get_nsd(db_nsr)
3236 vnf_profiles = get_vnf_profiles(nsd)
3237 vnfd_id = find_in_list(
3238 vnf_profiles,
3239 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3240 )["vnfd-id"]
3241 project = nsd["_admin"]["projects_read"][0]
3242 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3243 kdu_resource_profile = get_kdu_resource_profile(
3244 db_vnfd, ee_relation.kdu_resource_profile_id
3245 )
3246 kdu_name = kdu_resource_profile["kdu-name"]
3247 deployed_kdu, _ = get_deployed_kdu(
3248 db_nsr.get("_admin", ()).get("deployed", ()),
3249 kdu_name,
3250 ee_relation.vnf_profile_id,
3251 )
3252 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3253 return deployed_kdu
3254
3255 def _get_deployed_component(
3256 self,
3257 ee_relation: EERelation,
3258 db_nsr: Dict[str, Any],
3259 cached_vnfds: Dict[str, Any],
3260 ) -> DeployedComponent:
3261 nsr_id = db_nsr["_id"]
3262 deployed_component = None
3263 ee_level = EELevel.get_level(ee_relation)
3264 if ee_level == EELevel.NS:
3265 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3266 if vca:
3267 deployed_component = DeployedVCA(nsr_id, vca)
3268 elif ee_level == EELevel.VNF:
3269 vca = get_deployed_vca(
3270 db_nsr,
3271 {
3272 "vdu_id": None,
3273 "member-vnf-index": ee_relation.vnf_profile_id,
3274 "ee_descriptor_id": ee_relation.execution_environment_ref,
3275 },
3276 )
3277 if vca:
3278 deployed_component = DeployedVCA(nsr_id, vca)
3279 elif ee_level == EELevel.VDU:
3280 vca = get_deployed_vca(
3281 db_nsr,
3282 {
3283 "vdu_id": ee_relation.vdu_profile_id,
3284 "member-vnf-index": ee_relation.vnf_profile_id,
3285 "ee_descriptor_id": ee_relation.execution_environment_ref,
3286 },
3287 )
3288 if vca:
3289 deployed_component = DeployedVCA(nsr_id, vca)
3290 elif ee_level == EELevel.KDU:
3291 kdu_resource_data = self._get_kdu_resource_data(
3292 ee_relation, db_nsr, cached_vnfds
3293 )
3294 if kdu_resource_data:
3295 deployed_component = DeployedK8sResource(kdu_resource_data)
3296 return deployed_component
3297
3298 async def _add_relation(
3299 self,
3300 relation: Relation,
3301 vca_type: str,
3302 db_nsr: Dict[str, Any],
3303 cached_vnfds: Dict[str, Any],
3304 cached_vnfrs: Dict[str, Any],
3305 ) -> bool:
3306 deployed_provider = self._get_deployed_component(
3307 relation.provider, db_nsr, cached_vnfds
3308 )
3309 deployed_requirer = self._get_deployed_component(
3310 relation.requirer, db_nsr, cached_vnfds
3311 )
3312 if (
3313 deployed_provider
3314 and deployed_requirer
3315 and deployed_provider.config_sw_installed
3316 and deployed_requirer.config_sw_installed
3317 ):
3318 provider_db_vnfr = (
3319 self._get_vnfr(
3320 relation.provider.nsr_id,
3321 relation.provider.vnf_profile_id,
3322 cached_vnfrs,
3323 )
3324 if relation.provider.vnf_profile_id
3325 else None
3326 )
3327 requirer_db_vnfr = (
3328 self._get_vnfr(
3329 relation.requirer.nsr_id,
3330 relation.requirer.vnf_profile_id,
3331 cached_vnfrs,
3332 )
3333 if relation.requirer.vnf_profile_id
3334 else None
3335 )
3336 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3337 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3338 provider_relation_endpoint = RelationEndpoint(
3339 deployed_provider.ee_id,
3340 provider_vca_id,
3341 relation.provider.endpoint,
3342 )
3343 requirer_relation_endpoint = RelationEndpoint(
3344 deployed_requirer.ee_id,
3345 requirer_vca_id,
3346 relation.requirer.endpoint,
3347 )
3348 try:
3349 await self.vca_map[vca_type].add_relation(
3350 provider=provider_relation_endpoint,
3351 requirer=requirer_relation_endpoint,
3352 )
3353 except N2VCException as exception:
3354 self.logger.error(exception)
3355 raise LcmException(exception)
3356 return True
3357 return False
3358
3359 async def _add_vca_relations(
3360 self,
3361 logging_text,
3362 nsr_id,
3363 vca_type: str,
3364 vca_index: int,
3365 timeout: int = 3600,
3366 ) -> bool:
3367 # steps:
3368 # 1. find all relations for this VCA
3369 # 2. wait for other peers related
3370 # 3. add relations
3371
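# Note: peers may not be ready yet. _add_relation() returns False until both
# endpoints are deployed with their configuration software installed, so the
# loop below retries every 5 seconds until `timeout` expires.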
3372 try:
3373 # STEP 1: find all relations for this VCA
3374
3375 # read nsr record
3376 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3377 nsd = get_nsd(db_nsr)
3378
3379 # this VCA data
3380 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3381 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3382
3383 cached_vnfds = {}
3384 cached_vnfrs = {}
3385 relations = []
3386 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3387 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3388
3389 # if no relations, terminate
3390 if not relations:
3391 self.logger.debug(logging_text + " No relations")
3392 return True
3393
3394 self.logger.debug(logging_text + " adding relations {}".format(relations))
3395
3396 # add all relations
3397 start = time()
3398 while True:
3399 # check timeout
3400 now = time()
3401 if now - start >= timeout:
3402 self.logger.error(logging_text + " : timeout adding relations")
3403 return False
3404
3405 # reload nsr from database (we need an up-to-date _admin.deployed.VCA record)
3406 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3407
3408 # for each relation, find the related VCAs
3409 for relation in relations.copy():
3410 added = await self._add_relation(
3411 relation,
3412 vca_type,
3413 db_nsr,
3414 cached_vnfds,
3415 cached_vnfrs,
3416 )
3417 if added:
3418 relations.remove(relation)
3419
3420 if not relations:
3421 self.logger.debug("Relations added")
3422 break
3423 await asyncio.sleep(5.0)
3424
3425 return True
3426
3427 except Exception as e:
3428 self.logger.warning(logging_text + " ERROR adding relations: {}".format(e))
3429 return False
3430
3431 async def _install_kdu(
3432 self,
3433 nsr_id: str,
3434 nsr_db_path: str,
3435 vnfr_data: dict,
3436 kdu_index: int,
3437 kdud: dict,
3438 vnfd: dict,
3439 k8s_instance_info: dict,
3440 k8params: dict = None,
3441 timeout: int = 600,
3442 vca_id: str = None,
3443 ):
3444 try:
3445 k8sclustertype = k8s_instance_info["k8scluster-type"]
3446 # Instantiate kdu
3447 db_dict_install = {
3448 "collection": "nsrs",
3449 "filter": {"_id": nsr_id},
3450 "path": nsr_db_path,
3451 }
3452
3453 if k8s_instance_info.get("kdu-deployment-name"):
3454 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3455 else:
3456 kdu_instance = self.k8scluster_map[
3457 k8sclustertype
3458 ].generate_kdu_instance_name(
3459 db_dict=db_dict_install,
3460 kdu_model=k8s_instance_info["kdu-model"],
3461 kdu_name=k8s_instance_info["kdu-name"],
3462 )
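# The exact instance-name format is delegated to the connector; typically it
# is derived from the KDU name plus a unique suffix (an assumption; the only
# contract here is that the returned name is usable as kdu_instance below).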
3463
3464 # Update the nsrs table with the kdu-instance value
3465 self.update_db_2(
3466 item="nsrs",
3467 _id=nsr_id,
3468 _desc={nsr_db_path + ".kdu-instance": kdu_instance},
3469 )
3470
3471 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3472 # `juju-bundle`. This verification is needed because there is no standard/homogeneous namespace
3473 # between Helm Chart and Juju Bundle-based KNFs. If we find a way of having a homogeneous
3474 # namespace, this first verification could be removed, and the next step would be done for any kind
3475 # of KNF.
3476 # TODO -> find a way to have a homogeneous namespace between Helm Chart and Juju Bundle-based
3477 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3478 if k8sclustertype in ("juju", "juju-bundle"):
3479 # First, verify whether the current namespace is present in `_admin.projects_read` (if not, it means
3480 # that the user passed a custom namespace where the KDU should be deployed)
3481 if (
3482 self.db.count(
3483 table="nsrs",
3484 q_filter={
3485 "_id": nsr_id,
3486 "_admin.projects_write": k8s_instance_info["namespace"],
3487 "_admin.projects_read": k8s_instance_info["namespace"],
3488 },
3489 )
3490 > 0
3491 ):
3492 self.logger.debug(
3493 f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3494 )
3495 self.update_db_2(
3496 item="nsrs",
3497 _id=nsr_id,
3498 _desc={f"{nsr_db_path}.namespace": kdu_instance},
3499 )
3500 k8s_instance_info["namespace"] = kdu_instance
3501
3502 await self.k8scluster_map[k8sclustertype].install(
3503 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3504 kdu_model=k8s_instance_info["kdu-model"],
3505 atomic=True,
3506 params=k8params,
3507 db_dict=db_dict_install,
3508 timeout=timeout,
3509 kdu_name=k8s_instance_info["kdu-name"],
3510 namespace=k8s_instance_info["namespace"],
3511 kdu_instance=kdu_instance,
3512 vca_id=vca_id,
3513 )
3514
3515 # Obtain services in order to get the management service IP
3516 services = await self.k8scluster_map[k8sclustertype].get_services(
3517 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3518 kdu_instance=kdu_instance,
3519 namespace=k8s_instance_info["namespace"],
3520 )
3521
3522 # Obtain management service info (if exists)
3523 vnfr_update_dict = {}
3524 kdu_config = get_configuration(vnfd, kdud["name"])
3525 if kdu_config:
3526 target_ee_list = kdu_config.get("execution-environment-list", [])
3527 else:
3528 target_ee_list = []
3529
3530 if services:
3531 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
3532 mgmt_services = [
3533 service
3534 for service in kdud.get("service", [])
3535 if service.get("mgmt-service")
3536 ]
3537 for mgmt_service in mgmt_services:
3538 for service in services:
3539 if service["name"].startswith(mgmt_service["name"]):
3540 # Mgmt service found, obtain the service IP
3541 ip = service.get("external_ip", service.get("cluster_ip"))
3542 if isinstance(ip, list) and len(ip) == 1:
3543 ip = ip[0]
3544
3545 vnfr_update_dict[
3546 "kdur.{}.ip-address".format(kdu_index)
3547 ] = ip
3548
3549 # Check whether the mgmt IP at the VNF must also be updated
3550 service_external_cp = mgmt_service.get(
3551 "external-connection-point-ref"
3552 )
3553 if service_external_cp:
3554 if (
3555 deep_get(vnfd, ("mgmt-interface", "cp"))
3556 == service_external_cp
3557 ):
3558 vnfr_update_dict["ip-address"] = ip
3559
3560 if find_in_list(
3561 target_ee_list,
3562 lambda ee: ee.get(
3563 "external-connection-point-ref", ""
3564 )
3565 == service_external_cp,
3566 ):
3567 vnfr_update_dict[
3568 "kdur.{}.ip-address".format(kdu_index)
3569 ] = ip
3570 break
3571 else:
3572 self.logger.warning(
3573 "Mgmt service name: {} not found".format(
3574 mgmt_service["name"]
3575 )
3576 )
3577
3578 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3579 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
3580
3581 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
3582 if (
3583 kdu_config
3584 and kdu_config.get("initial-config-primitive")
3585 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3586 ):
3587 initial_config_primitive_list = kdu_config.get(
3588 "initial-config-primitive"
3589 )
3590 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3591
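# Execute the initial config primitives sequentially in ascending "seq"
# order; each execution is bounded by the same overall timeout.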
3592 for initial_config_primitive in initial_config_primitive_list:
3593 primitive_params_ = self._map_primitive_params(
3594 initial_config_primitive, {}, {}
3595 )
3596
3597 await asyncio.wait_for(
3598 self.k8scluster_map[k8sclustertype].exec_primitive(
3599 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3600 kdu_instance=kdu_instance,
3601 primitive_name=initial_config_primitive["name"],
3602 params=primitive_params_,
3603 db_dict=db_dict_install,
3604 vca_id=vca_id,
3605 ),
3606 timeout=timeout,
3607 )
3608
3609 except Exception as e:
3610 # Prepare update db with error and raise exception
3611 try:
3612 self.update_db_2(
3613 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3614 )
3615 self.update_db_2(
3616 "vnfrs",
3617 vnfr_data.get("_id"),
3618 {"kdur.{}.status".format(kdu_index): "ERROR"},
3619 )
3620 except Exception as error:
3621 # ignore to keep original exception
3622 self.logger.warning(
3623 f"An exception occurred while updating DB: {str(error)}"
3624 )
3625 # reraise original error
3626 raise
3627
3628 return kdu_instance
3629
3630 async def deploy_kdus(
3631 self,
3632 logging_text,
3633 nsr_id,
3634 nslcmop_id,
3635 db_vnfrs,
3636 db_vnfds,
3637 task_instantiation_info,
3638 ):
3639 # Launch kdus if present in the descriptor
3640
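# Cache mapping OSM k8s-cluster _id -> connector cluster uuid, kept per
# cluster type so each cluster is resolved (and initialized if needed) once.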
3641 k8scluster_id_2_uuid = {
3642 "helm-chart-v3": {},
3643 "juju-bundle": {},
3644 }
3645
3646 async def _get_cluster_id(cluster_id, cluster_type):
3647 nonlocal k8scluster_id_2_uuid
3648 if cluster_id in k8scluster_id_2_uuid[cluster_type]:
3649 return k8scluster_id_2_uuid[cluster_type][cluster_id]
3650
3651 # check if the K8s cluster is still being created and wait for any related tasks in process
3652 task_name, task_dependency = self.lcm_tasks.lookfor_related(
3653 "k8scluster", cluster_id
3654 )
3655 if task_dependency:
3656 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3657 task_name, cluster_id
3658 )
3659 self.logger.debug(logging_text + text)
3660 await asyncio.wait(task_dependency, timeout=3600)
3661
3662 db_k8scluster = self.db.get_one(
3663 "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
3664 )
3665 if not db_k8scluster:
3666 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
3667
3668 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
3669 if not k8s_id:
3670 if cluster_type == "helm-chart-v3":
3671 try:
3672 # backward compatibility for existing clusters that have not been initialized for helm v3
3673 k8s_credentials = yaml.safe_dump(
3674 db_k8scluster.get("credentials")
3675 )
3676 k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
3677 k8s_credentials, reuse_cluster_uuid=cluster_id
3678 )
3679 db_k8scluster_update = {}
3680 db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
3681 db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
3682 db_k8scluster_update[
3683 "_admin.helm-chart-v3.created"
3684 ] = uninstall_sw
3685 db_k8scluster_update[
3686 "_admin.helm-chart-v3.operationalState"
3687 ] = "ENABLED"
3688 self.update_db_2(
3689 "k8sclusters", cluster_id, db_k8scluster_update
3690 )
3691 except Exception as e:
3692 self.logger.error(
3693 logging_text
3694 + "error initializing helm-v3 cluster: {}".format(str(e))
3695 )
3696 raise LcmException(
3697 "K8s cluster '{}' has not been initialized for '{}'".format(
3698 cluster_id, cluster_type
3699 )
3700 )
3701 else:
3702 raise LcmException(
3703 "K8s cluster '{}' has not been initialized for '{}'".format(
3704 cluster_id, cluster_type
3705 )
3706 )
3707 k8scluster_id_2_uuid[cluster_type][cluster_id] = k8s_id
3708 return k8s_id
3709
3710 logging_text += "Deploy kdus: "
3711 step = ""
3712 try:
3713 db_nsr_update = {"_admin.deployed.K8s": []}
3714 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3715
3716 index = 0
3717 updated_cluster_list = []
3718 updated_v3_cluster_list = []
3719
3720 for vnfr_data in db_vnfrs.values():
3721 vca_id = self.get_vca_id(vnfr_data, {})
3722 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
3723 # Step 0: Prepare and set parameters
3724 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
3725 vnfd_id = vnfr_data.get("vnfd-id")
3726 vnfd_with_id = find_in_list(
3727 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3728 )
3729 kdud = next(
3730 kdud
3731 for kdud in vnfd_with_id["kdu"]
3732 if kdud["name"] == kdur["kdu-name"]
3733 )
3734 namespace = kdur.get("k8s-namespace")
3735 kdu_deployment_name = kdur.get("kdu-deployment-name")
3736 if kdur.get("helm-chart"):
3737 kdumodel = kdur["helm-chart"]
3738 # Only helm v3 is supported: every helm-chart KDU is handled by the helm-chart-v3 connector
3739 k8sclustertype = "helm-chart-v3"
3740 self.logger.debug("kdur: {}".format(kdur))
3741 elif kdur.get("juju-bundle"):
3742 kdumodel = kdur["juju-bundle"]
3743 k8sclustertype = "juju-bundle"
3744 else:
3745 raise LcmException(
3746 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3747 "juju-bundle. Maybe an old NBI version is running".format(
3748 vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
3749 )
3750 )
3751 # check if kdumodel is a file and exists
3752 try:
3753 vnfd_with_id = find_in_list(
3754 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3755 )
3756 storage = deep_get(vnfd_with_id, ("_admin", "storage"))
3757 if storage: # may not be present if the vnfd has no artifacts
3758 # path format: /vnfdid/pkgdir/helm-charts|juju-bundles/kdumodel
# Map the cluster type to the package artifact folder: embedded charts
# live under "helm-charts" even though the connector type is helm-chart-v3
artifact_type = (
"helm-chart" if k8sclustertype == "helm-chart-v3" else k8sclustertype
)
3759 if storage["pkg-dir"]:
3760 filename = "{}/{}/{}s/{}".format(
3761 storage["folder"],
3762 storage["pkg-dir"],
3763 artifact_type,
3764 kdumodel,
3765 )
3766 else:
3767 filename = "{}/Scripts/{}s/{}".format(
3768 storage["folder"],
3769 artifact_type,
3770 kdumodel,
3771 )
3772 if self.fs.file_exists(
3773 filename, mode="file"
3774 ) or self.fs.file_exists(filename, mode="dir"):
3775 kdumodel = self.fs.path + filename
3776 except (asyncio.TimeoutError, asyncio.CancelledError):
3777 raise
3778 except Exception as e: # it is not a file
3779 self.logger.warning(f"An exception occurred: {str(e)}")
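# If no packaged artifact matched, kdumodel is kept as-is; it is then
# presumably a chart/bundle reference to be resolved from a repository.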
3780
3781 k8s_cluster_id = kdur["k8s-cluster"]["id"]
3782 step = "Synchronize repos for k8s cluster '{}'".format(
3783 k8s_cluster_id
3784 )
3785 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
3786
3787 # Synchronize repos
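# NOTE: only "helm-chart-v3" and "juju-bundle" are assigned above, so the
# plain "helm-chart" (helm v2) branches below are legacy and never taken.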
3788 if (
3789 k8sclustertype == "helm-chart"
3790 and cluster_uuid not in updated_cluster_list
3791 ) or (
3792 k8sclustertype == "helm-chart-v3"
3793 and cluster_uuid not in updated_v3_cluster_list
3794 ):
3795 del_repo_list, added_repo_dict = await asyncio.ensure_future(
3796 self.k8scluster_map[k8sclustertype].synchronize_repos(
3797 cluster_uuid=cluster_uuid
3798 )
3799 )
3800 if del_repo_list or added_repo_dict:
3801 if k8sclustertype == "helm-chart":
3802 unset = {
3803 "_admin.helm_charts_added." + item: None
3804 for item in del_repo_list
3805 }
3806 updated = {
3807 "_admin.helm_charts_added." + item: name
3808 for item, name in added_repo_dict.items()
3809 }
3810 updated_cluster_list.append(cluster_uuid)
3811 elif k8sclustertype == "helm-chart-v3":
3812 unset = {
3813 "_admin.helm_charts_v3_added." + item: None
3814 for item in del_repo_list
3815 }
3816 updated = {
3817 "_admin.helm_charts_v3_added." + item: name
3818 for item, name in added_repo_dict.items()
3819 }
3820 updated_v3_cluster_list.append(cluster_uuid)
3821 self.logger.debug(
3822 logging_text + "repos synchronized on k8s cluster "
3823 "'{}' to_delete: {}, to_add: {}".format(
3824 k8s_cluster_id, del_repo_list, added_repo_dict
3825 )
3826 )
3827 self.db.set_one(
3828 "k8sclusters",
3829 {"_id": k8s_cluster_id},
3830 updated,
3831 unset=unset,
3832 )
3833
3834 # Instantiate kdu
3835 step = "Instantiating KDU {}.{} in k8s cluster {}".format(
3836 vnfr_data["member-vnf-index-ref"],
3837 kdur["kdu-name"],
3838 k8s_cluster_id,
3839 )
3840 k8s_instance_info = {
3841 "kdu-instance": None,
3842 "k8scluster-uuid": cluster_uuid,
3843 "k8scluster-type": k8sclustertype,
3844 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
3845 "kdu-name": kdur["kdu-name"],
3846 "kdu-model": kdumodel,
3847 "namespace": namespace,
3848 "kdu-deployment-name": kdu_deployment_name,
3849 }
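# Record this KDU under _admin.deployed.K8s.<index> so later operations
# (action/upgrade/terminate) can locate the deployment.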
3850 db_path = "_admin.deployed.K8s.{}".format(index)
3851 db_nsr_update[db_path] = k8s_instance_info
3852 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3853 vnfd_with_id = find_in_list(
3854 db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
3855 )
3856 task = asyncio.ensure_future(
3857 self._install_kdu(
3858 nsr_id,
3859 db_path,
3860 vnfr_data,
3861 kdu_index,
3862 kdud,
3863 vnfd_with_id,
3864 k8s_instance_info,
3865 k8params=desc_params,
3866 timeout=1800,
3867 vca_id=vca_id,
3868 )
3869 )
3870 self.lcm_tasks.register(
3871 "ns",
3872 nsr_id,
3873 nslcmop_id,
3874 "instantiate_KDU-{}".format(index),
3875 task,
3876 )
3877 task_instantiation_info[task] = "Deploying KDU {}".format(
3878 kdur["kdu-name"]
3879 )
3880
3881 index += 1
3882
3883 except (LcmException, asyncio.CancelledError):
3884 raise
3885 except Exception as e:
3886 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
3887 if isinstance(e, (N2VCException, DbException)):
3888 self.logger.error(logging_text + msg)
3889 else:
3890 self.logger.critical(logging_text + msg, exc_info=True)
3891 raise LcmException(msg)
3892 finally:
3893 if db_nsr_update:
3894 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3895
3896 def _deploy_n2vc(
3897 self,
3898 logging_text,
3899 db_nsr,
3900 db_vnfr,
3901 nslcmop_id,
3902 nsr_id,
3903 nsi_id,
3904 vnfd_id,
3905 vdu_id,
3906 kdu_name,
3907 member_vnf_index,
3908 vdu_index,
3909 kdu_index,
3910 vdu_name,
3911 deploy_params,
3912 descriptor_config,
3913 base_folder,
3914 task_instantiation_info,
3915 stage,
3916 ):
3917 # launch instantiate_N2VC in an asyncio task and register the task object
3918 # Look up this charm's information in database <nsrs>._admin.deployed.VCA;
3919 # if not found, create one entry and update the database
3920 # fill db_nsr._admin.deployed.VCA.<index>
3921
3922 self.logger.debug(
3923 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
3924 )
3925
3926 charm_name = ""
3927 get_charm_name = False
3928 if "execution-environment-list" in descriptor_config:
3929 ee_list = descriptor_config.get("execution-environment-list", [])
3930 elif "juju" in descriptor_config:
3931 ee_list = [descriptor_config] # ns charms
3932 if "execution-environment-list" not in descriptor_config:
3933 # charm name is only required for ns charms
3934 get_charm_name = True
3935 else: # other types, such as scripts, are not supported
3936 ee_list = []
3937
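# Determine the VCA type per execution environment: a juju charm maps to
# lxc_proxy_charm (k8s_proxy_charm when cloud is "k8s", native_charm when
# proxy is false), while a helm chart maps to helm-v3.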
3938 for ee_item in ee_list:
3939 self.logger.debug(
3940 logging_text
3941 + "_deploy_n2vc ee_item juju={}, helm={}".format(
3942 ee_item.get("juju"), ee_item.get("helm-chart")
3943 )
3944 )
3945 ee_descriptor_id = ee_item.get("id")
3946 if ee_item.get("juju"):
3947 vca_name = ee_item["juju"].get("charm")
3948 if get_charm_name:
3949 charm_name = self.find_charm_name(db_nsr, str(vca_name))
3950 vca_type = (
3951 "lxc_proxy_charm"
3952 if ee_item["juju"].get("charm") is not None
3953 else "native_charm"
3954 )
3955 if ee_item["juju"].get("cloud") == "k8s":
3956 vca_type = "k8s_proxy_charm"
3957 elif ee_item["juju"].get("proxy") is False:
3958 vca_type = "native_charm"
3959 elif ee_item.get("helm-chart"):
3960 vca_name = ee_item["helm-chart"]
3961 vca_type = "helm-v3"
3962 else:
3963 self.logger.debug(
3964 logging_text + "skipping non juju neither charm configuration"
3965 )
3966 continue
3967
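# for/else: reuse a matching deployed VCA record if the loop breaks;
# otherwise the else branch creates and persists a new entry.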
3968 vca_index = -1
3969 for vca_index, vca_deployed in enumerate(
3970 db_nsr["_admin"]["deployed"]["VCA"]
3971 ):
3972 if not vca_deployed:
3973 continue
3974 if (
3975 vca_deployed.get("member-vnf-index") == member_vnf_index
3976 and vca_deployed.get("vdu_id") == vdu_id
3977 and vca_deployed.get("kdu_name") == kdu_name
3978 and vca_deployed.get("vdu_count_index", 0) == vdu_index
3979 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
3980 ):
3981 break
3982 else:
3983 # not found, create one.
3984 target = (
3985 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
3986 )
3987 if vdu_id:
3988 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
3989 elif kdu_name:
3990 target += "/kdu/{}".format(kdu_name)
3991 vca_deployed = {
3992 "target_element": target,
3993 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
3994 "member-vnf-index": member_vnf_index,
3995 "vdu_id": vdu_id,
3996 "kdu_name": kdu_name,
3997 "vdu_count_index": vdu_index,
3998 "operational-status": "init", # TODO revise
3999 "detailed-status": "", # TODO revise
4000 "step": "initial-deploy", # TODO revise
4001 "vnfd_id": vnfd_id,
4002 "vdu_name": vdu_name,
4003 "type": vca_type,
4004 "ee_descriptor_id": ee_descriptor_id,
4005 "charm_name": charm_name,
4006 }
4007 vca_index += 1
4008
4009 # create VCA and configurationStatus in db
4010 db_dict = {
4011 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
4012 "configurationStatus.{}".format(vca_index): dict(),
4013 }
4014 self.update_db_2("nsrs", nsr_id, db_dict)
4015
4016 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
4017
4018 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
4019 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
4020 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
4021
4022 # Launch task
4023 task_n2vc = asyncio.ensure_future(
4024 self.instantiate_N2VC(
4025 logging_text=logging_text,
4026 vca_index=vca_index,
4027 nsi_id=nsi_id,
4028 db_nsr=db_nsr,
4029 db_vnfr=db_vnfr,
4030 vdu_id=vdu_id,
4031 kdu_name=kdu_name,
4032 vdu_index=vdu_index,
4033 kdu_index=kdu_index,
4034 deploy_params=deploy_params,
4035 config_descriptor=descriptor_config,
4036 base_folder=base_folder,
4037 nslcmop_id=nslcmop_id,
4038 stage=stage,
4039 vca_type=vca_type,
4040 vca_name=vca_name,
4041 ee_config_descriptor=ee_item,
4042 )
4043 )
4044 self.lcm_tasks.register(
4045 "ns",
4046 nsr_id,
4047 nslcmop_id,
4048 "instantiate_N2VC-{}".format(vca_index),
4049 task_n2vc,
4050 )
4051 task_instantiation_info[
4052 task_n2vc
4053 ] = self.task_name_deploy_vca + " {}.{}".format(
4054 member_vnf_index or "", vdu_id or ""
4055 )
4056
4057 def _format_additional_params(self, params):
4058 params = params or {}
4059 for key, value in params.items():
4060 if str(value).startswith("!!yaml "):
4061 params[key] = yaml.safe_load(value[7:])
4062 return params
4063
4064 def _get_terminate_primitive_params(self, seq, vnf_index):
4065 primitive = seq.get("name")
4066 primitive_params = {}
4067 params = {
4068 "member_vnf_index": vnf_index,
4069 "primitive": primitive,
4070 "primitive_params": primitive_params,
4071 }
4072 desc_params = {}
4073 return self._map_primitive_params(seq, params, desc_params)
4074
4075 # sub-operations
4076
4077 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
4078 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
4079 if op.get("operationState") == "COMPLETED":
4080 # b. Skip sub-operation
4081 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4082 return self.SUBOPERATION_STATUS_SKIP
4083 else:
4084 # c. retry executing sub-operation
4085 # The sub-operation exists, and operationState != 'COMPLETED'
4086 # Update operationState = 'PROCESSING' to indicate a retry.
4087 operationState = "PROCESSING"
4088 detailed_status = "In progress"
4089 self._update_suboperation_status(
4090 db_nslcmop, op_index, operationState, detailed_status
4091 )
4092 # Return the sub-operation index
4093 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4094 # with arguments extracted from the sub-operation
4095 return op_index
4096
4097 # Find a sub-operation where all keys in a matching dictionary must match
4098 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4099 def _find_suboperation(self, db_nslcmop, match):
4100 if db_nslcmop and match:
4101 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4102 for i, op in enumerate(op_list):
4103 if all(op.get(k) == match[k] for k in match):
4104 return i
4105 return self.SUBOPERATION_STATUS_NOT_FOUND
4106
4107 # Update status for a sub-operation given its index
4108 def _update_suboperation_status(
4109 self, db_nslcmop, op_index, operationState, detailed_status
4110 ):
4111 # Update DB for HA tasks
4112 q_filter = {"_id": db_nslcmop["_id"]}
4113 update_dict = {
4114 "_admin.operations.{}.operationState".format(op_index): operationState,
4115 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4116 }
4117 self.db.set_one(
4118 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4119 )
4120
4121 # Add sub-operation, return the index of the added sub-operation
4122 # Optionally, set operationState, detailed-status, and operationType
4123 # Status and type are currently set for 'scale' sub-operations:
4124 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4125 # 'detailed-status' : status message
4126 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4127 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4128 def _add_suboperation(
4129 self,
4130 db_nslcmop,
4131 vnf_index,
4132 vdu_id,
4133 vdu_count_index,
4134 vdu_name,
4135 primitive,
4136 mapped_primitive_params,
4137 operationState=None,
4138 detailed_status=None,
4139 operationType=None,
4140 RO_nsr_id=None,
4141 RO_scaling_info=None,
4142 ):
4143 if not db_nslcmop:
4144 return self.SUBOPERATION_STATUS_NOT_FOUND
4145 # Get the "_admin.operations" list, if it exists
4146 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4147 op_list = db_nslcmop_admin.get("operations")
4148 # Create or append to the "_admin.operations" list
4149 new_op = {
4150 "member_vnf_index": vnf_index,
4151 "vdu_id": vdu_id,
4152 "vdu_count_index": vdu_count_index,
4153 "primitive": primitive,
4154 "primitive_params": mapped_primitive_params,
4155 }
4156 if operationState:
4157 new_op["operationState"] = operationState
4158 if detailed_status:
4159 new_op["detailed-status"] = detailed_status
4160 if operationType:
4161 new_op["lcmOperationType"] = operationType
4162 if RO_nsr_id:
4163 new_op["RO_nsr_id"] = RO_nsr_id
4164 if RO_scaling_info:
4165 new_op["RO_scaling_info"] = RO_scaling_info
4166 if not op_list:
4167 # No existing operations, create key 'operations' with current operation as first list element
4168 db_nslcmop_admin.update({"operations": [new_op]})
4169 op_list = db_nslcmop_admin.get("operations")
4170 else:
4171 # Existing operations, append operation to list
4172 op_list.append(new_op)
4173
4174 db_nslcmop_update = {"_admin.operations": op_list}
4175 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4176 op_index = len(op_list) - 1
4177 return op_index
4178
4179 # Helper methods for scale() sub-operations
4180
4181 # pre-scale/post-scale:
4182 # Check for 3 different cases:
4183 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4184 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4185 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4186 def _check_or_add_scale_suboperation(
4187 self,
4188 db_nslcmop,
4189 vnf_index,
4190 vnf_config_primitive,
4191 primitive_params,
4192 operationType,
4193 RO_nsr_id=None,
4194 RO_scaling_info=None,
4195 ):
4196 # Find this sub-operation
4197 if RO_nsr_id and RO_scaling_info:
4198 operationType = "SCALE-RO"
4199 match = {
4200 "member_vnf_index": vnf_index,
4201 "RO_nsr_id": RO_nsr_id,
4202 "RO_scaling_info": RO_scaling_info,
4203 }
4204 else:
4205 match = {
4206 "member_vnf_index": vnf_index,
4207 "primitive": vnf_config_primitive,
4208 "primitive_params": primitive_params,
4209 "lcmOperationType": operationType,
4210 }
4211 op_index = self._find_suboperation(db_nslcmop, match)
4212 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4213 # a. New sub-operation
4214 # The sub-operation does not exist, add it.
4215 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4216 # The following parameters are set to None for all kind of scaling:
4217 vdu_id = None
4218 vdu_count_index = None
4219 vdu_name = None
4220 if RO_nsr_id and RO_scaling_info:
4221 vnf_config_primitive = None
4222 primitive_params = None
4223 else:
4224 RO_nsr_id = None
4225 RO_scaling_info = None
4226 # Initial status for sub-operation
4227 operationState = "PROCESSING"
4228 detailed_status = "In progress"
4229 # Add sub-operation for pre/post-scaling (zero or more operations)
4230 self._add_suboperation(
4231 db_nslcmop,
4232 vnf_index,
4233 vdu_id,
4234 vdu_count_index,
4235 vdu_name,
4236 vnf_config_primitive,
4237 primitive_params,
4238 operationState,
4239 detailed_status,
4240 operationType,
4241 RO_nsr_id,
4242 RO_scaling_info,
4243 )
4244 return self.SUBOPERATION_STATUS_NEW
4245 else:
4246 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4247 # or op_index (operationState != 'COMPLETED')
4248 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4249
4251
4252 async def destroy_N2VC(
4253 self,
4254 logging_text,
4255 db_nslcmop,
4256 vca_deployed,
4257 config_descriptor,
4258 vca_index,
4259 destroy_ee=True,
4260 exec_primitives=True,
4261 scaling_in=False,
4262 vca_id: str = None,
4263 ):
4264 """
4265 Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
4266 :param logging_text:
4267 :param db_nslcmop:
4268 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
4269 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
4270 :param vca_index: index in the database _admin.deployed.VCA
4271 :param destroy_ee: False to skip destruction, because all the EEs will be destroyed at once later
4272 :param exec_primitives: False to skip the terminate primitives, because the configuration did not
4273 complete or did not execute properly
4274 :param scaling_in: True destroys the application, False destroys the model
4275 :return: None or exception
4276 """
4277
4278 self.logger.debug(
4279 logging_text
4280 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
4281 vca_index, vca_deployed, config_descriptor, destroy_ee
4282 )
4283 )
4284
4285 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
4286
4287 # execute terminate_primitives
4288 if exec_primitives:
4289 terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
4290 config_descriptor.get("terminate-config-primitive"),
4291 vca_deployed.get("ee_descriptor_id"),
4292 )
4293 vdu_id = vca_deployed.get("vdu_id")
4294 vdu_count_index = vca_deployed.get("vdu_count_index")
4295 vdu_name = vca_deployed.get("vdu_name")
4296 vnf_index = vca_deployed.get("member-vnf-index")
4297 if terminate_primitives and vca_deployed.get("needed_terminate"):
4298 for seq in terminate_primitives:
4299 # For each sequence in list, get primitive and call _ns_execute_primitive()
4300 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
4301 vnf_index, seq.get("name")
4302 )
4303 self.logger.debug(logging_text + step)
4304 # Create the primitive for each sequence, i.e. "primitive": "touch"
4305 primitive = seq.get("name")
4306 mapped_primitive_params = self._get_terminate_primitive_params(
4307 seq, vnf_index
4308 )
4309
4310 # Add sub-operation
4311 self._add_suboperation(
4312 db_nslcmop,
4313 vnf_index,
4314 vdu_id,
4315 vdu_count_index,
4316 vdu_name,
4317 primitive,
4318 mapped_primitive_params,
4319 )
4320 # Sub-operations: Call _ns_execute_primitive() instead of action()
4321 try:
4322 result, result_detail = await self._ns_execute_primitive(
4323 vca_deployed["ee_id"],
4324 primitive,
4325 mapped_primitive_params,
4326 vca_type=vca_type,
4327 vca_id=vca_id,
4328 )
4329 except LcmException:
4330 # this happens when the VCA is not deployed; in that case there is nothing to terminate
4331 continue
4332 result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
4333 if result not in result_ok:
4334 raise LcmException(
4335 "terminate_primitive {} for vnf_member_index={} fails with "
4336 "error {}".format(seq.get("name"), vnf_index, result_detail)
4337 )
4338 # mark that this VCA no longer needs terminate primitives
4339 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
4340 vca_index
4341 )
4342 self.update_db_2(
4343 "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
4344 )
4345
4346 # Delete Prometheus Jobs if any
4347 # This uses NSR_ID, so it will destroy any jobs under this index
4348 self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})
4349
4350 if destroy_ee:
4351 await self.vca_map[vca_type].delete_execution_environment(
4352 vca_deployed["ee_id"],
4353 scaling_in=scaling_in,
4354 vca_type=vca_type,
4355 vca_id=vca_id,
4356 )
4357
4358 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4359 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4360 namespace = "." + db_nsr["_id"]
4361 try:
4362 await self.n2vc.delete_namespace(
4363 namespace=namespace,
4364 total_timeout=self.timeout.charm_delete,
4365 vca_id=vca_id,
4366 )
4367 except N2VCNotFound: # already deleted. Skip
4368 pass
4369 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4370
4371 async def terminate(self, nsr_id, nslcmop_id):
4372 # Try to lock HA task here
4373 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4374 if not task_is_locked_by_me:
4375 return
4376
4377 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
4378 self.logger.debug(logging_text + "Enter")
4379 timeout_ns_terminate = self.timeout.ns_terminate
4380 db_nsr = None
4381 db_nslcmop = None
4382 operation_params = None
4383 exc = None
4384 error_list = [] # annotates all failed error messages
4385 db_nslcmop_update = {}
4386 autoremove = False # autoremove after terminated
4387 tasks_dict_info = {}
4388 db_nsr_update = {}
4389 stage = [
4390 "Stage 1/3: Preparing task.",
4391 "Waiting for previous operations to terminate.",
4392 "",
4393 ]
4394 # ^ contains [stage, step, VIM-status]
4395 try:
4396 # wait for any previous tasks in process
4397 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4398
4399 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
4400 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4401 operation_params = db_nslcmop.get("operationParams") or {}
4402 if operation_params.get("timeout_ns_terminate"):
4403 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
4404 stage[1] = "Getting nsr={} from db.".format(nsr_id)
4405 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4406
4407 db_nsr_update["operational-status"] = "terminating"
4408 db_nsr_update["config-status"] = "terminating"
4409 self._write_ns_status(
4410 nsr_id=nsr_id,
4411 ns_state="TERMINATING",
4412 current_operation="TERMINATING",
4413 current_operation_id=nslcmop_id,
4414 other_update=db_nsr_update,
4415 )
4416 self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
4417 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
4418 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
4419 return
4420
4421 stage[1] = "Getting vnf descriptors from db."
4422 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
4423 db_vnfrs_dict = {
4424 db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
4425 }
4426 db_vnfds_from_id = {}
4427 db_vnfds_from_member_index = {}
4428 # Loop over VNFRs
4429 for vnfr in db_vnfrs_list:
4430 vnfd_id = vnfr["vnfd-id"]
4431 if vnfd_id not in db_vnfds_from_id:
4432 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
4433 db_vnfds_from_id[vnfd_id] = vnfd
4434 db_vnfds_from_member_index[
4435 vnfr["member-vnf-index-ref"]
4436 ] = db_vnfds_from_id[vnfd_id]
4437
4438 # Destroy individual execution environments when there are terminate primitives.
4439 # The remaining EEs will be deleted all at once afterwards
4440 # TODO - check before calling _destroy_N2VC
4441 # if not operation_params.get("skip_terminate_primitives"):#
4442 # or not vca.get("needed_terminate"):
4443 stage[0] = "Stage 2/3 execute terminating primitives."
4444 self.logger.debug(logging_text + stage[0])
4445 stage[1] = "Looking execution environment that needs terminate."
4446 self.logger.debug(logging_text + stage[1])
4447
4448 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
4449 config_descriptor = None
4450 vca_member_vnf_index = vca.get("member-vnf-index")
4451 vca_id = self.get_vca_id(
4452 db_vnfrs_dict.get(vca_member_vnf_index)
4453 if vca_member_vnf_index
4454 else None,
4455 db_nsr,
4456 )
4457 if not vca or not vca.get("ee_id"):
4458 continue
4459 if not vca.get("member-vnf-index"):
4460 # ns
4461 config_descriptor = db_nsr.get("ns-configuration")
4462 elif vca.get("vdu_id"):
4463 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4464 config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
4465 elif vca.get("kdu_name"):
4466 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4467 config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
4468 else:
4469 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4470 config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
4471 vca_type = vca.get("type")
4472 exec_terminate_primitives = not operation_params.get(
4473 "skip_terminate_primitives"
4474 ) and vca.get("needed_terminate")
4475 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
4476 # pending native charms
4477 destroy_ee = vca_type in ("helm-v3", "native_charm")
4478 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4479 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4480 task = asyncio.ensure_future(
4481 self.destroy_N2VC(
4482 logging_text,
4483 db_nslcmop,
4484 vca,
4485 config_descriptor,
4486 vca_index,
4487 destroy_ee,
4488 exec_terminate_primitives,
4489 vca_id=vca_id,
4490 )
4491 )
4492 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
4493
4494 # wait for pending tasks of terminate primitives
4495 if tasks_dict_info:
4496 self.logger.debug(
4497 logging_text
4498 + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
4499 )
4500 error_list = await self._wait_for_tasks(
4501 logging_text,
4502 tasks_dict_info,
4503 min(self.timeout.charm_delete, timeout_ns_terminate),
4504 stage,
4505 nslcmop_id,
4506 )
4507 tasks_dict_info.clear()
4508 if error_list:
4509 return # raise LcmException("; ".join(error_list))
4510
4511 # remove All execution environments at once
4512 stage[0] = "Stage 3/3 delete all."
4513
4514 if nsr_deployed.get("VCA"):
4515 stage[1] = "Deleting all execution environments."
4516 self.logger.debug(logging_text + stage[1])
4517 vca_id = self.get_vca_id({}, db_nsr)
4518 task_delete_ee = asyncio.ensure_future(
4519 asyncio.wait_for(
4520 self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
4521 timeout=self.timeout.charm_delete,
4522 )
4523 )
4524 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
4525 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
4526
4527 # Delete Namespace and Certificates if necessary
4528 if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
4529 await self.vca_map["helm-v3"].delete_tls_certificate(
4530 namespace=db_nslcmop["nsInstanceId"],
4531 certificate_name=self.EE_TLS_NAME,
4532 )
4533 await self.vca_map["helm-v3"].delete_namespace(
4534 namespace=db_nslcmop["nsInstanceId"],
4535 )
4536
4537 # Delete from k8scluster
4538 stage[1] = "Deleting KDUs."
4539 self.logger.debug(logging_text + stage[1])
4540 # print(nsr_deployed)
4541 for kdu in get_iterable(nsr_deployed, "K8s"):
4542 if not kdu or not kdu.get("kdu-instance"):
4543 continue
4544 kdu_instance = kdu.get("kdu-instance")
4545 if kdu.get("k8scluster-type") in self.k8scluster_map:
4546 # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4547 vca_id = self.get_vca_id({}, db_nsr)
4548 task_delete_kdu_instance = asyncio.ensure_future(
4549 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
4550 cluster_uuid=kdu.get("k8scluster-uuid"),
4551 kdu_instance=kdu_instance,
4552 vca_id=vca_id,
4553 namespace=kdu.get("namespace"),
4554 )
4555 )
4556 else:
4557 self.logger.error(
4558 logging_text
4559 + "Unknown k8s deployment type {}".format(
4560 kdu.get("k8scluster-type")
4561 )
4562 )
4563 continue
4564 tasks_dict_info[
4565 task_delete_kdu_instance
4566 ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
4567
4568 # remove from RO
4569 stage[1] = "Deleting ns from VIM."
4570 if self.ro_config.ng:
4571 task_delete_ro = asyncio.ensure_future(
4572 self._terminate_ng_ro(
4573 logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4574 )
4575 )
4576 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
4577
4578 # the rest of the work is done in the finally block
4579
4580 except (
4581 ROclient.ROClientException,
4582 DbException,
4583 LcmException,
4584 N2VCException,
4585 ) as e:
4586 self.logger.error(logging_text + "Exit Exception {}".format(e))
4587 exc = e
4588 except asyncio.CancelledError:
4589 self.logger.error(
4590 logging_text + "Cancelled Exception while '{}'".format(stage[1])
4591 )
4592 exc = "Operation was cancelled"
4593 except Exception as e:
4594 exc = traceback.format_exc()
4595 self.logger.critical(
4596 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
4597 exc_info=True,
4598 )
4599 finally:
4600 if exc:
4601 error_list.append(str(exc))
4602 try:
4603 # wait for pending tasks
4604 if tasks_dict_info:
4605 stage[1] = "Waiting for terminate pending tasks."
4606 self.logger.debug(logging_text + stage[1])
4607 error_list += await self._wait_for_tasks(
4608 logging_text,
4609 tasks_dict_info,
4610 timeout_ns_terminate,
4611 stage,
4612 nslcmop_id,
4613 )
4614 stage[1] = stage[2] = ""
4615 except asyncio.CancelledError:
4616 error_list.append("Cancelled")
4617 # TODO cancel all tasks
4618 except Exception as exc:
4619 error_list.append(str(exc))
4620 # update status at database
4621 if error_list:
4622 error_detail = "; ".join(error_list)
4623 # self.logger.error(logging_text + error_detail)
4624 error_description_nslcmop = "{} Detail: {}".format(
4625 stage[0], error_detail
4626 )
4627 error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
4628 nslcmop_id, stage[0]
4629 )
4630
4631 db_nsr_update["operational-status"] = "failed"
4632 db_nsr_update["detailed-status"] = (
4633 error_description_nsr + " Detail: " + error_detail
4634 )
4635 db_nslcmop_update["detailed-status"] = error_detail
4636 nslcmop_operation_state = "FAILED"
4637 ns_state = "BROKEN"
4638 else:
4639 error_detail = None
4640 error_description_nsr = error_description_nslcmop = None
4641 ns_state = "NOT_INSTANTIATED"
4642 db_nsr_update["operational-status"] = "terminated"
4643 db_nsr_update["detailed-status"] = "Done"
4644 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
4645 db_nslcmop_update["detailed-status"] = "Done"
4646 nslcmop_operation_state = "COMPLETED"
4647
4648 if db_nsr:
4649 self._write_ns_status(
4650 nsr_id=nsr_id,
4651 ns_state=ns_state,
4652 current_operation="IDLE",
4653 current_operation_id=None,
4654 error_description=error_description_nsr,
4655 error_detail=error_detail,
4656 other_update=db_nsr_update,
4657 )
4658 self._write_op_status(
4659 op_id=nslcmop_id,
4660 stage="",
4661 error_message=error_description_nslcmop,
4662 operation_state=nslcmop_operation_state,
4663 other_update=db_nslcmop_update,
4664 )
4665 if ns_state == "NOT_INSTANTIATED":
4666 try:
4667 self.db.set_list(
4668 "vnfrs",
4669 {"nsr-id-ref": nsr_id},
4670 {"_admin.nsState": "NOT_INSTANTIATED"},
4671 )
4672 except DbException as e:
4673 self.logger.warning(
4674 logging_text
4675 + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4676 nsr_id, e
4677 )
4678 )
4679 if operation_params:
4680 autoremove = operation_params.get("autoremove", False)
4681 if nslcmop_operation_state:
4682 try:
4683 await self.msg.aiowrite(
4684 "ns",
4685 "terminated",
4686 {
4687 "nsr_id": nsr_id,
4688 "nslcmop_id": nslcmop_id,
4689 "operationState": nslcmop_operation_state,
4690 "autoremove": autoremove,
4691 },
4692 )
4693 except Exception as e:
4694 self.logger.error(
4695 logging_text + "kafka_write notification Exception {}".format(e)
4696 )
4697 self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
4698 self.db.del_list("alerts", {"tags.ns_id": nsr_id})
4699
4700 self.logger.debug(logging_text + "Exit")
4701 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4702
4703 async def _wait_for_tasks(
4704 self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
4705 ):
4706 time_start = time()
4707 error_detail_list = []
4708 error_list = []
4709 pending_tasks = list(created_tasks_info.keys())
4710 num_tasks = len(pending_tasks)
4711 num_done = 0
4712 stage[1] = "{}/{}.".format(num_done, num_tasks)
4713 self._write_op_status(nslcmop_id, stage)
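# Wait with FIRST_COMPLETED so progress (num_done/num_tasks) is reported
# as tasks finish; the remaining timeout shrinks on every iteration.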
4714 while pending_tasks:
4715 new_error = None
4716 _timeout = timeout + time_start - time()
4717 done, pending_tasks = await asyncio.wait(
4718 pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
4719 )
4720 num_done += len(done)
4721 if not done: # Timeout
4722 for task in pending_tasks:
4723 new_error = created_tasks_info[task] + ": Timeout"
4724 error_detail_list.append(new_error)
4725 error_list.append(new_error)
4726 break
4727 for task in done:
4728 if task.cancelled():
4729 exc = "Cancelled"
4730 else:
4731 exc = task.exception()
4732 if exc:
4733 if isinstance(exc, asyncio.TimeoutError):
4734 exc = "Timeout"
4735 new_error = created_tasks_info[task] + ": {}".format(exc)
4736 error_list.append(created_tasks_info[task])
4737 error_detail_list.append(new_error)
4738 if isinstance(
4739 exc,
4740 (
4741 str,
4742 DbException,
4743 N2VCException,
4744 ROclient.ROClientException,
4745 LcmException,
4746 K8sException,
4747 NgRoException,
4748 ),
4749 ):
4750 self.logger.error(logging_text + new_error)
4751 else:
4752 exc_traceback = "".join(
4753 traceback.format_exception(None, exc, exc.__traceback__)
4754 )
4755 self.logger.error(
4756 logging_text
4757 + created_tasks_info[task]
4758 + " "
4759 + exc_traceback
4760 )
4761 else:
4762 self.logger.debug(
4763 logging_text + created_tasks_info[task] + ": Done"
4764 )
4765 stage[1] = "{}/{}.".format(num_done, num_tasks)
4766 if new_error:
4767 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
4768 if nsr_id: # update also nsr
4769 self.update_db_2(
4770 "nsrs",
4771 nsr_id,
4772 {
4773 "errorDescription": "Error at: " + ", ".join(error_list),
4774 "errorDetail": ". ".join(error_detail_list),
4775 },
4776 )
4777 self._write_op_status(nslcmop_id, stage)
4778 return error_detail_list
4779
4780 @staticmethod
4781 def _map_primitive_params(primitive_desc, params, instantiation_params):
4782 """
4783 Generates the params to be provided to the charm before executing a primitive. If the user does not
4784 provide a parameter, the default-value is used; a value wrapped in < > is looked up in instantiation_params
4785 :param primitive_desc: portion of VNFD/NSD that describes primitive
4786 :param params: Params provided by user
4787 :param instantiation_params: Instantiation params provided by user
4788 :return: a dictionary with the calculated params
4789 """
4790 calculated_params = {}
4791 for parameter in primitive_desc.get("parameter", ()):
4792 param_name = parameter["name"]
4793 if param_name in params:
4794 calculated_params[param_name] = params[param_name]
4795 elif "default-value" in parameter or "value" in parameter:
4796 if "value" in parameter:
4797 calculated_params[param_name] = parameter["value"]
4798 else:
4799 calculated_params[param_name] = parameter["default-value"]
4800 if (
4801 isinstance(calculated_params[param_name], str)
4802 and calculated_params[param_name].startswith("<")
4803 and calculated_params[param_name].endswith(">")
4804 ):
4805 if calculated_params[param_name][1:-1] in instantiation_params:
4806 calculated_params[param_name] = instantiation_params[
4807 calculated_params[param_name][1:-1]
4808 ]
4809 else:
4810 raise LcmException(
4811 "Parameter {} needed to execute primitive {} not provided".format(
4812 calculated_params[param_name], primitive_desc["name"]
4813 )
4814 )
4815 else:
4816 raise LcmException(
4817 "Parameter {} needed to execute primitive {} not provided".format(
4818 param_name, primitive_desc["name"]
4819 )
4820 )
4821
4822 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4823 calculated_params[param_name] = yaml.safe_dump(
4824 calculated_params[param_name], default_flow_style=True, width=256
4825 )
4826 elif isinstance(calculated_params[param_name], str) and calculated_params[
4827 param_name
4828 ].startswith("!!yaml "):
4829 calculated_params[param_name] = calculated_params[param_name][7:]
4830 if parameter.get("data-type") == "INTEGER":
4831 try:
4832 calculated_params[param_name] = int(calculated_params[param_name])
4833 except ValueError: # error converting string to int
4834 raise LcmException(
4835 "Parameter {} of primitive {} must be integer".format(
4836 param_name, primitive_desc["name"]
4837 )
4838 )
4839 elif parameter.get("data-type") == "BOOLEAN":
4840 calculated_params[param_name] = (
4841 str(calculated_params[param_name]).lower() != "false"
4842 )
4843
4844 # always add ns_config_info if the primitive name is config
4845 if primitive_desc["name"] == "config":
4846 if "ns_config_info" in instantiation_params:
4847 calculated_params["ns_config_info"] = instantiation_params[
4848 "ns_config_info"
4849 ]
4850 return calculated_params
4851
4852 def _look_for_deployed_vca(
4853 self,
4854 deployed_vca,
4855 member_vnf_index,
4856 vdu_id,
4857 vdu_count_index,
4858 kdu_name=None,
4859 ee_descriptor_id=None,
4860 ):
4861 # find the vca_deployed record for this action; raise LcmException if not found or it has no ee_id
4862 for vca in deployed_vca:
4863 if not vca:
4864 continue
4865 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4866 continue
4867 if (
4868 vdu_count_index is not None
4869 and vdu_count_index != vca["vdu_count_index"]
4870 ):
4871 continue
4872 if kdu_name and kdu_name != vca["kdu_name"]:
4873 continue
4874 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4875 continue
4876 break
4877 else:
4878 # vca_deployed not found
4879 raise LcmException(
4880 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4881 " is not deployed".format(
4882 member_vnf_index,
4883 vdu_id,
4884 vdu_count_index,
4885 kdu_name,
4886 ee_descriptor_id,
4887 )
4888 )
4889 # get ee_id
4890 ee_id = vca.get("ee_id")
4891 vca_type = vca.get(
4892 "type", "lxc_proxy_charm"
4893 ) # default value for backward compatibility - proxy charm
4894 if not ee_id:
4895 raise LcmException(
4896 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4897 "execution environment".format(
4898 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4899 )
4900 )
4901 return ee_id, vca_type
4902
4903 async def _ns_execute_primitive(
4904 self,
4905 ee_id,
4906 primitive,
4907 primitive_params,
4908 retries=0,
4909 retries_interval=30,
4910 timeout=None,
4911 vca_type=None,
4912 db_dict=None,
4913 vca_id: str = None,
4914 ) -> (str, str):
4915 try:
4916 if primitive == "config":
4917 primitive_params = {"params": primitive_params}
4918
4919 vca_type = vca_type or "lxc_proxy_charm"
4920
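# Retry loop: on failure, sleep retries_interval and retry while retries
# remain; a final asyncio.TimeoutError is reported as an N2VCException.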
4921 while retries >= 0:
4922 try:
4923 output = await asyncio.wait_for(
4924 self.vca_map[vca_type].exec_primitive(
4925 ee_id=ee_id,
4926 primitive_name=primitive,
4927 params_dict=primitive_params,
4928 progress_timeout=self.timeout.progress_primitive,
4929 total_timeout=self.timeout.primitive,
4930 db_dict=db_dict,
4931 vca_id=vca_id,
4932 vca_type=vca_type,
4933 ),
4934 timeout=timeout or self.timeout.primitive,
4935 )
4936 # execution was OK
4937 break
4938 except asyncio.CancelledError:
4939 raise
4940 except Exception as e:
4941 retries -= 1
4942 if retries >= 0:
4943 self.logger.debug(
4944 "Error executing action {} on {} -> {}".format(
4945 primitive, ee_id, e
4946 )
4947 )
4948 # wait and retry
4949 await asyncio.sleep(retries_interval)
4950 else:
4951 if isinstance(e, asyncio.TimeoutError):
4952 e = N2VCException(
4953 message="Timed out waiting for action to complete"
4954 )
4955 return "FAILED", getattr(e, "message", repr(e))
4956
4957 return "COMPLETED", output
4958
4959 except (LcmException, asyncio.CancelledError):
4960 raise
4961 except Exception as e:
4962 return "FAIL", "Error executing action {}: {}".format(primitive, e)
4963
4964 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4965 """
4966 Updating the vca_status with latest juju information in nsrs record
4967 :param: nsr_id: Id of the nsr
4968 :param: nslcmop_id: Id of the nslcmop
4969 :return: None
4970 """
4971
4972 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4973 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4974 vca_id = self.get_vca_id({}, db_nsr)
4975 if db_nsr["_admin"]["deployed"]["K8s"]:
4976 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4977 cluster_uuid, kdu_instance, cluster_type = (
4978 k8s["k8scluster-uuid"],
4979 k8s["kdu-instance"],
4980 k8s["k8scluster-type"],
4981 )
4982 await self._on_update_k8s_db(
4983 cluster_uuid=cluster_uuid,
4984 kdu_instance=kdu_instance,
4985 filter={"_id": nsr_id},
4986 vca_id=vca_id,
4987 cluster_type=cluster_type,
4988 )
4989 else:
4990 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4991 table, filter = "nsrs", {"_id": nsr_id}
4992 path = "_admin.deployed.VCA.{}.".format(vca_index)
4993 await self._on_update_n2vc_db(table, filter, path, {})
4994
4995 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4996 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4997
4998 async def action(self, nsr_id, nslcmop_id):
4999 # Try to lock HA task here
5000 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5001 if not task_is_locked_by_me:
5002 return
5003
5004 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
5005 self.logger.debug(logging_text + "Enter")
5006 # get all needed from database
5007 db_nsr = None
5008 db_nslcmop = None
5009 db_nsr_update = {}
5010 db_nslcmop_update = {}
5011 nslcmop_operation_state = None
5012 error_description_nslcmop = None
5013 exc = None
5014 step = ""
5015 try:
5016 # wait for any previous tasks in process
5017 step = "Waiting for previous operations to terminate"
5018 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5019
5020 self._write_ns_status(
5021 nsr_id=nsr_id,
5022 ns_state=None,
5023 current_operation="RUNNING ACTION",
5024 current_operation_id=nslcmop_id,
5025 )
5026
5027 step = "Getting information from database"
5028 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5029 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5030 if db_nslcmop["operationParams"].get("primitive_params"):
5031 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
5032 db_nslcmop["operationParams"]["primitive_params"]
5033 )
5034
5035 nsr_deployed = db_nsr["_admin"].get("deployed")
5036 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
5037 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
5038 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
5039 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
5040 primitive = db_nslcmop["operationParams"]["primitive"]
5041 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
5042 timeout_ns_action = db_nslcmop["operationParams"].get(
5043 "timeout_ns_action", self.timeout.primitive
5044 )
5045
5046 if vnf_index:
5047 step = "Getting vnfr from database"
5048 db_vnfr = self.db.get_one(
5049 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5050 )
5051 if db_vnfr.get("kdur"):
5052 kdur_list = []
5053 for kdur in db_vnfr["kdur"]:
5054 if kdur.get("additionalParams"):
5055 kdur["additionalParams"] = json.loads(
5056 kdur["additionalParams"]
5057 )
5058 kdur_list.append(kdur)
5059 db_vnfr["kdur"] = kdur_list
5060 step = "Getting vnfd from database"
5061 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
5062
5063 # Sync filesystem before running a primitive
5064 self.fs.sync(db_vnfr["vnfd-id"])
5065 else:
db_vnfr = None # avoid a NameError below for NS-level actions
5066 step = "Getting nsd from database"
5067 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
5068
5069 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5070 # for backward compatibility
5071 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5072 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5073 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5074 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5075
5076 # look for primitive
5077 config_primitive_desc = descriptor_configuration = None
5078 if vdu_id:
5079 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
5080 elif kdu_name:
5081 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
5082 elif vnf_index:
5083 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
5084 else:
5085 descriptor_configuration = db_nsd.get("ns-configuration")
5086
5087 if descriptor_configuration and descriptor_configuration.get(
5088 "config-primitive"
5089 ):
5090 for config_primitive in descriptor_configuration["config-primitive"]:
5091 if config_primitive["name"] == primitive:
5092 config_primitive_desc = config_primitive
5093 break
5094
5095 if not config_primitive_desc:
5096 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
5097 raise LcmException(
5098 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5099 primitive
5100 )
5101 )
5102 primitive_name = primitive
5103 ee_descriptor_id = None
5104 else:
5105 primitive_name = config_primitive_desc.get(
5106 "execution-environment-primitive", primitive
5107 )
5108 ee_descriptor_id = config_primitive_desc.get(
5109 "execution-environment-ref"
5110 )
5111
5112 if vnf_index:
5113 if vdu_id:
5114 vdur = next(
5115 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
5116 )
5117 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
5118 elif kdu_name:
5119 kdur = next(
5120 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
5121 )
5122 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
5123 else:
5124 desc_params = parse_yaml_strings(
5125 db_vnfr.get("additionalParamsForVnf")
5126 )
5127 else:
5128 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
5129 if kdu_name and get_configuration(db_vnfd, kdu_name):
5130 kdu_configuration = get_configuration(db_vnfd, kdu_name)
5131 actions = set()
# avoid shadowing the requested `primitive` with the loop variable
5132 for kdu_primitive in kdu_configuration.get("initial-config-primitive", []):
5133 actions.add(kdu_primitive["name"])
5134 for kdu_primitive in kdu_configuration.get("config-primitive", []):
5135 actions.add(kdu_primitive["name"])
5136 kdu = find_in_list(
5137 nsr_deployed["K8s"],
5138 lambda kdu: kdu_name == kdu["kdu-name"]
5139 and kdu["member-vnf-index"] == vnf_index,
5140 )
5141 kdu_action = (
5142 primitive_name in actions
5143 and kdu["k8scluster-type"] != "helm-chart-v3"
5144 )
5147
5148 # TODO check if ns is in a proper status
5149 if kdu_name and (
5150 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5151 ):
5152 # kdur and desc_params already set from before
5153 if primitive_params:
5154 desc_params.update(primitive_params)
5155 # TODO Check if we will need something at vnf level
5156 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
5157 if (
5158 kdu_name == kdu["kdu-name"]
5159 and kdu["member-vnf-index"] == vnf_index
5160 ):
5161 break
5162 else:
5163 raise LcmException(
5164 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5165 )
5166
5167 if kdu.get("k8scluster-type") not in self.k8scluster_map:
5168 msg = "unknown k8scluster-type '{}'".format(
5169 kdu.get("k8scluster-type")
5170 )
5171 raise LcmException(msg)
5172
5173 db_dict = {
5174 "collection": "nsrs",
5175 "filter": {"_id": nsr_id},
5176 "path": "_admin.deployed.K8s.{}".format(index),
5177 }
5178 self.logger.debug(
5179 logging_text
5180 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5181 )
5182 step = "Executing kdu {}".format(primitive_name)
5183 if primitive_name == "upgrade":
5184 if desc_params.get("kdu_model"):
5185 kdu_model = desc_params.get("kdu_model")
5186 del desc_params["kdu_model"]
5187 else:
5188 kdu_model = kdu.get("kdu-model")
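# A non-embedded model such as "repo/chart:version" is reduced to
# "repo/chart", dropping the pinned version before the upgrade.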
5189 if kdu_model.count("/") < 2: # helm chart is not embedded
5190 parts = kdu_model.split(sep=":")
5191 if len(parts) == 2:
5192 kdu_model = parts[0]
5193 if desc_params.get("kdu_atomic_upgrade"):
5194 atomic_upgrade = desc_params.get(
5195 "kdu_atomic_upgrade"
5196 ).lower() in ("yes", "true", "1")
5197 del desc_params["kdu_atomic_upgrade"]
5198 else:
5199 atomic_upgrade = True
5200
5201 detailed_status = await asyncio.wait_for(
5202 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5203 cluster_uuid=kdu.get("k8scluster-uuid"),
5204 kdu_instance=kdu.get("kdu-instance"),
5205 atomic=atomic_upgrade,
5206 kdu_model=kdu_model,
5207 params=desc_params,
5208 db_dict=db_dict,
5209 timeout=timeout_ns_action,
5210 ),
5211 timeout=timeout_ns_action + 10,
5212 )
5213 self.logger.debug(
5214 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5215 )
5216 elif primitive_name == "rollback":
5217 detailed_status = await asyncio.wait_for(
5218 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5219 cluster_uuid=kdu.get("k8scluster-uuid"),
5220 kdu_instance=kdu.get("kdu-instance"),
5221 db_dict=db_dict,
5222 ),
5223 timeout=timeout_ns_action,
5224 )
5225 elif primitive_name == "status":
5226 detailed_status = await asyncio.wait_for(
5227 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5228 cluster_uuid=kdu.get("k8scluster-uuid"),
5229 kdu_instance=kdu.get("kdu-instance"),
5230 vca_id=vca_id,
5231 ),
5232 timeout=timeout_ns_action,
5233 )
5234 else:
5235 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5236 kdu["kdu-name"], nsr_id
5237 )
5238 params = self._map_primitive_params(
5239 config_primitive_desc, primitive_params, desc_params
5240 )
5241
5242 detailed_status = await asyncio.wait_for(
5243 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5244 cluster_uuid=kdu.get("k8scluster-uuid"),
5245 kdu_instance=kdu_instance,
5246 primitive_name=primitive_name,
5247 params=params,
5248 db_dict=db_dict,
5249 timeout=timeout_ns_action,
5250 vca_id=vca_id,
5251 ),
5252 timeout=timeout_ns_action,
5253 )
5254
5255 if detailed_status:
5256 nslcmop_operation_state = "COMPLETED"
5257 else:
5258 detailed_status = ""
5259 nslcmop_operation_state = "FAILED"
5260 else:
5261 ee_id, vca_type = self._look_for_deployed_vca(
5262 nsr_deployed["VCA"],
5263 member_vnf_index=vnf_index,
5264 vdu_id=vdu_id,
5265 vdu_count_index=vdu_count_index,
5266 ee_descriptor_id=ee_descriptor_id,
5267 )
5268 for vca_index, vca_deployed in enumerate(
5269 db_nsr["_admin"]["deployed"]["VCA"]
5270 ):
5271 if vca_deployed.get("member-vnf-index") == vnf_index:
5272 db_dict = {
5273 "collection": "nsrs",
5274 "filter": {"_id": nsr_id},
5275 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5276 }
5277 break
5278 (
5279 nslcmop_operation_state,
5280 detailed_status,
5281 ) = await self._ns_execute_primitive(
5282 ee_id,
5283 primitive=primitive_name,
5284 primitive_params=self._map_primitive_params(
5285 config_primitive_desc, primitive_params, desc_params
5286 ),
5287 timeout=timeout_ns_action,
5288 vca_type=vca_type,
5289 db_dict=db_dict,
5290 vca_id=vca_id,
5291 )
5292
5293 db_nslcmop_update["detailed-status"] = detailed_status
5294 error_description_nslcmop = (
5295 detailed_status if nslcmop_operation_state == "FAILED" else ""
5296 )
5297 self.logger.debug(
5298 logging_text
5299 + "Done with result {} {}".format(
5300 nslcmop_operation_state, detailed_status
5301 )
5302 )
5303 return # database update is called inside finally
5304
5305 except (DbException, LcmException, N2VCException, K8sException) as e:
5306 self.logger.error(logging_text + "Exit Exception {}".format(e))
5307 exc = e
5308 except asyncio.CancelledError:
5309 self.logger.error(
5310 logging_text + "Cancelled Exception while '{}'".format(step)
5311 )
5312 exc = "Operation was cancelled"
5313 except asyncio.TimeoutError:
5314 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5315 exc = "Timeout"
5316 except Exception as e:
5317 exc = traceback.format_exc()
5318 self.logger.critical(
5319 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5320 exc_info=True,
5321 )
5322 finally:
5323 if exc:
5324 db_nslcmop_update[
5325 "detailed-status"
5326 ] = (
5327 detailed_status
5328 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5329 nslcmop_operation_state = "FAILED"
5330 if db_nsr:
5331 self._write_ns_status(
5332 nsr_id=nsr_id,
5333 ns_state=db_nsr[
5334 "nsState"
5335 ], # TODO check if degraded. For the moment use previous status
5336 current_operation="IDLE",
5337 current_operation_id=None,
5338 # error_description=error_description_nsr,
5339 # error_detail=error_detail,
5340 other_update=db_nsr_update,
5341 )
5342
5343 self._write_op_status(
5344 op_id=nslcmop_id,
5345 stage="",
5346 error_message=error_description_nslcmop,
5347 operation_state=nslcmop_operation_state,
5348 other_update=db_nslcmop_update,
5349 )
5350
5351 if nslcmop_operation_state:
5352 try:
5353 await self.msg.aiowrite(
5354 "ns",
5355 "actioned",
5356 {
5357 "nsr_id": nsr_id,
5358 "nslcmop_id": nslcmop_id,
5359 "operationState": nslcmop_operation_state,
5360 },
5361 )
5362 except Exception as e:
5363 self.logger.error(
5364 logging_text + "kafka_write notification Exception {}".format(e)
5365 )
5366 self.logger.debug(logging_text + "Exit")
5367 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5368 return nslcmop_operation_state, detailed_status
5369
5370 async def terminate_vdus(
5371 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5372 ):
5373 """This method terminates VDUs
5374
5375 Args:
5376 db_vnfr: VNF instance record
5377 member_vnf_index: VNF index to identify the VDUs to be removed
5378 db_nsr: NS instance record
5379 update_db_nslcmops: Nslcmop update record
5380 """
5381 vca_scaling_info = []
5382 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5383 scaling_info["scaling_direction"] = "IN"
5384 scaling_info["vdu-delete"] = {}
5385 scaling_info["kdu-delete"] = {}
5386 db_vdur = db_vnfr.get("vdur")
5387 vdur_list = copy(db_vdur)
5388 count_index = 0
5389 for index, vdu in enumerate(vdur_list):
5390 vca_scaling_info.append(
5391 {
5392 "osm_vdu_id": vdu["vdu-id-ref"],
5393 "member-vnf-index": member_vnf_index,
5394 "type": "delete",
5395 "vdu_index": count_index,
5396 }
5397 )
5398 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5399 scaling_info["vdu"].append(
5400 {
5401 "name": vdu.get("name") or vdu.get("vdu-name"),
5402 "vdu_id": vdu["vdu-id-ref"],
5403 "interface": [],
5404 }
5405 )
5406 for interface in vdu["interfaces"]:
5407 scaling_info["vdu"][index]["interface"].append(
5408 {
5409 "name": interface["name"],
5410 "ip_address": interface["ip-address"],
5411 "mac_address": interface.get("mac-address"),
5412 }
5413 )
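# A sketch of the structure built above (illustrative values, not from a
# live deployment):
#     scaling_info = {
#         "scaling_group_name": "vdu_autoscale",
#         "scaling_direction": "IN",
#         "kdu": [], "kdu-delete": {},
#         "vdu-delete": {"mgmt-vdu": 0},
#         "vdu": [{"name": "mgmt-vdu-0", "vdu_id": "mgmt-vdu",
#                  "interface": [{"name": "eth0", "ip_address": "10.0.0.5",
#                                 "mac_address": "fa:16:3e:00:00:01"}]}],
#     }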
5414 self.logger.info("NS update scaling info{}".format(scaling_info))
5415 stage[2] = "Terminating VDUs"
5416 if scaling_info.get("vdu-delete"):
5417 # scale_process = "RO"
5418 if self.ro_config.ng:
5419 await self._scale_ng_ro(
5420 logging_text,
5421 db_nsr,
5422 update_db_nslcmops,
5423 db_vnfr,
5424 scaling_info,
5425 stage,
5426 )
5427
5428 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5429 """This method is to Remove VNF instances from NS.
5430
5431 Args:
5432 nsr_id: NS instance id
5433 nslcmop_id: nslcmop id of update
5434 vnf_instance_id: id of the VNF instance to be removed
5435
5436 Returns:
5437 result: (str, str) COMPLETED/FAILED, details
5438 """
5439 try:
5440 db_nsr_update = {}
5441 logging_text = "Task ns={} update ".format(nsr_id)
5442 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5443 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5444 if check_vnfr_count > 1:
5445 stage = ["", "", ""]
5446 step = "Getting nslcmop from database"
5447 self.logger.debug(
5448 step + " after having waited for previous tasks to be completed"
5449 )
5450 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5451 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5452 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5453 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5454 """ db_vnfr = self.db.get_one(
5455 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5456
5457 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5458 await self.terminate_vdus(
5459 db_vnfr,
5460 member_vnf_index,
5461 db_nsr,
5462 update_db_nslcmops,
5463 stage,
5464 logging_text,
5465 )
5466
5467 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5468 constituent_vnfr.remove(db_vnfr.get("_id"))
5469 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5470 "constituent-vnfr-ref"
5471 )
self.update_db_2("nsrs", nsr_id, db_nsr_update)
self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5475 return "COMPLETED", "Done"
5476 else:
5477 step = "Terminate VNF Failed with"
5478 raise LcmException(
5479 "{} Cannot terminate the last VNF in this NS.".format(
5480 vnf_instance_id
5481 )
5482 )
5483 except (LcmException, asyncio.CancelledError):
5484 raise
5485 except Exception as e:
5486 self.logger.debug("Error removing VNF {}".format(e))
5487 return "FAILED", "Error removing VNF {}".format(e)
5488
5489 async def _ns_redeploy_vnf(
5490 self,
5491 nsr_id,
5492 nslcmop_id,
5493 db_vnfd,
5494 db_vnfr,
5495 db_nsr,
5496 ):
5497 """This method updates and redeploys VNF instances
5498
5499 Args:
5500 nsr_id: NS instance id
5501 nslcmop_id: nslcmop id
5502 db_vnfd: VNF descriptor
5503 db_vnfr: VNF instance record
5504 db_nsr: NS instance record
5505
5506 Returns:
5507 result: (str, str) COMPLETED/FAILED, details
5508 """
5509 try:
5510 count_index = 0
5511 stage = ["", "", ""]
5512 logging_text = "Task ns={} update ".format(nsr_id)
5513 latest_vnfd_revision = db_vnfd["_admin"].get("revision")
5514 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5515
5516 # Terminate old VNF resources
5517 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5518 await self.terminate_vdus(
5519 db_vnfr,
5520 member_vnf_index,
5521 db_nsr,
5522 update_db_nslcmops,
5523 stage,
5524 logging_text,
5525 )
5526
5527 # old_vnfd_id = db_vnfr["vnfd-id"]
5528 # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
5529 new_db_vnfd = db_vnfd
5530 # new_vnfd_ref = new_db_vnfd["id"]
5531 # new_vnfd_id = vnfd_id
5532
5533 # Create VDUR
5534 new_vnfr_cp = []
5535 for cp in new_db_vnfd.get("ext-cpd", ()):
5536 vnf_cp = {
5537 "name": cp.get("id"),
5538 "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
5539 "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
5540 "id": cp.get("id"),
5541 }
5542 new_vnfr_cp.append(vnf_cp)
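# For illustration (hypothetical descriptor values): an ext-cpd entry
#     {"id": "vnf-mgmt-ext", "int-cpd": {"vdu-id": "mgmt-vdu", "cpd": "mgmt-int"}}
# maps to the vnfr connection-point record
#     {"name": "vnf-mgmt-ext", "connection-point-id": "mgmt-int",
#      "connection-point-vdu-id": "mgmt-vdu", "id": "vnf-mgmt-ext"}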
5543 new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
5544 # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
5545 # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
5546 new_vnfr_update = {
5547 "revision": latest_vnfd_revision,
5548 "connection-point": new_vnfr_cp,
5549 "vdur": new_vdur,
5550 "ip-address": "",
5551 }
5552 self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
5553 updated_db_vnfr = self.db.get_one(
5554 "vnfrs",
5555 {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
5556 )
5557
5558 # Instantiate new VNF resources
5559 # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5560 vca_scaling_info = []
5561 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5562 scaling_info["scaling_direction"] = "OUT"
5563 scaling_info["vdu-create"] = {}
5564 scaling_info["kdu-create"] = {}
5565 vdud_instantiate_list = db_vnfd["vdu"]
5566 for index, vdud in enumerate(vdud_instantiate_list):
5567 cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
5568 if cloud_init_text:
5569 additional_params = (
5570 self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
5571 or {}
5572 )
5573 cloud_init_list = []
5574 if cloud_init_text:
# TODO The VDU's own IP is not available here because db_vnfr is not updated yet.
5576 additional_params["OSM"] = get_osm_params(
5577 updated_db_vnfr, vdud["id"], 1
5578 )
5579 cloud_init_list.append(
5580 self._parse_cloud_init(
5581 cloud_init_text,
5582 additional_params,
5583 db_vnfd["id"],
5584 vdud["id"],
5585 )
5586 )
5587 vca_scaling_info.append(
5588 {
5589 "osm_vdu_id": vdud["id"],
5590 "member-vnf-index": member_vnf_index,
5591 "type": "create",
5592 "vdu_index": count_index,
5593 }
5594 )
5595 scaling_info["vdu-create"][vdud["id"]] = count_index
5596 if self.ro_config.ng:
5597 self.logger.debug(
5598 "New Resources to be deployed: {}".format(scaling_info)
5599 )
5600 await self._scale_ng_ro(
5601 logging_text,
5602 db_nsr,
5603 update_db_nslcmops,
5604 updated_db_vnfr,
5605 scaling_info,
5606 stage,
5607 )
5608 return "COMPLETED", "Done"
5609 except (LcmException, asyncio.CancelledError):
5610 raise
5611 except Exception as e:
5612 self.logger.debug("Error updating VNF {}".format(e))
5613 return "FAILED", "Error updating VNF {}".format(e)
5614
5615 async def _ns_charm_upgrade(
5616 self,
5617 ee_id,
5618 charm_id,
5619 charm_type,
5620 path,
5621 timeout: float = None,
5622 ) -> (str, str):
5623 """This method upgrade charms in VNF instances
5624
5625 Args:
5626 ee_id: Execution environment id
5627 path: Local path to the charm
5628 charm_id: charm-id
5629 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5630 timeout: (Float) Timeout for the ns update operation
5631
5632 Returns:
5633 result: (str, str) COMPLETED/FAILED, details
5634 """
5635 try:
5636 charm_type = charm_type or "lxc_proxy_charm"
5637 output = await self.vca_map[charm_type].upgrade_charm(
5638 ee_id=ee_id,
5639 path=path,
5640 charm_id=charm_id,
5641 charm_type=charm_type,
5642 timeout=timeout or self.timeout.ns_update,
5643 )
5644
if output:
return "COMPLETED", output
return "FAILED", "Charm upgrade of {} returned no output".format(path)
5647
5648 except (LcmException, asyncio.CancelledError):
5649 raise
5650
5651 except Exception as e:
5652 self.logger.debug("Error upgrading charm {}".format(path))
5653
5654 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5655
5656 async def update(self, nsr_id, nslcmop_id):
5657 """Update NS according to different update types
5658
5659 This method performs upgrade of VNF instances then updates the revision
5660 number in VNF record
5661
Args:
nsr_id: id of the network service to be updated
nslcmop_id: ns lcm operation id

Returns:
nslcmop_operation_state, detailed_status

Raises:
DbException, LcmException, N2VCException, K8sException
5668
5669 """
5670 # Try to lock HA task here
5671 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5672 if not task_is_locked_by_me:
5673 return
5674
5675 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5676 self.logger.debug(logging_text + "Enter")
5677
5678 # Set the required variables to be filled up later
5679 db_nsr = None
5680 db_nslcmop_update = {}
5681 vnfr_update = {}
5682 nslcmop_operation_state = None
5683 db_nsr_update = {}
5684 error_description_nslcmop = ""
5685 exc = None
5686 change_type = "updated"
5687 detailed_status = ""
5688 member_vnf_index = None
5689
5690 try:
5691 # wait for any previous tasks in process
5692 step = "Waiting for previous operations to terminate"
5693 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5694 self._write_ns_status(
5695 nsr_id=nsr_id,
5696 ns_state=None,
5697 current_operation="UPDATING",
5698 current_operation_id=nslcmop_id,
5699 )
5700
5701 step = "Getting nslcmop from database"
5702 db_nslcmop = self.db.get_one(
5703 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5704 )
5705 update_type = db_nslcmop["operationParams"]["updateType"]
5706
5707 step = "Getting nsr from database"
5708 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5709 old_operational_status = db_nsr["operational-status"]
5710 db_nsr_update["operational-status"] = "updating"
5711 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5712 nsr_deployed = db_nsr["_admin"].get("deployed")
5713
5714 if update_type == "CHANGE_VNFPKG":
5715 # Get the input parameters given through update request
5716 vnf_instance_id = db_nslcmop["operationParams"][
5717 "changeVnfPackageData"
5718 ].get("vnfInstanceId")
5719
5720 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5721 "vnfdId"
5722 )
5723 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5724
5725 step = "Getting vnfr from database"
5726 db_vnfr = self.db.get_one(
5727 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5728 )
5729
5730 step = "Getting vnfds from database"
5731 # Latest VNFD
5732 latest_vnfd = self.db.get_one(
5733 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5734 )
5735 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5736
5737 # Current VNFD
5738 current_vnf_revision = db_vnfr.get("revision", 1)
5739 current_vnfd = self.db.get_one(
5740 "vnfds_revisions",
5741 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5742 fail_on_empty=False,
5743 )
5744 # Charm artifact paths will be filled up later
5745 (
5746 current_charm_artifact_path,
5747 target_charm_artifact_path,
5748 charm_artifact_paths,
5749 helm_artifacts,
5750 ) = ([], [], [], [])
5751
5752 step = "Checking if revision has changed in VNFD"
5753 if current_vnf_revision != latest_vnfd_revision:
5754 change_type = "policy_updated"
5755
5756 # There is new revision of VNFD, update operation is required
5757 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5758 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5759
5760 step = "Removing the VNFD packages if they exist in the local path"
5761 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5762 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5763
5764 step = "Get the VNFD packages from FSMongo"
5765 self.fs.sync(from_path=latest_vnfd_path)
5766 self.fs.sync(from_path=current_vnfd_path)
5767
5768 step = (
5769 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5770 )
5771 current_base_folder = current_vnfd["_admin"]["storage"]
5772 latest_base_folder = latest_vnfd["_admin"]["storage"]
5773
5774 for vca_index, vca_deployed in enumerate(
5775 get_iterable(nsr_deployed, "VCA")
5776 ):
5777 vnf_index = db_vnfr.get("member-vnf-index-ref")
5778
5779 # Getting charm-id and charm-type
5780 if vca_deployed.get("member-vnf-index") == vnf_index:
5781 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5782 vca_type = vca_deployed.get("type")
5783 vdu_count_index = vca_deployed.get("vdu_count_index")
5784
5785 # Getting ee-id
5786 ee_id = vca_deployed.get("ee_id")
5787
5788 step = "Getting descriptor config"
5789 if current_vnfd.get("kdu"):
5790 search_key = "kdu_name"
5791 else:
5792 search_key = "vnfd_id"
5793
5794 entity_id = vca_deployed.get(search_key)
5795
5796 descriptor_config = get_configuration(
5797 current_vnfd, entity_id
5798 )
5799
5800 if "execution-environment-list" in descriptor_config:
5801 ee_list = descriptor_config.get(
5802 "execution-environment-list", []
5803 )
5804 else:
5805 ee_list = []
5806
# There could be several charms used in the same VNF
5808 for ee_item in ee_list:
5809 if ee_item.get("juju"):
5810 step = "Getting charm name"
5811 charm_name = ee_item["juju"].get("charm")
5812
5813 step = "Setting Charm artifact paths"
5814 current_charm_artifact_path.append(
5815 get_charm_artifact_path(
5816 current_base_folder,
5817 charm_name,
5818 vca_type,
5819 current_vnf_revision,
5820 )
5821 )
5822 target_charm_artifact_path.append(
5823 get_charm_artifact_path(
5824 latest_base_folder,
5825 charm_name,
5826 vca_type,
5827 latest_vnfd_revision,
5828 )
5829 )
5830 elif ee_item.get("helm-chart"):
5831 # add chart to list and all parameters
5832 step = "Getting helm chart name"
5833 chart_name = ee_item.get("helm-chart")
5834 vca_type = "helm-v3"
5835 step = "Setting Helm chart artifact paths"
5836
5837 helm_artifacts.append(
5838 {
5839 "current_artifact_path": get_charm_artifact_path(
5840 current_base_folder,
5841 chart_name,
5842 vca_type,
5843 current_vnf_revision,
5844 ),
5845 "target_artifact_path": get_charm_artifact_path(
5846 latest_base_folder,
5847 chart_name,
5848 vca_type,
5849 latest_vnfd_revision,
5850 ),
5851 "ee_id": ee_id,
5852 "vca_index": vca_index,
5853 "vdu_index": vdu_count_index,
5854 }
5855 )
5856
5857 charm_artifact_paths = zip(
5858 current_charm_artifact_path, target_charm_artifact_path
5859 )
5860
5861 step = "Checking if software version has changed in VNFD"
5862 if find_software_version(current_vnfd) != find_software_version(
5863 latest_vnfd
5864 ):
5865 step = "Checking if existing VNF has charm"
5866 for current_charm_path, target_charm_path in list(
5867 charm_artifact_paths
5868 ):
5869 if current_charm_path:
5870 raise LcmException(
5871 "Software version change is not supported as VNF instance {} has charm.".format(
5872 vnf_instance_id
5873 )
5874 )
5875
# There is no change in the charm package, so redeploy the VNF
# based on the new descriptor
5878 step = "Redeploying VNF"
5879 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5880 (result, detailed_status) = await self._ns_redeploy_vnf(
5881 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5882 )
5883 if result == "FAILED":
5884 nslcmop_operation_state = result
5885 error_description_nslcmop = detailed_status
5886 db_nslcmop_update["detailed-status"] = detailed_status
5887 self.logger.debug(
5888 logging_text
5889 + " step {} Done with result {} {}".format(
5890 step, nslcmop_operation_state, detailed_status
5891 )
5892 )
5893
5894 else:
5895 step = "Checking if any charm package has changed or not"
5896 for current_charm_path, target_charm_path in list(
5897 charm_artifact_paths
5898 ):
5899 if (
5900 current_charm_path
5901 and target_charm_path
5902 and self.check_charm_hash_changed(
5903 current_charm_path, target_charm_path
5904 )
5905 ):
5906 step = "Checking whether VNF uses juju bundle"
5907 if check_juju_bundle_existence(current_vnfd):
5908 raise LcmException(
5909 "Charm upgrade is not supported for the instance which"
5910 " uses juju-bundle: {}".format(
5911 check_juju_bundle_existence(current_vnfd)
5912 )
5913 )
5914
5915 step = "Upgrading Charm"
5916 (
5917 result,
5918 detailed_status,
5919 ) = await self._ns_charm_upgrade(
5920 ee_id=ee_id,
5921 charm_id=vca_id,
5922 charm_type=vca_type,
5923 path=self.fs.path + target_charm_path,
5924 timeout=timeout_seconds,
5925 )
5926
5927 if result == "FAILED":
5928 nslcmop_operation_state = result
5929 error_description_nslcmop = detailed_status
5930
5931 db_nslcmop_update["detailed-status"] = detailed_status
5932 self.logger.debug(
5933 logging_text
5934 + " step {} Done with result {} {}".format(
5935 step, nslcmop_operation_state, detailed_status
5936 )
5937 )
5938
5939 step = "Updating policies"
5940 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5941 result = "COMPLETED"
5942 detailed_status = "Done"
5943 db_nslcmop_update["detailed-status"] = "Done"
5944
# helm-based EEs
5946 for item in helm_artifacts:
5947 if not (
5948 item["current_artifact_path"]
5949 and item["target_artifact_path"]
5950 and self.check_charm_hash_changed(
5951 item["current_artifact_path"],
5952 item["target_artifact_path"],
5953 )
5954 ):
5955 continue
5956 db_update_entry = "_admin.deployed.VCA.{}.".format(
5957 item["vca_index"]
5958 )
5959 vnfr_id = db_vnfr["_id"]
5960 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
5961 db_dict = {
5962 "collection": "nsrs",
5963 "filter": {"_id": nsr_id},
5964 "path": db_update_entry,
5965 }
5966 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
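# For illustration (hypothetical id, format assumed from get_ee_id_parts):
# an ee_id stored as "helm-v3:osm-ns-1234.myrelease" would split into
# vca_type "helm-v3", namespace "osm-ns-1234" and helm_id "myrelease".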
5967 await self.vca_map[vca_type].upgrade_execution_environment(
5968 namespace=namespace,
5969 helm_id=helm_id,
5970 db_dict=db_dict,
5971 config=osm_config,
5972 artifact_path=item["target_artifact_path"],
5973 vca_type=vca_type,
5974 )
5975 vnf_id = db_vnfr.get("vnfd-ref")
5976 config_descriptor = get_configuration(latest_vnfd, vnf_id)
5977 self.logger.debug("get ssh key block")
5978 rw_mgmt_ip = None
5979 if deep_get(
5980 config_descriptor,
5981 ("config-access", "ssh-access", "required"),
5982 ):
# Needed to inject an ssh key
5984 user = deep_get(
5985 config_descriptor,
5986 ("config-access", "ssh-access", "default-user"),
5987 )
5988 step = (
5989 "Install configuration Software, getting public ssh key"
5990 )
5991 pub_key = await self.vca_map[
5992 vca_type
5993 ].get_ee_ssh_public__key(
5994 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
5995 )
5996
5997 step = (
5998 "Insert public key into VM user={} ssh_key={}".format(
5999 user, pub_key
6000 )
6001 )
6002 self.logger.debug(logging_text + step)
6003
# wait for RO to report the VM ip-address, then insert pub_key into the VM
6005 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6006 logging_text,
6007 nsr_id,
6008 vnfr_id,
6009 None,
6010 item["vdu_index"],
6011 user=user,
6012 pub_key=pub_key,
6013 )
6014
6015 initial_config_primitive_list = config_descriptor.get(
6016 "initial-config-primitive"
6017 )
6018 config_primitive = next(
6019 (
6020 p
6021 for p in initial_config_primitive_list
6022 if p["name"] == "config"
6023 ),
6024 None,
6025 )
6026 if not config_primitive:
6027 continue
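# For illustration (hypothetical descriptor snippet), the lookup above
# selects the primitive literally named "config" from a list such as
#     {"initial-config-primitive": [
#         {"name": "config", "parameter": [{"name": "ssh-hostname", "value": "<rw_mgmt_ip>"}]},
#         {"name": "start"}]}
# Only "config" is re-executed here after the execution environment upgrade.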
6028
6029 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6030 if rw_mgmt_ip:
6031 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6032 if db_vnfr.get("additionalParamsForVnf"):
6033 deploy_params.update(
6034 parse_yaml_strings(
6035 db_vnfr["additionalParamsForVnf"].copy()
6036 )
6037 )
6038 primitive_params_ = self._map_primitive_params(
6039 config_primitive, {}, deploy_params
6040 )
6041
6042 step = "execute primitive '{}' params '{}'".format(
6043 config_primitive["name"], primitive_params_
6044 )
6045 self.logger.debug(logging_text + step)
6046 await self.vca_map[vca_type].exec_primitive(
6047 ee_id=ee_id,
6048 primitive_name=config_primitive["name"],
6049 params_dict=primitive_params_,
6050 db_dict=db_dict,
6051 vca_id=vca_id,
6052 vca_type=vca_type,
6053 )
6054
6055 step = "Updating policies"
6056 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6057 detailed_status = "Done"
6058 db_nslcmop_update["detailed-status"] = "Done"
6059
# If nslcmop_operation_state is None, no operation has failed.
6061 if not nslcmop_operation_state:
6062 nslcmop_operation_state = "COMPLETED"
6063
# If the CHANGE_VNFPKG operation is successful,
# the vnf revision needs to be updated
6066 vnfr_update["revision"] = latest_vnfd_revision
6067 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6068
6069 self.logger.debug(
6070 logging_text
6071 + " task Done with result {} {}".format(
6072 nslcmop_operation_state, detailed_status
6073 )
6074 )
6075 elif update_type == "REMOVE_VNF":
6076 # This part is included in https://osm.etsi.org/gerrit/11876
6077 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6078 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6079 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6080 step = "Removing VNF"
6081 (result, detailed_status) = await self.remove_vnf(
6082 nsr_id, nslcmop_id, vnf_instance_id
6083 )
6084 if result == "FAILED":
6085 nslcmop_operation_state = result
6086 error_description_nslcmop = detailed_status
6087 db_nslcmop_update["detailed-status"] = detailed_status
6088 change_type = "vnf_terminated"
6089 if not nslcmop_operation_state:
6090 nslcmop_operation_state = "COMPLETED"
6091 self.logger.debug(
6092 logging_text
6093 + " task Done with result {} {}".format(
6094 nslcmop_operation_state, detailed_status
6095 )
6096 )
6097
6098 elif update_type == "OPERATE_VNF":
6099 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6100 "vnfInstanceId"
6101 ]
6102 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6103 "changeStateTo"
6104 ]
6105 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6106 "additionalParam"
6107 ]
6108 (result, detailed_status) = await self.rebuild_start_stop(
6109 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6110 )
6111 if result == "FAILED":
6112 nslcmop_operation_state = result
6113 error_description_nslcmop = detailed_status
6114 db_nslcmop_update["detailed-status"] = detailed_status
6115 if not nslcmop_operation_state:
6116 nslcmop_operation_state = "COMPLETED"
6117 self.logger.debug(
6118 logging_text
6119 + " task Done with result {} {}".format(
6120 nslcmop_operation_state, detailed_status
6121 )
6122 )
6123
# If nslcmop_operation_state is None, no operation has failed
# and all operations completed successfully.
6126 if not nslcmop_operation_state:
6127 nslcmop_operation_state = "COMPLETED"
6128 db_nsr_update["operational-status"] = old_operational_status
6129
6130 except (DbException, LcmException, N2VCException, K8sException) as e:
6131 self.logger.error(logging_text + "Exit Exception {}".format(e))
6132 exc = e
6133 except asyncio.CancelledError:
6134 self.logger.error(
6135 logging_text + "Cancelled Exception while '{}'".format(step)
6136 )
6137 exc = "Operation was cancelled"
6138 except asyncio.TimeoutError:
6139 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6140 exc = "Timeout"
6141 except Exception as e:
6142 exc = traceback.format_exc()
6143 self.logger.critical(
6144 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6145 exc_info=True,
6146 )
6147 finally:
6148 if exc:
6149 db_nslcmop_update[
6150 "detailed-status"
6151 ] = (
6152 detailed_status
6153 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6154 nslcmop_operation_state = "FAILED"
6155 db_nsr_update["operational-status"] = old_operational_status
6156 if db_nsr:
6157 self._write_ns_status(
6158 nsr_id=nsr_id,
6159 ns_state=db_nsr["nsState"],
6160 current_operation="IDLE",
6161 current_operation_id=None,
6162 other_update=db_nsr_update,
6163 )
6164
6165 self._write_op_status(
6166 op_id=nslcmop_id,
6167 stage="",
6168 error_message=error_description_nslcmop,
6169 operation_state=nslcmop_operation_state,
6170 other_update=db_nslcmop_update,
6171 )
6172
6173 if nslcmop_operation_state:
6174 try:
6175 msg = {
6176 "nsr_id": nsr_id,
6177 "nslcmop_id": nslcmop_id,
6178 "operationState": nslcmop_operation_state,
6179 }
6180 if (
6181 change_type in ("vnf_terminated", "policy_updated")
6182 and member_vnf_index
6183 ):
6184 msg.update({"vnf_member_index": member_vnf_index})
6185 await self.msg.aiowrite("ns", change_type, msg)
6186 except Exception as e:
6187 self.logger.error(
6188 logging_text + "kafka_write notification Exception {}".format(e)
6189 )
6190 self.logger.debug(logging_text + "Exit")
6191 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6192 return nslcmop_operation_state, detailed_status
6193
6194 async def scale(self, nsr_id, nslcmop_id):
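"""Scale an NS in or out

Runs pre-scale config primitives, scales VDUs/KDUs/VCAs according to the
requested scaling group, then runs post-scale config primitives.

Args:
nsr_id: NS instance id
nslcmop_id: ns lcm operation id
"""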
6195 # Try to lock HA task here
6196 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6197 if not task_is_locked_by_me:
6198 return
6199
6200 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6201 stage = ["", "", ""]
6202 tasks_dict_info = {}
6203 # ^ stage, step, VIM progress
6204 self.logger.debug(logging_text + "Enter")
6205 # get all needed from database
6206 db_nsr = None
6207 db_nslcmop_update = {}
6208 db_nsr_update = {}
6209 exc = None
# in case of error, indicates which part of the scaling failed, so the nsr can be set to error status
6211 scale_process = None
6212 old_operational_status = ""
6213 old_config_status = ""
6214 nsi_id = None
6215 try:
6216 # wait for any previous tasks in process
6217 step = "Waiting for previous operations to terminate"
6218 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6219 self._write_ns_status(
6220 nsr_id=nsr_id,
6221 ns_state=None,
6222 current_operation="SCALING",
6223 current_operation_id=nslcmop_id,
6224 )
6225
6226 step = "Getting nslcmop from database"
6227 self.logger.debug(
6228 step + " after having waited for previous tasks to be completed"
6229 )
6230 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6231
6232 step = "Getting nsr from database"
6233 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6234 old_operational_status = db_nsr["operational-status"]
6235 old_config_status = db_nsr["config-status"]
6236
6237 step = "Parsing scaling parameters"
6238 db_nsr_update["operational-status"] = "scaling"
6239 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6240 nsr_deployed = db_nsr["_admin"].get("deployed")
6241
6242 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6243 "scaleByStepData"
6244 ]["member-vnf-index"]
6245 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6246 "scaleByStepData"
6247 ]["scaling-group-descriptor"]
6248 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6249 # for backward compatibility
6250 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6251 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6252 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6253 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6254
6255 step = "Getting vnfr from database"
6256 db_vnfr = self.db.get_one(
6257 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6258 )
6259
6260 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6261
6262 step = "Getting vnfd from database"
6263 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6264
6265 base_folder = db_vnfd["_admin"]["storage"]
6266
6267 step = "Getting scaling-group-descriptor"
6268 scaling_descriptor = find_in_list(
6269 get_scaling_aspect(db_vnfd),
6270 lambda scale_desc: scale_desc["name"] == scaling_group,
6271 )
6272 if not scaling_descriptor:
6273 raise LcmException(
6274 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6275 "at vnfd:scaling-group-descriptor".format(scaling_group)
6276 )
6277
6278 step = "Sending scale order to VIM"
6279 # TODO check if ns is in a proper status
6280 nb_scale_op = 0
6281 if not db_nsr["_admin"].get("scaling-group"):
6282 self.update_db_2(
6283 "nsrs",
6284 nsr_id,
6285 {
6286 "_admin.scaling-group": [
6287 {"name": scaling_group, "nb-scale-op": 0}
6288 ]
6289 },
6290 )
6291 admin_scale_index = 0
6292 else:
6293 for admin_scale_index, admin_scale_info in enumerate(
6294 db_nsr["_admin"]["scaling-group"]
6295 ):
6296 if admin_scale_info["name"] == scaling_group:
6297 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6298 break
else:  # not found: point the index one past the last element and add a new entry with this name
6300 admin_scale_index += 1
6301 db_nsr_update[
6302 "_admin.scaling-group.{}.name".format(admin_scale_index)
6303 ] = scaling_group
6304
6305 vca_scaling_info = []
6306 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6307 if scaling_type == "SCALE_OUT":
6308 if "aspect-delta-details" not in scaling_descriptor:
6309 raise LcmException(
6310 "Aspect delta details not fount in scaling descriptor {}".format(
6311 scaling_descriptor["name"]
6312 )
6313 )
# check whether max-instance-count is reached
6315 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6316
6317 scaling_info["scaling_direction"] = "OUT"
6318 scaling_info["vdu-create"] = {}
6319 scaling_info["kdu-create"] = {}
6320 for delta in deltas:
6321 for vdu_delta in delta.get("vdu-delta", {}):
6322 vdud = get_vdu(db_vnfd, vdu_delta["id"])
# vdu_index also provides the number of instances of the targeted vdu
6324 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6325 cloud_init_text = self._get_vdu_cloud_init_content(
6326 vdud, db_vnfd
6327 )
6328 if cloud_init_text:
6329 additional_params = (
6330 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6331 or {}
6332 )
6333 cloud_init_list = []
6334
6335 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6336 max_instance_count = 10
6337 if vdu_profile and "max-number-of-instances" in vdu_profile:
6338 max_instance_count = vdu_profile.get(
6339 "max-number-of-instances", 10
6340 )
6341
6342 default_instance_num = get_number_of_instances(
6343 db_vnfd, vdud["id"]
6344 )
6345 instances_number = vdu_delta.get("number-of-instances", 1)
6346 nb_scale_op += instances_number
6347
6348 new_instance_count = nb_scale_op + default_instance_num
# If the new total exceeds the maximum while the current vdu count is
# still below it, count only the instances that cross the limit
if new_instance_count > max_instance_count > vdu_count:
instances_number = new_instance_count - max_instance_count
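# Worked example (illustrative numbers): with default_instance_num = 1 and
# an accumulated nb_scale_op = 2, new_instance_count is 3; with
# max_instance_count = 5 the guard below passes and instances_number keeps
# the vdu-delta's number-of-instances. Had new_instance_count reached 6,
# the 6 > 5 check below would raise LcmException, so a scale-out can never
# exceed max-number-of-instances.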
6355
6356 if new_instance_count > max_instance_count:
6357 raise LcmException(
6358 "reached the limit of {} (max-instance-count) "
6359 "scaling-out operations for the "
6360 "scaling-group-descriptor '{}'".format(
6361 nb_scale_op, scaling_group
6362 )
6363 )
6364 for x in range(vdu_delta.get("number-of-instances", 1)):
6365 if cloud_init_text:
# TODO The VDU's own IP is not available here because db_vnfr is not updated yet.
6367 additional_params["OSM"] = get_osm_params(
6368 db_vnfr, vdu_delta["id"], vdu_index + x
6369 )
6370 cloud_init_list.append(
6371 self._parse_cloud_init(
6372 cloud_init_text,
6373 additional_params,
6374 db_vnfd["id"],
6375 vdud["id"],
6376 )
6377 )
6378 vca_scaling_info.append(
6379 {
6380 "osm_vdu_id": vdu_delta["id"],
6381 "member-vnf-index": vnf_index,
6382 "type": "create",
6383 "vdu_index": vdu_index + x,
6384 }
6385 )
6386 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6387 for kdu_delta in delta.get("kdu-resource-delta", {}):
6388 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6389 kdu_name = kdu_profile["kdu-name"]
6390 resource_name = kdu_profile.get("resource-name", "")
6391
# A single delta might reference several kdus,
# so keep a list per kdu
6394 if not scaling_info["kdu-create"].get(kdu_name, None):
6395 scaling_info["kdu-create"][kdu_name] = []
6396
6397 kdur = get_kdur(db_vnfr, kdu_name)
6398 if kdur.get("helm-chart"):
6399 k8s_cluster_type = "helm-chart-v3"
6400 self.logger.debug("kdur: {}".format(kdur))
6401 elif kdur.get("juju-bundle"):
6402 k8s_cluster_type = "juju-bundle"
6403 else:
6404 raise LcmException(
6405 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6406 "juju-bundle. Maybe an old NBI version is running".format(
6407 db_vnfr["member-vnf-index-ref"], kdu_name
6408 )
6409 )
6410
6411 max_instance_count = 10
6412 if kdu_profile and "max-number-of-instances" in kdu_profile:
6413 max_instance_count = kdu_profile.get(
6414 "max-number-of-instances", 10
6415 )
6416
6417 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6418 deployed_kdu, _ = get_deployed_kdu(
6419 nsr_deployed, kdu_name, vnf_index
6420 )
6421 if deployed_kdu is None:
6422 raise LcmException(
6423 "KDU '{}' for vnf '{}' not deployed".format(
6424 kdu_name, vnf_index
6425 )
6426 )
6427 kdu_instance = deployed_kdu.get("kdu-instance")
6428 instance_num = await self.k8scluster_map[
6429 k8s_cluster_type
6430 ].get_scale_count(
6431 resource_name,
6432 kdu_instance,
6433 vca_id=vca_id,
6434 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6435 kdu_model=deployed_kdu.get("kdu-model"),
6436 )
6437 kdu_replica_count = instance_num + kdu_delta.get(
6438 "number-of-instances", 1
6439 )
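# For illustration (hypothetical numbers): if get_scale_count() reports 2
# replicas and the kdu-resource-delta requests 1 more, kdu_replica_count
# becomes 3; the checks below then clamp it to max-number-of-instances or
# reject the request if the ceiling is crossed.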
6440
# If the new replica count exceeds the maximum while the current
# count is below it, clamp the kdu replica count to the maximum
6443 if kdu_replica_count > max_instance_count > instance_num:
6444 kdu_replica_count = max_instance_count
6445 if kdu_replica_count > max_instance_count:
6446 raise LcmException(
6447 "reached the limit of {} (max-instance-count) "
6448 "scaling-out operations for the "
6449 "scaling-group-descriptor '{}'".format(
6450 instance_num, scaling_group
6451 )
6452 )
6453
6454 for x in range(kdu_delta.get("number-of-instances", 1)):
6455 vca_scaling_info.append(
6456 {
6457 "osm_kdu_id": kdu_name,
6458 "member-vnf-index": vnf_index,
6459 "type": "create",
6460 "kdu_index": instance_num + x - 1,
6461 }
6462 )
6463 scaling_info["kdu-create"][kdu_name].append(
6464 {
6465 "member-vnf-index": vnf_index,
6466 "type": "create",
6467 "k8s-cluster-type": k8s_cluster_type,
6468 "resource-name": resource_name,
6469 "scale": kdu_replica_count,
6470 }
6471 )
6472 elif scaling_type == "SCALE_IN":
6473 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6474
6475 scaling_info["scaling_direction"] = "IN"
6476 scaling_info["vdu-delete"] = {}
6477 scaling_info["kdu-delete"] = {}
6478
6479 for delta in deltas:
6480 for vdu_delta in delta.get("vdu-delta", {}):
6481 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6482 min_instance_count = 0
6483 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6484 if vdu_profile and "min-number-of-instances" in vdu_profile:
6485 min_instance_count = vdu_profile["min-number-of-instances"]
6486
6487 default_instance_num = get_number_of_instances(
6488 db_vnfd, vdu_delta["id"]
6489 )
6490 instance_num = vdu_delta.get("number-of-instances", 1)
6491 nb_scale_op -= instance_num
6492
6493 new_instance_count = nb_scale_op + default_instance_num
6494
6495 if new_instance_count < min_instance_count < vdu_count:
6496 instances_number = min_instance_count - new_instance_count
6497 else:
6498 instances_number = instance_num
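# Worked example (illustrative numbers): with default_instance_num = 3 and
# nb_scale_op = -2 after this delta, new_instance_count is 1; with
# min_instance_count = 1 the guard below passes. Had new_instance_count
# dropped to 0, the 0 < 1 check below would raise LcmException, so a
# scale-in can never go under min-number-of-instances.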
6499
6500 if new_instance_count < min_instance_count:
6501 raise LcmException(
6502 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6503 "scaling-group-descriptor '{}'".format(
6504 nb_scale_op, scaling_group
6505 )
6506 )
6507 for x in range(vdu_delta.get("number-of-instances", 1)):
6508 vca_scaling_info.append(
6509 {
6510 "osm_vdu_id": vdu_delta["id"],
6511 "member-vnf-index": vnf_index,
6512 "type": "delete",
6513 "vdu_index": vdu_index - 1 - x,
6514 }
6515 )
6516 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6517 for kdu_delta in delta.get("kdu-resource-delta", {}):
6518 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6519 kdu_name = kdu_profile["kdu-name"]
6520 resource_name = kdu_profile.get("resource-name", "")
6521
6522 if not scaling_info["kdu-delete"].get(kdu_name, None):
6523 scaling_info["kdu-delete"][kdu_name] = []
6524
6525 kdur = get_kdur(db_vnfr, kdu_name)
6526 if kdur.get("helm-chart"):
6527 k8s_cluster_type = "helm-chart-v3"
6528 self.logger.debug("kdur: {}".format(kdur))
6529 elif kdur.get("juju-bundle"):
6530 k8s_cluster_type = "juju-bundle"
6531 else:
6532 raise LcmException(
6533 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6534 "juju-bundle. Maybe an old NBI version is running".format(
6535 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6536 )
6537 )
6538
6539 min_instance_count = 0
6540 if kdu_profile and "min-number-of-instances" in kdu_profile:
6541 min_instance_count = kdu_profile["min-number-of-instances"]
6542
6543 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6544 deployed_kdu, _ = get_deployed_kdu(
6545 nsr_deployed, kdu_name, vnf_index
6546 )
6547 if deployed_kdu is None:
6548 raise LcmException(
6549 "KDU '{}' for vnf '{}' not deployed".format(
6550 kdu_name, vnf_index
6551 )
6552 )
6553 kdu_instance = deployed_kdu.get("kdu-instance")
6554 instance_num = await self.k8scluster_map[
6555 k8s_cluster_type
6556 ].get_scale_count(
6557 resource_name,
6558 kdu_instance,
6559 vca_id=vca_id,
6560 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6561 kdu_model=deployed_kdu.get("kdu-model"),
6562 )
6563 kdu_replica_count = instance_num - kdu_delta.get(
6564 "number-of-instances", 1
6565 )
6566
6567 if kdu_replica_count < min_instance_count < instance_num:
6568 kdu_replica_count = min_instance_count
6569 if kdu_replica_count < min_instance_count:
6570 raise LcmException(
6571 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6572 "scaling-group-descriptor '{}'".format(
6573 instance_num, scaling_group
6574 )
6575 )
6576
6577 for x in range(kdu_delta.get("number-of-instances", 1)):
6578 vca_scaling_info.append(
6579 {
6580 "osm_kdu_id": kdu_name,
6581 "member-vnf-index": vnf_index,
6582 "type": "delete",
6583 "kdu_index": instance_num - x - 1,
6584 }
6585 )
6586 scaling_info["kdu-delete"][kdu_name].append(
6587 {
6588 "member-vnf-index": vnf_index,
6589 "type": "delete",
6590 "k8s-cluster-type": k8s_cluster_type,
6591 "resource-name": resource_name,
6592 "scale": kdu_replica_count,
6593 }
6594 )
6595
# update scaling_info with the VDUs to be deleted, recording their interface ip/mac addresses
6597 vdu_delete = copy(scaling_info.get("vdu-delete"))
6598 if scaling_info["scaling_direction"] == "IN":
6599 for vdur in reversed(db_vnfr["vdur"]):
6600 if vdu_delete.get(vdur["vdu-id-ref"]):
6601 vdu_delete[vdur["vdu-id-ref"]] -= 1
6602 scaling_info["vdu"].append(
6603 {
6604 "name": vdur.get("name") or vdur.get("vdu-name"),
6605 "vdu_id": vdur["vdu-id-ref"],
6606 "interface": [],
6607 }
6608 )
6609 for interface in vdur["interfaces"]:
6610 scaling_info["vdu"][-1]["interface"].append(
6611 {
6612 "name": interface["name"],
6613 "ip_address": interface["ip-address"],
6614 "mac_address": interface.get("mac-address"),
6615 }
6616 )
6617 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6618
6619 # PRE-SCALE BEGIN
6620 step = "Executing pre-scale vnf-config-primitive"
6621 if scaling_descriptor.get("scaling-config-action"):
6622 for scaling_config_action in scaling_descriptor[
6623 "scaling-config-action"
6624 ]:
6625 if (
6626 scaling_config_action.get("trigger") == "pre-scale-in"
6627 and scaling_type == "SCALE_IN"
6628 ) or (
6629 scaling_config_action.get("trigger") == "pre-scale-out"
6630 and scaling_type == "SCALE_OUT"
6631 ):
6632 vnf_config_primitive = scaling_config_action[
6633 "vnf-config-primitive-name-ref"
6634 ]
6635 step = db_nslcmop_update[
6636 "detailed-status"
6637 ] = "executing pre-scale scaling-config-action '{}'".format(
6638 vnf_config_primitive
6639 )
6640
6641 # look for primitive
6642 for config_primitive in (
6643 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6644 ).get("config-primitive", ()):
6645 if config_primitive["name"] == vnf_config_primitive:
6646 break
6647 else:
6648 raise LcmException(
6649 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6650 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6651 "primitive".format(scaling_group, vnf_config_primitive)
6652 )
6653
6654 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6655 if db_vnfr.get("additionalParamsForVnf"):
6656 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6657
6658 scale_process = "VCA"
6659 db_nsr_update["config-status"] = "configuring pre-scaling"
6660 primitive_params = self._map_primitive_params(
6661 config_primitive, {}, vnfr_params
6662 )
6663
6664 # Pre-scale retry check: Check if this sub-operation has been executed before
6665 op_index = self._check_or_add_scale_suboperation(
6666 db_nslcmop,
6667 vnf_index,
6668 vnf_config_primitive,
6669 primitive_params,
6670 "PRE-SCALE",
6671 )
6672 if op_index == self.SUBOPERATION_STATUS_SKIP:
6673 # Skip sub-operation
6674 result = "COMPLETED"
6675 result_detail = "Done"
6676 self.logger.debug(
6677 logging_text
6678 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6679 vnf_config_primitive, result, result_detail
6680 )
6681 )
6682 else:
6683 if op_index == self.SUBOPERATION_STATUS_NEW:
6684 # New sub-operation: Get index of this sub-operation
6685 op_index = (
6686 len(db_nslcmop.get("_admin", {}).get("operations"))
6687 - 1
6688 )
6689 self.logger.debug(
6690 logging_text
6691 + "vnf_config_primitive={} New sub-operation".format(
6692 vnf_config_primitive
6693 )
6694 )
6695 else:
6696 # retry: Get registered params for this existing sub-operation
6697 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6698 op_index
6699 ]
6700 vnf_index = op.get("member_vnf_index")
6701 vnf_config_primitive = op.get("primitive")
6702 primitive_params = op.get("primitive_params")
6703 self.logger.debug(
6704 logging_text
6705 + "vnf_config_primitive={} Sub-operation retry".format(
6706 vnf_config_primitive
6707 )
6708 )
# Execute the primitive, either with new (first-time) or registered (retry) args
6710 ee_descriptor_id = config_primitive.get(
6711 "execution-environment-ref"
6712 )
6713 primitive_name = config_primitive.get(
6714 "execution-environment-primitive", vnf_config_primitive
6715 )
6716 ee_id, vca_type = self._look_for_deployed_vca(
6717 nsr_deployed["VCA"],
6718 member_vnf_index=vnf_index,
6719 vdu_id=None,
6720 vdu_count_index=None,
6721 ee_descriptor_id=ee_descriptor_id,
6722 )
6723 result, result_detail = await self._ns_execute_primitive(
6724 ee_id,
6725 primitive_name,
6726 primitive_params,
6727 vca_type=vca_type,
6728 vca_id=vca_id,
6729 )
6730 self.logger.debug(
6731 logging_text
6732 + "vnf_config_primitive={} Done with result {} {}".format(
6733 vnf_config_primitive, result, result_detail
6734 )
6735 )
6736 # Update operationState = COMPLETED | FAILED
6737 self._update_suboperation_status(
6738 db_nslcmop, op_index, result, result_detail
6739 )
6740
6741 if result == "FAILED":
6742 raise LcmException(result_detail)
6743 db_nsr_update["config-status"] = old_config_status
6744 scale_process = None
6745 # PRE-SCALE END
6746
6747 db_nsr_update[
6748 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6749 ] = nb_scale_op
6750 db_nsr_update[
6751 "_admin.scaling-group.{}.time".format(admin_scale_index)
6752 ] = time()
6753
6754 # SCALE-IN VCA - BEGIN
6755 if vca_scaling_info:
6756 step = db_nslcmop_update[
6757 "detailed-status"
6758 ] = "Deleting the execution environments"
6759 scale_process = "VCA"
6760 for vca_info in vca_scaling_info:
6761 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6762 member_vnf_index = str(vca_info["member-vnf-index"])
6763 self.logger.debug(
6764 logging_text + "vdu info: {}".format(vca_info)
6765 )
6766 if vca_info.get("osm_vdu_id"):
6767 vdu_id = vca_info["osm_vdu_id"]
6768 vdu_index = int(vca_info["vdu_index"])
6769 stage[
6770 1
6771 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6772 member_vnf_index, vdu_id, vdu_index
6773 )
6774 stage[2] = step = "Scaling in VCA"
6775 self._write_op_status(op_id=nslcmop_id, stage=stage)
6776 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6777 config_update = db_nsr["configurationStatus"]
6778 for vca_index, vca in enumerate(vca_update):
6779 if (
6780 (vca or vca.get("ee_id"))
6781 and vca["member-vnf-index"] == member_vnf_index
6782 and vca["vdu_count_index"] == vdu_index
6783 ):
6784 if vca.get("vdu_id"):
6785 config_descriptor = get_configuration(
6786 db_vnfd, vca.get("vdu_id")
6787 )
6788 elif vca.get("kdu_name"):
6789 config_descriptor = get_configuration(
6790 db_vnfd, vca.get("kdu_name")
6791 )
6792 else:
6793 config_descriptor = get_configuration(
6794 db_vnfd, db_vnfd["id"]
6795 )
6796 operation_params = (
6797 db_nslcmop.get("operationParams") or {}
6798 )
6799 exec_terminate_primitives = not operation_params.get(
6800 "skip_terminate_primitives"
6801 ) and vca.get("needed_terminate")
6802 task = asyncio.ensure_future(
6803 asyncio.wait_for(
6804 self.destroy_N2VC(
6805 logging_text,
6806 db_nslcmop,
6807 vca,
6808 config_descriptor,
6809 vca_index,
6810 destroy_ee=True,
6811 exec_primitives=exec_terminate_primitives,
6812 scaling_in=True,
6813 vca_id=vca_id,
6814 ),
6815 timeout=self.timeout.charm_delete,
6816 )
6817 )
6818 tasks_dict_info[task] = "Terminating VCA {}".format(
6819 vca.get("ee_id")
6820 )
6821 del vca_update[vca_index]
6822 del config_update[vca_index]
6823 # wait for pending tasks of terminate primitives
6824 if tasks_dict_info:
6825 self.logger.debug(
6826 logging_text
6827 + "Waiting for tasks {}".format(
6828 list(tasks_dict_info.keys())
6829 )
6830 )
6831 error_list = await self._wait_for_tasks(
6832 logging_text,
6833 tasks_dict_info,
6834 min(
6835 self.timeout.charm_delete, self.timeout.ns_terminate
6836 ),
6837 stage,
6838 nslcmop_id,
6839 )
6840 tasks_dict_info.clear()
6841 if error_list:
6842 raise LcmException("; ".join(error_list))
6843
6844 db_vca_and_config_update = {
6845 "_admin.deployed.VCA": vca_update,
6846 "configurationStatus": config_update,
6847 }
6848 self.update_db_2(
6849 "nsrs", db_nsr["_id"], db_vca_and_config_update
6850 )
6851 scale_process = None
6852 # SCALE-IN VCA - END
6853
6854 # SCALE RO - BEGIN
6855 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6856 scale_process = "RO"
6857 if self.ro_config.ng:
6858 await self._scale_ng_ro(
6859 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6860 )
6861 scaling_info.pop("vdu-create", None)
6862 scaling_info.pop("vdu-delete", None)
6863
6864 scale_process = None
6865 # SCALE RO - END
6866
6867 # SCALE KDU - BEGIN
6868 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6869 scale_process = "KDU"
6870 await self._scale_kdu(
6871 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6872 )
6873 scaling_info.pop("kdu-create", None)
6874 scaling_info.pop("kdu-delete", None)
6875
6876 scale_process = None
6877 # SCALE KDU - END
6878
6879 if db_nsr_update:
6880 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6881
6882 # SCALE-UP VCA - BEGIN
6883 if vca_scaling_info:
6884 step = db_nslcmop_update[
6885 "detailed-status"
6886 ] = "Creating new execution environments"
6887 scale_process = "VCA"
6888 for vca_info in vca_scaling_info:
6889 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6890 member_vnf_index = str(vca_info["member-vnf-index"])
6891 self.logger.debug(
6892 logging_text + "vdu info: {}".format(vca_info)
6893 )
6894 vnfd_id = db_vnfr["vnfd-ref"]
6895 if vca_info.get("osm_vdu_id"):
6896 vdu_index = int(vca_info["vdu_index"])
6897 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6898 if db_vnfr.get("additionalParamsForVnf"):
6899 deploy_params.update(
6900 parse_yaml_strings(
6901 db_vnfr["additionalParamsForVnf"].copy()
6902 )
6903 )
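# For illustration (hypothetical values): an additionalParamsForVnf entry
# such as {"touch_filename": "/home/ubuntu/first-touch"} is merged into
# deploy_params, with parse_yaml_strings() presumably decoding any
# YAML-encoded string values into native types first.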
6904 descriptor_config = get_configuration(
6905 db_vnfd, db_vnfd["id"]
6906 )
6907 if descriptor_config:
6908 vdu_id = None
6909 vdu_name = None
6910 kdu_name = None
6911 kdu_index = None
6912 self._deploy_n2vc(
6913 logging_text=logging_text
6914 + "member_vnf_index={} ".format(member_vnf_index),
6915 db_nsr=db_nsr,
6916 db_vnfr=db_vnfr,
6917 nslcmop_id=nslcmop_id,
6918 nsr_id=nsr_id,
6919 nsi_id=nsi_id,
6920 vnfd_id=vnfd_id,
6921 vdu_id=vdu_id,
6922 kdu_name=kdu_name,
6923 kdu_index=kdu_index,
6924 member_vnf_index=member_vnf_index,
6925 vdu_index=vdu_index,
6926 vdu_name=vdu_name,
6927 deploy_params=deploy_params,
6928 descriptor_config=descriptor_config,
6929 base_folder=base_folder,
6930 task_instantiation_info=tasks_dict_info,
6931 stage=stage,
6932 )
6933 vdu_id = vca_info["osm_vdu_id"]
6934 vdur = find_in_list(
6935 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6936 )
6937 descriptor_config = get_configuration(db_vnfd, vdu_id)
6938 if vdur.get("additionalParams"):
6939 deploy_params_vdu = parse_yaml_strings(
6940 vdur["additionalParams"]
6941 )
6942 else:
6943 deploy_params_vdu = deploy_params
6944 deploy_params_vdu["OSM"] = get_osm_params(
6945 db_vnfr, vdu_id, vdu_count_index=vdu_index
6946 )
6947 if descriptor_config:
6948 vdu_name = None
6949 kdu_name = None
6950 kdu_index = None
6951 stage[
6952 1
6953 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6954 member_vnf_index, vdu_id, vdu_index
6955 )
6956 stage[2] = step = "Scaling out VCA"
6957 self._write_op_status(op_id=nslcmop_id, stage=stage)
6958 self._deploy_n2vc(
6959 logging_text=logging_text
6960 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6961 member_vnf_index, vdu_id, vdu_index
6962 ),
6963 db_nsr=db_nsr,
6964 db_vnfr=db_vnfr,
6965 nslcmop_id=nslcmop_id,
6966 nsr_id=nsr_id,
6967 nsi_id=nsi_id,
6968 vnfd_id=vnfd_id,
6969 vdu_id=vdu_id,
6970 kdu_name=kdu_name,
6971 member_vnf_index=member_vnf_index,
6972 vdu_index=vdu_index,
6973 kdu_index=kdu_index,
6974 vdu_name=vdu_name,
6975 deploy_params=deploy_params_vdu,
6976 descriptor_config=descriptor_config,
6977 base_folder=base_folder,
6978 task_instantiation_info=tasks_dict_info,
6979 stage=stage,
6980 )
6981 # SCALE-UP VCA - END
6982 scale_process = None
6983
6984 # POST-SCALE BEGIN
6985 # execute primitive service POST-SCALING
6986 step = "Executing post-scale vnf-config-primitive"
6987 if scaling_descriptor.get("scaling-config-action"):
6988 for scaling_config_action in scaling_descriptor[
6989 "scaling-config-action"
6990 ]:
6991 if (
6992 scaling_config_action.get("trigger") == "post-scale-in"
6993 and scaling_type == "SCALE_IN"
6994 ) or (
6995 scaling_config_action.get("trigger") == "post-scale-out"
6996 and scaling_type == "SCALE_OUT"
6997 ):
6998 vnf_config_primitive = scaling_config_action[
6999 "vnf-config-primitive-name-ref"
7000 ]
7001 step = db_nslcmop_update[
7002 "detailed-status"
7003 ] = "executing post-scale scaling-config-action '{}'".format(
7004 vnf_config_primitive
7005 )
7006
7007 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7008 if db_vnfr.get("additionalParamsForVnf"):
7009 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7010
7011 # look for primitive
7012 for config_primitive in (
7013 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7014 ).get("config-primitive", ()):
7015 if config_primitive["name"] == vnf_config_primitive:
7016 break
7017 else:
7018 raise LcmException(
7019 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7020 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7021 "config-primitive".format(
7022 scaling_group, vnf_config_primitive
7023 )
7024 )
7025 scale_process = "VCA"
7026 db_nsr_update["config-status"] = "configuring post-scaling"
7027 primitive_params = self._map_primitive_params(
7028 config_primitive, {}, vnfr_params
7029 )
7030
7031 # Post-scale retry check: Check if this sub-operation has been executed before
7032 op_index = self._check_or_add_scale_suboperation(
7033 db_nslcmop,
7034 vnf_index,
7035 vnf_config_primitive,
7036 primitive_params,
7037 "POST-SCALE",
7038 )
7039 if op_index == self.SUBOPERATION_STATUS_SKIP:
7040 # Skip sub-operation
7041 result = "COMPLETED"
7042 result_detail = "Done"
7043 self.logger.debug(
7044 logging_text
7045 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7046 vnf_config_primitive, result, result_detail
7047 )
7048 )
7049 else:
7050 if op_index == self.SUBOPERATION_STATUS_NEW:
7051 # New sub-operation: Get index of this sub-operation
7052 op_index = (
7053 len(db_nslcmop.get("_admin", {}).get("operations"))
7054 - 1
7055 )
7056 self.logger.debug(
7057 logging_text
7058 + "vnf_config_primitive={} New sub-operation".format(
7059 vnf_config_primitive
7060 )
7061 )
7062 else:
7063 # retry: Get registered params for this existing sub-operation
7064 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7065 op_index
7066 ]
7067 vnf_index = op.get("member_vnf_index")
7068 vnf_config_primitive = op.get("primitive")
7069 primitive_params = op.get("primitive_params")
7070 self.logger.debug(
7071 logging_text
7072 + "vnf_config_primitive={} Sub-operation retry".format(
7073 vnf_config_primitive
7074 )
7075 )
# Execute the primitive, either with new (first-time) or registered (retry) args
7077 ee_descriptor_id = config_primitive.get(
7078 "execution-environment-ref"
7079 )
7080 primitive_name = config_primitive.get(
7081 "execution-environment-primitive", vnf_config_primitive
7082 )
7083 ee_id, vca_type = self._look_for_deployed_vca(
7084 nsr_deployed["VCA"],
7085 member_vnf_index=vnf_index,
7086 vdu_id=None,
7087 vdu_count_index=None,
7088 ee_descriptor_id=ee_descriptor_id,
7089 )
7090 result, result_detail = await self._ns_execute_primitive(
7091 ee_id,
7092 primitive_name,
7093 primitive_params,
7094 vca_type=vca_type,
7095 vca_id=vca_id,
7096 )
7097 self.logger.debug(
7098 logging_text
7099 + "vnf_config_primitive={} Done with result {} {}".format(
7100 vnf_config_primitive, result, result_detail
7101 )
7102 )
7103 # Update operationState = COMPLETED | FAILED
7104 self._update_suboperation_status(
7105 db_nslcmop, op_index, result, result_detail
7106 )
7107
7108 if result == "FAILED":
7109 raise LcmException(result_detail)
7110 db_nsr_update["config-status"] = old_config_status
7111 scale_process = None
7112 # POST-SCALE END
7113
7114 db_nsr_update[
7115 "detailed-status"
7116 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7117 db_nsr_update["operational-status"] = (
7118 "running"
7119 if old_operational_status == "failed"
7120 else old_operational_status
7121 )
7122 db_nsr_update["config-status"] = old_config_status
7123 return
7124 except (
7125 ROclient.ROClientException,
7126 DbException,
7127 LcmException,
7128 NgRoException,
7129 ) as e:
7130 self.logger.error(logging_text + "Exit Exception {}".format(e))
7131 exc = e
7132 except asyncio.CancelledError:
7133 self.logger.error(
7134 logging_text + "Cancelled Exception while '{}'".format(step)
7135 )
7136 exc = "Operation was cancelled"
7137 except Exception as e:
7138 exc = traceback.format_exc()
7139 self.logger.critical(
7140 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7141 exc_info=True,
7142 )
7143 finally:
7144 self._write_ns_status(
7145 nsr_id=nsr_id,
7146 ns_state=None,
7147 current_operation="IDLE",
7148 current_operation_id=None,
7149 )
7150 if tasks_dict_info:
7151                 stage[1] = "Waiting for pending instantiation tasks."
7152 self.logger.debug(logging_text + stage[1])
7153 exc = await self._wait_for_tasks(
7154 logging_text,
7155 tasks_dict_info,
7156 self.timeout.ns_deploy,
7157 stage,
7158 nslcmop_id,
7159 nsr_id=nsr_id,
7160 )
7161 if exc:
7162 db_nslcmop_update[
7163 "detailed-status"
7164 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7165 nslcmop_operation_state = "FAILED"
7166 if db_nsr:
7167 db_nsr_update["operational-status"] = old_operational_status
7168 db_nsr_update["config-status"] = old_config_status
7169 db_nsr_update["detailed-status"] = ""
7170 if scale_process:
7171 if "VCA" in scale_process:
7172 db_nsr_update["config-status"] = "failed"
7173 if "RO" in scale_process:
7174 db_nsr_update["operational-status"] = "failed"
7175 db_nsr_update[
7176 "detailed-status"
7177 ] = "FAILED scaling nslcmop={} {}: {}".format(
7178 nslcmop_id, step, exc
7179 )
7180 else:
7181 error_description_nslcmop = None
7182 nslcmop_operation_state = "COMPLETED"
7183 db_nslcmop_update["detailed-status"] = "Done"
7184
7185 self._write_op_status(
7186 op_id=nslcmop_id,
7187 stage="",
7188 error_message=error_description_nslcmop,
7189 operation_state=nslcmop_operation_state,
7190 other_update=db_nslcmop_update,
7191 )
7192 if db_nsr:
7193 self._write_ns_status(
7194 nsr_id=nsr_id,
7195 ns_state=None,
7196 current_operation="IDLE",
7197 current_operation_id=None,
7198 other_update=db_nsr_update,
7199 )
7200
7201 if nslcmop_operation_state:
7202 try:
7203 msg = {
7204 "nsr_id": nsr_id,
7205 "nslcmop_id": nslcmop_id,
7206 "operationState": nslcmop_operation_state,
7207 }
7208 await self.msg.aiowrite("ns", "scaled", msg)
7209 except Exception as e:
7210 self.logger.error(
7211 logging_text + "kafka_write notification Exception {}".format(e)
7212 )
7213 self.logger.debug(logging_text + "Exit")
7214 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7215
7216 async def _scale_kdu(
7217 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7218 ):
7219 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7220 for kdu_name in _scaling_info:
7221 for kdu_scaling_info in _scaling_info[kdu_name]:
7222 deployed_kdu, index = get_deployed_kdu(
7223 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7224 )
7225 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7226 kdu_instance = deployed_kdu["kdu-instance"]
7227 kdu_model = deployed_kdu.get("kdu-model")
7228 scale = int(kdu_scaling_info["scale"])
7229 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7230
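                # db_dict tells the K8s connector where to report status: the nsrs
                # record of this NS, under the _admin.deployed.K8s entry for this KDU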
7231 db_dict = {
7232 "collection": "nsrs",
7233 "filter": {"_id": nsr_id},
7234 "path": "_admin.deployed.K8s.{}".format(index),
7235 }
7236
7237 step = "scaling application {}".format(
7238 kdu_scaling_info["resource-name"]
7239 )
7240 self.logger.debug(logging_text + step)
7241
7242 if kdu_scaling_info["type"] == "delete":
7243 kdu_config = get_configuration(db_vnfd, kdu_name)
7244 if (
7245 kdu_config
7246 and kdu_config.get("terminate-config-primitive")
7247 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7248 ):
7249 terminate_config_primitive_list = kdu_config.get(
7250 "terminate-config-primitive"
7251 )
7252 terminate_config_primitive_list.sort(
7253 key=lambda val: int(val["seq"])
7254 )
7255
7256 for (
7257 terminate_config_primitive
7258 ) in terminate_config_primitive_list:
7259 primitive_params_ = self._map_primitive_params(
7260 terminate_config_primitive, {}, {}
7261 )
7262 step = "execute terminate config primitive"
7263 self.logger.debug(logging_text + step)
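                            # exec_primitive already enforces total_timeout internally; the
                            # surrounding asyncio.wait_for adds an outer guard (the primitive
                            # timeout multiplied by primitive_outer_factor) so a hung
                            # connector cannot block this task forever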
7264 await asyncio.wait_for(
7265 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7266 cluster_uuid=cluster_uuid,
7267 kdu_instance=kdu_instance,
7268 primitive_name=terminate_config_primitive["name"],
7269 params=primitive_params_,
7270 db_dict=db_dict,
7271 total_timeout=self.timeout.primitive,
7272 vca_id=vca_id,
7273 ),
7274 timeout=self.timeout.primitive
7275 * self.timeout.primitive_outer_factor,
7276 )
7277
7278 await asyncio.wait_for(
7279 self.k8scluster_map[k8s_cluster_type].scale(
7280 kdu_instance=kdu_instance,
7281 scale=scale,
7282 resource_name=kdu_scaling_info["resource-name"],
7283 total_timeout=self.timeout.scale_on_error,
7284 vca_id=vca_id,
7285 cluster_uuid=cluster_uuid,
7286 kdu_model=kdu_model,
7287 atomic=True,
7288 db_dict=db_dict,
7289 ),
7290 timeout=self.timeout.scale_on_error
7291 * self.timeout.scale_on_error_outer_factor,
7292 )
7293
7294 if kdu_scaling_info["type"] == "create":
7295 kdu_config = get_configuration(db_vnfd, kdu_name)
7296 if (
7297 kdu_config
7298 and kdu_config.get("initial-config-primitive")
7299 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7300 ):
7301 initial_config_primitive_list = kdu_config.get(
7302 "initial-config-primitive"
7303 )
7304 initial_config_primitive_list.sort(
7305 key=lambda val: int(val["seq"])
7306 )
7307
7308 for initial_config_primitive in initial_config_primitive_list:
7309 primitive_params_ = self._map_primitive_params(
7310 initial_config_primitive, {}, {}
7311 )
7312 step = "execute initial config primitive"
7313 self.logger.debug(logging_text + step)
7314 await asyncio.wait_for(
7315 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7316 cluster_uuid=cluster_uuid,
7317 kdu_instance=kdu_instance,
7318 primitive_name=initial_config_primitive["name"],
7319 params=primitive_params_,
7320 db_dict=db_dict,
7321 vca_id=vca_id,
7322 ),
7323 timeout=600,
7324 )
7325
7326 async def _scale_ng_ro(
7327 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7328 ):
7329 nsr_id = db_nslcmop["nsInstanceId"]
7330 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7331 db_vnfrs = {}
7332
7333         # read from db: vnfds for every vnf
7334 db_vnfds = []
7335
7336 # for each vnf in ns, read vnfd
7337 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7338 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7339 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7340             # if we don't have this vnfd yet, read it from db
7341 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7342 # read from db
7343 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7344 db_vnfds.append(vnfd)
7345 n2vc_key = self.n2vc.get_public_key()
7346 n2vc_key_list = [n2vc_key]
7347 self.scale_vnfr(
7348 db_vnfr,
7349 vdu_scaling_info.get("vdu-create"),
7350 vdu_scaling_info.get("vdu-delete"),
7351 mark_delete=True,
7352 )
7353 # db_vnfr has been updated, update db_vnfrs to use it
7354 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7355 await self._instantiate_ng_ro(
7356 logging_text,
7357 nsr_id,
7358 db_nsd,
7359 db_nsr,
7360 db_nslcmop,
7361 db_vnfrs,
7362 db_vnfds,
7363 n2vc_key_list,
7364 stage=stage,
7365 start_deploy=time(),
7366 timeout_ns_deploy=self.timeout.ns_deploy,
7367 )
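        # the VDUs to delete were only marked above (mark_delete=True); now that RO
        # has removed them, clean their vdur records from the vnfr for good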
7368 if vdu_scaling_info.get("vdu-delete"):
7369 self.scale_vnfr(
7370 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7371 )
7372
7373 async def extract_prometheus_scrape_jobs(
7374 self,
7375 ee_id: str,
7376 artifact_path: str,
7377 ee_config_descriptor: dict,
7378 vnfr_id: str,
7379 nsr_id: str,
7380 target_ip: str,
7381 element_type: str,
7382 vnf_member_index: str = "",
7383 vdu_id: str = "",
7384 vdu_index: int = None,
7385 kdu_name: str = "",
7386 kdu_index: int = None,
7387     ) -> list:
7388 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7389 This method will wait until the corresponding VDU or KDU is fully instantiated
7390
7391 Args:
7392 ee_id (str): Execution Environment ID
7393 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7394 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7395 vnfr_id (str): VNFR ID where this EE applies
7396 nsr_id (str): NSR ID where this EE applies
7397 target_ip (str): VDU/KDU instance IP address
7398 element_type (str): NS or VNF or VDU or KDU
7399 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7400 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7401 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7402 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7403 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7404
7405 Raises:
7406             LcmException: When the VDU or KDU instance is not found within one hour
7407
7408         Returns:
7409             list: Prometheus scrape jobs (empty if no template job file is found)
7410 """
7411 # default the vdur and kdur names to an empty string, to avoid any later
7412 # problem with Prometheus when the element type is not VDU or KDU
7413 vdur_name = ""
7414 kdur_name = ""
7415
7416         # look for a file named 'prometheus*.j2' in the artifact folder
7417 artifact_content = self.fs.dir_ls(artifact_path)
7418 job_file = next(
7419 (
7420 f
7421 for f in artifact_content
7422 if f.startswith("prometheus") and f.endswith(".j2")
7423 ),
7424 None,
7425 )
7426 if not job_file:
7427             return []
7428         self.logger.debug("Artifact path: {}".format(artifact_path))
7429         self.logger.debug("Job file: {}".format(job_file))
7430 with self.fs.file_open((artifact_path, job_file), "r") as f:
7431 job_data = f.read()
7432
7433 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7434 if element_type in ("VDU", "KDU"):
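            # poll the vnfr up to 360 times, 10 seconds apart (~1 hour), until the
            # requested VDU/KDU record shows up with a name; the for/else below
            # raises on timeout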
7435 for _ in range(360):
7436 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7437 if vdu_id and vdu_index is not None:
7438 vdur = next(
7439 (
7440 x
7441 for x in get_iterable(db_vnfr, "vdur")
7442 if (
7443 x.get("vdu-id-ref") == vdu_id
7444 and x.get("count-index") == vdu_index
7445 )
7446 ),
7447 {},
7448 )
7449 if vdur.get("name"):
7450 vdur_name = vdur.get("name")
7451 break
7452 if kdu_name and kdu_index is not None:
7453 kdur = next(
7454 (
7455 x
7456 for x in get_iterable(db_vnfr, "kdur")
7457 if (
7458 x.get("kdu-name") == kdu_name
7459 and x.get("count-index") == kdu_index
7460 )
7461 ),
7462 {},
7463 )
7464 if kdur.get("name"):
7465 kdur_name = kdur.get("name")
7466 break
7467
7468 await asyncio.sleep(10)
7469 else:
7470 if vdu_id and vdu_index is not None:
7471 raise LcmException(
7472 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7473 )
7474 if kdu_name and kdu_index is not None:
7475 raise LcmException(
7476 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7477 )
7478
7479 if ee_id is not None:
7480 _, namespace, helm_id = get_ee_id_parts(
7481 ee_id
7482 ) # get namespace and EE gRPC service name
7483 host_name = f'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
7484 host_port = "80"
7485 vnfr_id = vnfr_id.replace("-", "")
7486 variables = {
7487 "JOB_NAME": vnfr_id,
7488 "TARGET_IP": target_ip,
7489 "EXPORTER_POD_IP": host_name,
7490 "EXPORTER_POD_PORT": host_port,
7491 "NSR_ID": nsr_id,
7492 "VNF_MEMBER_INDEX": vnf_member_index,
7493 "VDUR_NAME": vdur_name,
7494 "KDUR_NAME": kdur_name,
7495 "ELEMENT_TYPE": element_type,
7496 }
7497 else:
7498 metric_path = ee_config_descriptor["metric-path"]
7499 target_port = ee_config_descriptor["metric-port"]
7500 vnfr_id = vnfr_id.replace("-", "")
7501 variables = {
7502 "JOB_NAME": vnfr_id,
7503 "TARGET_IP": target_ip,
7504 "TARGET_PORT": target_port,
7505 "METRIC_PATH": metric_path,
7506 }
7507
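        # parse_job renders the Jinja2 job template with these variables and returns
        # the resulting list of Prometheus scrape-job dicts, e.g. (illustrative only):
        #   [{"job_name": "<vnfr_id>", "static_configs": [{"targets": ["<ip>:<port>"]}]}]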
7508 job_list = parse_job(job_data, variables)
7509         # ensure job_name uses the vnfr_id; add nsr_id and vnfr_id as metadata
7510 for job in job_list:
7511 if (
7512 not isinstance(job.get("job_name"), str)
7513 or vnfr_id not in job["job_name"]
7514 ):
7515 job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
7516 job["nsr_id"] = nsr_id
7517 job["vnfr_id"] = vnfr_id
7518 return job_list
7519
7520 async def rebuild_start_stop(
7521 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7522 ):
7523 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7524 self.logger.info(logging_text + "Enter")
7525 stage = ["Preparing the environment", ""]
7526 # database nsrs record
7527 db_nsr_update = {}
7528 vdu_vim_name = None
7529 vim_vm_id = None
7530         # operation start time, used to compute the RO timeout
7531 start_deploy = time()
7532 try:
7533 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7534 vim_account_id = db_vnfr.get("vim-account-id")
7535 vim_info_key = "vim:" + vim_account_id
7536 vdu_id = additional_param["vdu_id"]
7537 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7538 vdur = find_in_list(
7539 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7540 )
7541 if vdur:
7542 vdu_vim_name = vdur["name"]
7543 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7544 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7545 else:
7546 raise LcmException("Target vdu is not found")
7547 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7548 # wait for any previous tasks in process
7549 stage[1] = "Waiting for previous operations to terminate"
7550 self.logger.info(stage[1])
7551 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7552
7553 stage[1] = "Reading from database."
7554 self.logger.info(stage[1])
7555 self._write_ns_status(
7556 nsr_id=nsr_id,
7557 ns_state=None,
7558 current_operation=operation_type.upper(),
7559 current_operation_id=nslcmop_id,
7560 )
7561 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7562
7563 # read from db: ns
7564 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7565 db_nsr_update["operational-status"] = operation_type
7566 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7567 # Payload for RO
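            # operation_type is expected to be "start", "stop" or "rebuild" (see the
            # task name below), so desc looks like e.g. {"start": {"vim_vm_id": ..., ...}}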
7568 desc = {
7569 operation_type: {
7570 "vim_vm_id": vim_vm_id,
7571 "vnf_id": vnf_id,
7572 "vdu_index": additional_param["count-index"],
7573 "vdu_id": vdur["id"],
7574 "target_vim": target_vim,
7575 "vim_account_id": vim_account_id,
7576 }
7577 }
7578 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7579 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7580 self.logger.info("ro nsr id: {}".format(nsr_id))
7581 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7582 self.logger.info("response from RO: {}".format(result_dict))
7583 action_id = result_dict["action_id"]
7584 await self._wait_ng_ro(
7585 nsr_id,
7586 action_id,
7587 nslcmop_id,
7588 start_deploy,
7589 self.timeout.operate,
7590 None,
7591 "start_stop_rebuild",
7592 )
7593 return "COMPLETED", "Done"
7594 except (ROclient.ROClientException, DbException, LcmException) as e:
7595 self.logger.error("Exit Exception {}".format(e))
7596 exc = e
7597 except asyncio.CancelledError:
7598 self.logger.error("Cancelled Exception while '{}'".format(stage))
7599 exc = "Operation was cancelled"
7600 except Exception as e:
7601 exc = traceback.format_exc()
7602 self.logger.critical(
7603 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7604 )
7605             return "FAILED", "Error operating VNF: {}".format(exc)
7606
7607 async def migrate(self, nsr_id, nslcmop_id):
7608 """
7609         Migrate VNF and VDU instances in a NS
7610
7611         :param: nsr_id: NS Instance ID
7612         :param: nslcmop_id: nslcmop ID of the migrate operation
7613
7614 """
7615 # Try to lock HA task here
7616 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7617 if not task_is_locked_by_me:
7618 return
7619 logging_text = "Task ns={} migrate ".format(nsr_id)
7620 self.logger.debug(logging_text + "Enter")
7621 # get all needed from database
7622 db_nslcmop = None
7623 db_nslcmop_update = {}
7624 nslcmop_operation_state = None
7625 db_nsr_update = {}
7626 target = {}
7627 exc = None
7628         # operation start time, used to compute the RO timeout
7629 start_deploy = time()
7630
7631 try:
7632 # wait for any previous tasks in process
7633 step = "Waiting for previous operations to terminate"
7634 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7635
7636 self._write_ns_status(
7637 nsr_id=nsr_id,
7638 ns_state=None,
7639 current_operation="MIGRATING",
7640 current_operation_id=nslcmop_id,
7641 )
7642 step = "Getting nslcmop from database"
7643 self.logger.debug(
7644 step + " after having waited for previous tasks to be completed"
7645 )
7646 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7647 migrate_params = db_nslcmop.get("operationParams")
7648
7649 target = {}
7650 target.update(migrate_params)
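            # RO returns an action_id for the migration; _wait_ng_ro polls that
            # action until it completes or self.timeout.migrate expires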
7651 desc = await self.RO.migrate(nsr_id, target)
7652 self.logger.debug("RO return > {}".format(desc))
7653 action_id = desc["action_id"]
7654 await self._wait_ng_ro(
7655 nsr_id,
7656 action_id,
7657 nslcmop_id,
7658 start_deploy,
7659 self.timeout.migrate,
7660 operation="migrate",
7661 )
7662 except (ROclient.ROClientException, DbException, LcmException) as e:
7663 self.logger.error("Exit Exception {}".format(e))
7664 exc = e
7665 except asyncio.CancelledError:
7666 self.logger.error("Cancelled Exception while '{}'".format(step))
7667 exc = "Operation was cancelled"
7668 except Exception as e:
7669 exc = traceback.format_exc()
7670 self.logger.critical(
7671 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7672 )
7673 finally:
7674 self._write_ns_status(
7675 nsr_id=nsr_id,
7676 ns_state=None,
7677 current_operation="IDLE",
7678 current_operation_id=None,
7679 )
7680 if exc:
7681 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7682 nslcmop_operation_state = "FAILED"
7683 else:
7684 nslcmop_operation_state = "COMPLETED"
7685 db_nslcmop_update["detailed-status"] = "Done"
7686 db_nsr_update["detailed-status"] = "Done"
7687
7688 self._write_op_status(
7689 op_id=nslcmop_id,
7690 stage="",
7691 error_message="",
7692 operation_state=nslcmop_operation_state,
7693 other_update=db_nslcmop_update,
7694 )
7695 if nslcmop_operation_state:
7696 try:
7697 msg = {
7698 "nsr_id": nsr_id,
7699 "nslcmop_id": nslcmop_id,
7700 "operationState": nslcmop_operation_state,
7701 }
7702 await self.msg.aiowrite("ns", "migrated", msg)
7703 except Exception as e:
7704 self.logger.error(
7705 logging_text + "kafka_write notification Exception {}".format(e)
7706 )
7707 self.logger.debug(logging_text + "Exit")
7708 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7709
7710 async def heal(self, nsr_id, nslcmop_id):
7711 """
7712 Heal NS
7713
7714 :param nsr_id: ns instance to heal
7715 :param nslcmop_id: operation to run
7716 :return:
7717 """
7718
7719 # Try to lock HA task here
7720 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7721 if not task_is_locked_by_me:
7722 return
7723
7724 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7725 stage = ["", "", ""]
7726 tasks_dict_info = {}
7727 # ^ stage, step, VIM progress
7728 self.logger.debug(logging_text + "Enter")
7729 # get all needed from database
7730 db_nsr = None
7731 db_nslcmop_update = {}
7732 db_nsr_update = {}
7733 db_vnfrs = {} # vnf's info indexed by _id
7734 exc = None
7735 old_operational_status = ""
7736 old_config_status = ""
7737 nsi_id = None
7738 try:
7739 # wait for any previous tasks in process
7740 step = "Waiting for previous operations to terminate"
7741 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7742 self._write_ns_status(
7743 nsr_id=nsr_id,
7744 ns_state=None,
7745 current_operation="HEALING",
7746 current_operation_id=nslcmop_id,
7747 )
7748
7749 step = "Getting nslcmop from database"
7750 self.logger.debug(
7751 step + " after having waited for previous tasks to be completed"
7752 )
7753 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7754
7755 step = "Getting nsr from database"
7756 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7757 old_operational_status = db_nsr["operational-status"]
7758 old_config_status = db_nsr["config-status"]
7759
7760 db_nsr_update = {
7761 "_admin.deployed.RO.operational-status": "healing",
7762 }
7763 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7764
7765 step = "Sending heal order to VIM"
7766 await self.heal_RO(
7767 logging_text=logging_text,
7768 nsr_id=nsr_id,
7769 db_nslcmop=db_nslcmop,
7770 stage=stage,
7771 )
7772 # VCA tasks
7773 # read from db: nsd
7774 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7775 self.logger.debug(logging_text + stage[1])
7776 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7777 self.fs.sync(db_nsr["nsd-id"])
7778 db_nsr["nsd"] = nsd
7779 # read from db: vnfr's of this ns
7780 step = "Getting vnfrs from db"
7781 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7782 for vnfr in db_vnfrs_list:
7783 db_vnfrs[vnfr["_id"]] = vnfr
7784 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7785
7786 # Check for each target VNF
7787 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7788 for target_vnf in target_list:
7789 # Find this VNF in the list from DB
7790 vnfr_id = target_vnf.get("vnfInstanceId", None)
7791 if vnfr_id:
7792 db_vnfr = db_vnfrs[vnfr_id]
7793 vnfd_id = db_vnfr.get("vnfd-id")
7794 vnfd_ref = db_vnfr.get("vnfd-ref")
7795 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7796 base_folder = vnfd["_admin"]["storage"]
7797 vdu_id = None
7798 vdu_index = 0
7799 vdu_name = None
7800 kdu_name = None
7801 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7802 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7803
7804 # Check each target VDU and deploy N2VC
7805 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7806 "vdu", []
7807 )
7808 if not target_vdu_list:
7809                         # build the target VDU list from the existing VDU records
7810 target_vdu_list = []
7811 for existing_vdu in db_vnfr.get("vdur"):
7812 vdu_name = existing_vdu.get("vdu-name", None)
7813 vdu_index = existing_vdu.get("count-index", 0)
7814 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7815 "run-day1", False
7816 )
7817 vdu_to_be_healed = {
7818 "vdu-id": vdu_name,
7819 "count-index": vdu_index,
7820 "run-day1": vdu_run_day1,
7821 }
7822 target_vdu_list.append(vdu_to_be_healed)
7823 for target_vdu in target_vdu_list:
7824 deploy_params_vdu = target_vdu
7825                         # Use the VNF-level run-day1 value if no VDU-level value exists
7826 if not deploy_params_vdu.get("run-day1") and target_vnf.get(
7827 "additionalParams", {}
7828 ).get("run-day1"):
7829 deploy_params_vdu["run-day1"] = target_vnf[
7830 "additionalParams"
7831 ].get("run-day1")
7832 vdu_name = target_vdu.get("vdu-id", None)
7833 # TODO: Get vdu_id from vdud.
7834 vdu_id = vdu_name
7835                         # For multi-instance VDUs, count-index is mandatory
7836                         # For single-instance VDUs, count-index is 0
7837 vdu_index = target_vdu.get("count-index", 0)
7838
7839 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7840 stage[1] = "Deploying Execution Environments."
7841 self.logger.debug(logging_text + stage[1])
7842
7843 # VNF Level charm. Normal case when proxy charms.
7844                         # If the target instance is the management machine, continue with the actions: recreate the EE for native charms or reinject the juju key for proxy charms.
7845 descriptor_config = get_configuration(vnfd, vnfd_ref)
7846 if descriptor_config:
7847 # Continue if healed machine is management machine
7848 vnf_ip_address = db_vnfr.get("ip-address")
7849 target_instance = None
7850                             for instance in db_vnfr.get("vdur") or []:
7851 if (
7852 instance["vdu-name"] == vdu_name
7853 and instance["count-index"] == vdu_index
7854 ):
7855 target_instance = instance
7856 break
7857                             if target_instance and vnf_ip_address == target_instance.get("ip-address"):
7858 self._heal_n2vc(
7859 logging_text=logging_text
7860 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7861 member_vnf_index, vdu_name, vdu_index
7862 ),
7863 db_nsr=db_nsr,
7864 db_vnfr=db_vnfr,
7865 nslcmop_id=nslcmop_id,
7866 nsr_id=nsr_id,
7867 nsi_id=nsi_id,
7868 vnfd_id=vnfd_ref,
7869 vdu_id=None,
7870 kdu_name=None,
7871 member_vnf_index=member_vnf_index,
7872 vdu_index=0,
7873 vdu_name=None,
7874 deploy_params=deploy_params_vdu,
7875 descriptor_config=descriptor_config,
7876 base_folder=base_folder,
7877 task_instantiation_info=tasks_dict_info,
7878 stage=stage,
7879 )
7880
7881 # VDU Level charm. Normal case with native charms.
7882 descriptor_config = get_configuration(vnfd, vdu_name)
7883 if descriptor_config:
7884 self._heal_n2vc(
7885 logging_text=logging_text
7886 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7887 member_vnf_index, vdu_name, vdu_index
7888 ),
7889 db_nsr=db_nsr,
7890 db_vnfr=db_vnfr,
7891 nslcmop_id=nslcmop_id,
7892 nsr_id=nsr_id,
7893 nsi_id=nsi_id,
7894 vnfd_id=vnfd_ref,
7895 vdu_id=vdu_id,
7896 kdu_name=kdu_name,
7897 member_vnf_index=member_vnf_index,
7898 vdu_index=vdu_index,
7899 vdu_name=vdu_name,
7900 deploy_params=deploy_params_vdu,
7901 descriptor_config=descriptor_config,
7902 base_folder=base_folder,
7903 task_instantiation_info=tasks_dict_info,
7904 stage=stage,
7905 )
7906
7907 except (
7908 ROclient.ROClientException,
7909 DbException,
7910 LcmException,
7911 NgRoException,
7912 ) as e:
7913 self.logger.error(logging_text + "Exit Exception {}".format(e))
7914 exc = e
7915 except asyncio.CancelledError:
7916 self.logger.error(
7917 logging_text + "Cancelled Exception while '{}'".format(step)
7918 )
7919 exc = "Operation was cancelled"
7920 except Exception as e:
7921 exc = traceback.format_exc()
7922 self.logger.critical(
7923 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7924 exc_info=True,
7925 )
7926 finally:
7927 if tasks_dict_info:
7928                 stage[1] = "Waiting for pending healing tasks."
7929 self.logger.debug(logging_text + stage[1])
7930 exc = await self._wait_for_tasks(
7931 logging_text,
7932 tasks_dict_info,
7933 self.timeout.ns_deploy,
7934 stage,
7935 nslcmop_id,
7936 nsr_id=nsr_id,
7937 )
7938 if exc:
7939 db_nslcmop_update[
7940 "detailed-status"
7941 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7942 nslcmop_operation_state = "FAILED"
7943 if db_nsr:
7944 db_nsr_update["operational-status"] = old_operational_status
7945 db_nsr_update["config-status"] = old_config_status
7946 db_nsr_update[
7947 "detailed-status"
7948 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
7949 for task, task_name in tasks_dict_info.items():
7950 if not task.done() or task.cancelled() or task.exception():
7951 if task_name.startswith(self.task_name_deploy_vca):
7952 # A N2VC task is pending
7953 db_nsr_update["config-status"] = "failed"
7954 else:
7955 # RO task is pending
7956 db_nsr_update["operational-status"] = "failed"
7957 else:
7958 error_description_nslcmop = None
7959 nslcmop_operation_state = "COMPLETED"
7960 db_nslcmop_update["detailed-status"] = "Done"
7961 db_nsr_update["detailed-status"] = "Done"
7962 db_nsr_update["operational-status"] = "running"
7963 db_nsr_update["config-status"] = "configured"
7964
7965 self._write_op_status(
7966 op_id=nslcmop_id,
7967 stage="",
7968 error_message=error_description_nslcmop,
7969 operation_state=nslcmop_operation_state,
7970 other_update=db_nslcmop_update,
7971 )
7972 if db_nsr:
7973 self._write_ns_status(
7974 nsr_id=nsr_id,
7975 ns_state=None,
7976 current_operation="IDLE",
7977 current_operation_id=None,
7978 other_update=db_nsr_update,
7979 )
7980
7981 if nslcmop_operation_state:
7982 try:
7983 msg = {
7984 "nsr_id": nsr_id,
7985 "nslcmop_id": nslcmop_id,
7986 "operationState": nslcmop_operation_state,
7987 }
7988 await self.msg.aiowrite("ns", "healed", msg)
7989 except Exception as e:
7990 self.logger.error(
7991 logging_text + "kafka_write notification Exception {}".format(e)
7992 )
7993 self.logger.debug(logging_text + "Exit")
7994 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7995
7996 async def heal_RO(
7997 self,
7998 logging_text,
7999 nsr_id,
8000 db_nslcmop,
8001 stage,
8002 ):
8003 """
8004 Heal at RO
8005         :param logging_text: prefix text to use at logging
8006 :param nsr_id: nsr identity
8007         :param db_nslcmop: database content of the ns operation, in this case 'heal'
8008 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8009 :return: None or exception
8010 """
8011
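        # memoize vim_account lookups in db_vims to avoid repeated DB reads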
8012 def get_vim_account(vim_account_id):
8013 nonlocal db_vims
8014 if vim_account_id in db_vims:
8015 return db_vims[vim_account_id]
8016 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8017 db_vims[vim_account_id] = db_vim
8018 return db_vim
8019
8020 try:
8021 start_heal = time()
8022 ns_params = db_nslcmop.get("operationParams")
8023 if ns_params and ns_params.get("timeout_ns_heal"):
8024 timeout_ns_heal = ns_params["timeout_ns_heal"]
8025 else:
8026 timeout_ns_heal = self.timeout.ns_heal
8027
8028 db_vims = {}
8029
8030 nslcmop_id = db_nslcmop["_id"]
8031 target = {
8032 "action_id": nslcmop_id,
8033 }
8034             self.logger.debug(
8035 "db_nslcmop={} and timeout_ns_heal={}".format(
8036 db_nslcmop, timeout_ns_heal
8037 )
8038 )
8039 target.update(db_nslcmop.get("operationParams", {}))
8040
8041 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8042 desc = await self.RO.recreate(nsr_id, target)
8043 self.logger.debug("RO return > {}".format(desc))
8044 action_id = desc["action_id"]
8045             # wait for RO to complete: reinjecting the juju key at RO could otherwise find the VM in state Deleted
8046 await self._wait_ng_ro(
8047 nsr_id,
8048 action_id,
8049 nslcmop_id,
8050 start_heal,
8051 timeout_ns_heal,
8052 stage,
8053 operation="healing",
8054 )
8055
8056 # Updating NSR
8057 db_nsr_update = {
8058 "_admin.deployed.RO.operational-status": "running",
8059 "detailed-status": " ".join(stage),
8060 }
8061 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8062 self._write_op_status(nslcmop_id, stage)
8063 self.logger.debug(
8064 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8065 )
8066
8067 except Exception as e:
8068 stage[2] = "ERROR healing at VIM"
8069 # self.set_vnfr_at_error(db_vnfrs, str(e))
8070 self.logger.error(
8071 "Error healing at VIM {}".format(e),
8072 exc_info=not isinstance(
8073 e,
8074 (
8075 ROclient.ROClientException,
8076 LcmException,
8077 DbException,
8078 NgRoException,
8079 ),
8080 ),
8081 )
8082 raise
8083
8084 def _heal_n2vc(
8085 self,
8086 logging_text,
8087 db_nsr,
8088 db_vnfr,
8089 nslcmop_id,
8090 nsr_id,
8091 nsi_id,
8092 vnfd_id,
8093 vdu_id,
8094 kdu_name,
8095 member_vnf_index,
8096 vdu_index,
8097 vdu_name,
8098 deploy_params,
8099 descriptor_config,
8100 base_folder,
8101 task_instantiation_info,
8102 stage,
8103 ):
8104         # launch instantiate_N2VC in an asyncio task and register the task object
8105         # Look up where the information of this charm is stored in the database <nsrs>._admin.deployed.VCA
8106 # if not found, create one entry and update database
8107 # fill db_nsr._admin.deployed.VCA.<index>
8108
8109 self.logger.debug(
8110 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
8111 )
8112
8113 charm_name = ""
8114 get_charm_name = False
8115 if "execution-environment-list" in descriptor_config:
8116 ee_list = descriptor_config.get("execution-environment-list", [])
8117 elif "juju" in descriptor_config:
8118 ee_list = [descriptor_config] # ns charms
8119 if "execution-environment-list" not in descriptor_config:
8120 # charm name is only required for ns charms
8121 get_charm_name = True
8122 else: # other types as script are not supported
8123 ee_list = []
8124
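        # Map each EE item to a VCA type: a juju charm becomes lxc_proxy_charm,
        # k8s_proxy_charm or native_charm depending on its 'cloud'/'proxy' settings;
        # a helm chart maps to helm-v3, the only helm flavour handled here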
8125 for ee_item in ee_list:
8126 self.logger.debug(
8127 logging_text
8128 + "_deploy_n2vc ee_item juju={}, helm={}".format(
8129 ee_item.get("juju"), ee_item.get("helm-chart")
8130 )
8131 )
8132 ee_descriptor_id = ee_item.get("id")
8133 if ee_item.get("juju"):
8134 vca_name = ee_item["juju"].get("charm")
8135 if get_charm_name:
8136 charm_name = self.find_charm_name(db_nsr, str(vca_name))
8137 vca_type = (
8138 "lxc_proxy_charm"
8139 if ee_item["juju"].get("charm") is not None
8140 else "native_charm"
8141 )
8142 if ee_item["juju"].get("cloud") == "k8s":
8143 vca_type = "k8s_proxy_charm"
8144 elif ee_item["juju"].get("proxy") is False:
8145 vca_type = "native_charm"
8146 elif ee_item.get("helm-chart"):
8147 vca_name = ee_item["helm-chart"]
8148 vca_type = "helm-v3"
8149 else:
8150 self.logger.debug(
8151 logging_text + "skipping non juju neither charm configuration"
8152 )
8153 continue
8154
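            # look for an existing VCA record matching this EE; the for/else below
            # falls through to create and persist a new record when no match is found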
8155 vca_index = -1
8156 for vca_index, vca_deployed in enumerate(
8157 db_nsr["_admin"]["deployed"]["VCA"]
8158 ):
8159 if not vca_deployed:
8160 continue
8161 if (
8162 vca_deployed.get("member-vnf-index") == member_vnf_index
8163 and vca_deployed.get("vdu_id") == vdu_id
8164 and vca_deployed.get("kdu_name") == kdu_name
8165 and vca_deployed.get("vdu_count_index", 0) == vdu_index
8166 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
8167 ):
8168 break
8169 else:
8170 # not found, create one.
8171 target = (
8172 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
8173 )
8174 if vdu_id:
8175 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
8176 elif kdu_name:
8177 target += "/kdu/{}".format(kdu_name)
8178 vca_deployed = {
8179 "target_element": target,
8180 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8181 "member-vnf-index": member_vnf_index,
8182 "vdu_id": vdu_id,
8183 "kdu_name": kdu_name,
8184 "vdu_count_index": vdu_index,
8185 "operational-status": "init", # TODO revise
8186 "detailed-status": "", # TODO revise
8187 "step": "initial-deploy", # TODO revise
8188 "vnfd_id": vnfd_id,
8189 "vdu_name": vdu_name,
8190 "type": vca_type,
8191 "ee_descriptor_id": ee_descriptor_id,
8192 "charm_name": charm_name,
8193 }
8194 vca_index += 1
8195
8196 # create VCA and configurationStatus in db
8197 db_dict = {
8198 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
8199 "configurationStatus.{}".format(vca_index): dict(),
8200 }
8201 self.update_db_2("nsrs", nsr_id, db_dict)
8202
8203 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
8204
8205 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
8206 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
8207 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
8208
8209 # Launch task
8210 task_n2vc = asyncio.ensure_future(
8211 self.heal_N2VC(
8212 logging_text=logging_text,
8213 vca_index=vca_index,
8214 nsi_id=nsi_id,
8215 db_nsr=db_nsr,
8216 db_vnfr=db_vnfr,
8217 vdu_id=vdu_id,
8218 kdu_name=kdu_name,
8219 vdu_index=vdu_index,
8220 deploy_params=deploy_params,
8221 config_descriptor=descriptor_config,
8222 base_folder=base_folder,
8223 nslcmop_id=nslcmop_id,
8224 stage=stage,
8225 vca_type=vca_type,
8226 vca_name=vca_name,
8227 ee_config_descriptor=ee_item,
8228 )
8229 )
8230 self.lcm_tasks.register(
8231 "ns",
8232 nsr_id,
8233 nslcmop_id,
8234 "instantiate_N2VC-{}".format(vca_index),
8235 task_n2vc,
8236 )
8237 task_instantiation_info[
8238 task_n2vc
8239 ] = self.task_name_deploy_vca + " {}.{}".format(
8240 member_vnf_index or "", vdu_id or ""
8241 )
8242
8243 async def heal_N2VC(
8244 self,
8245 logging_text,
8246 vca_index,
8247 nsi_id,
8248 db_nsr,
8249 db_vnfr,
8250 vdu_id,
8251 kdu_name,
8252 vdu_index,
8253 config_descriptor,
8254 deploy_params,
8255 base_folder,
8256 nslcmop_id,
8257 stage,
8258 vca_type,
8259 vca_name,
8260 ee_config_descriptor,
8261 ):
8262 nsr_id = db_nsr["_id"]
8263 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
8264 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
8265 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
8266 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
8267 db_dict = {
8268 "collection": "nsrs",
8269 "filter": {"_id": nsr_id},
8270 "path": db_update_entry,
8271 }
8272 step = ""
8273 try:
8274 element_type = "NS"
8275 element_under_configuration = nsr_id
8276
8277 vnfr_id = None
8278 if db_vnfr:
8279 vnfr_id = db_vnfr["_id"]
8280 osm_config["osm"]["vnf_id"] = vnfr_id
8281
8282 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
8283
8284 if vca_type == "native_charm":
8285 index_number = 0
8286 else:
8287 index_number = vdu_index or 0
8288
8289 if vnfr_id:
8290 element_type = "VNF"
8291 element_under_configuration = vnfr_id
8292 namespace += ".{}-{}".format(vnfr_id, index_number)
8293 if vdu_id:
8294 namespace += ".{}-{}".format(vdu_id, index_number)
8295 element_type = "VDU"
8296 element_under_configuration = "{}-{}".format(vdu_id, index_number)
8297 osm_config["osm"]["vdu_id"] = vdu_id
8298 elif kdu_name:
8299 namespace += ".{}".format(kdu_name)
8300 element_type = "KDU"
8301 element_under_configuration = kdu_name
8302 osm_config["osm"]["kdu_name"] = kdu_name
8303
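            # resulting namespace examples (illustrative): '<nsi>.<ns>' for an NS
            # charm, '<nsi>.<ns>.<vnfr>-0' for a VNF, '<nsi>.<ns>.<vnfr>-0.<vdu>-0'
            # for a VDU, '<nsi>.<ns>.<vnfr>-0.<kdu>' for a KDU (the nsi part is
            # empty when nsi_id is None)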
8304 # Get artifact path
8305 if base_folder["pkg-dir"]:
8306 artifact_path = "{}/{}/{}/{}".format(
8307 base_folder["folder"],
8308 base_folder["pkg-dir"],
8309 "charms"
8310 if vca_type
8311 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8312 else "helm-charts",
8313 vca_name,
8314 )
8315 else:
8316 artifact_path = "{}/Scripts/{}/{}/".format(
8317 base_folder["folder"],
8318 "charms"
8319 if vca_type
8320 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8321 else "helm-charts",
8322 vca_name,
8323 )
8324
8325 self.logger.debug("Artifact path > {}".format(artifact_path))
8326
8327 # get initial_config_primitive_list that applies to this element
8328 initial_config_primitive_list = config_descriptor.get(
8329 "initial-config-primitive"
8330 )
8331
8332 self.logger.debug(
8333 "Initial config primitive list > {}".format(
8334 initial_config_primitive_list
8335 )
8336 )
8337
8338 # add config if not present for NS charm
8339 ee_descriptor_id = ee_config_descriptor.get("id")
8340 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
8341 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
8342 initial_config_primitive_list, vca_deployed, ee_descriptor_id
8343 )
8344
8345 self.logger.debug(
8346 "Initial config primitive list #2 > {}".format(
8347 initial_config_primitive_list
8348 )
8349 )
8350 # n2vc_redesign STEP 3.1
8351 # find old ee_id if exists
8352 ee_id = vca_deployed.get("ee_id")
8353
8354 vca_id = self.get_vca_id(db_vnfr, db_nsr)
8355 # create or register execution environment in VCA. Only for native charms when healing
8356 if vca_type == "native_charm":
8357 step = "Waiting to VM being up and getting IP address"
8358 self.logger.debug(logging_text + step)
8359 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
8360 logging_text,
8361 nsr_id,
8362 vnfr_id,
8363 vdu_id,
8364 vdu_index,
8365 user=None,
8366 pub_key=None,
8367 )
8368 credentials = {"hostname": rw_mgmt_ip}
8369 # get username
8370 username = deep_get(
8371 config_descriptor, ("config-access", "ssh-access", "default-user")
8372 )
8373                 # TODO remove this when the IM changes regarding config-access:ssh-access:default-user are
8374                 # merged. Meanwhile, get the username from initial-config-primitive
8375 if not username and initial_config_primitive_list:
8376 for config_primitive in initial_config_primitive_list:
8377 for param in config_primitive.get("parameter", ()):
8378 if param["name"] == "ssh-username":
8379 username = param["value"]
8380 break
8381 if not username:
8382 raise LcmException(
8383 "Cannot determine the username neither with 'initial-config-primitive' nor with "
8384 "'config-access.ssh-access.default-user'"
8385 )
8386 credentials["username"] = username
8387
8388 # n2vc_redesign STEP 3.2
8389                 # TODO: before healing at RO, the native charm units to be deleted must be destroyed first.
8390 self._write_configuration_status(
8391 nsr_id=nsr_id,
8392 vca_index=vca_index,
8393 status="REGISTERING",
8394 element_under_configuration=element_under_configuration,
8395 element_type=element_type,
8396 )
8397
8398 step = "register execution environment {}".format(credentials)
8399 self.logger.debug(logging_text + step)
8400 ee_id = await self.vca_map[vca_type].register_execution_environment(
8401 credentials=credentials,
8402 namespace=namespace,
8403 db_dict=db_dict,
8404 vca_id=vca_id,
8405 )
8406
8407                 # update ee_id in db
8408 db_dict_ee_id = {
8409 "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
8410 }
8411 self.update_db_2("nsrs", nsr_id, db_dict_ee_id)
8412
8413             # for compatibility with MON/POL modules, they need the model and application name in the database
8414             # TODO ask MON/POL whether they still assume the format "model_name.application_name"
8415             # Not sure if this needs to be done when healing
8416 """
8417 ee_id_parts = ee_id.split(".")
8418 db_nsr_update = {db_update_entry + "ee_id": ee_id}
8419 if len(ee_id_parts) >= 2:
8420 model_name = ee_id_parts[0]
8421 application_name = ee_id_parts[1]
8422 db_nsr_update[db_update_entry + "model"] = model_name
8423 db_nsr_update[db_update_entry + "application"] = application_name
8424 """
8425
8426 # n2vc_redesign STEP 3.3
8427 # Install configuration software. Only for native charms.
8428 step = "Install configuration Software"
8429
8430 self._write_configuration_status(
8431 nsr_id=nsr_id,
8432 vca_index=vca_index,
8433 status="INSTALLING SW",
8434 element_under_configuration=element_under_configuration,
8435 element_type=element_type,
8436 # other_update=db_nsr_update,
8437 other_update=None,
8438 )
8439
8440 # TODO check if already done
8441 self.logger.debug(logging_text + step)
8442 config = None
8443 if vca_type == "native_charm":
8444 config_primitive = next(
8445 (p for p in initial_config_primitive_list if p["name"] == "config"),
8446 None,
8447 )
8448 if config_primitive:
8449 config = self._map_primitive_params(
8450 config_primitive, {}, deploy_params
8451 )
8452 await self.vca_map[vca_type].install_configuration_sw(
8453 ee_id=ee_id,
8454 artifact_path=artifact_path,
8455 db_dict=db_dict,
8456 config=config,
8457 num_units=1,
8458 vca_id=vca_id,
8459 vca_type=vca_type,
8460 )
8461
8462 # write in db flag of configuration_sw already installed
8463 self.update_db_2(
8464 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
8465 )
8466
8467             # Not sure if this needs to be done when healing
8468 """
8469 # add relations for this VCA (wait for other peers related with this VCA)
8470 await self._add_vca_relations(
8471 logging_text=logging_text,
8472 nsr_id=nsr_id,
8473 vca_type=vca_type,
8474 vca_index=vca_index,
8475 )
8476 """
8477
8478             # if SSH access is required, get the execution environment SSH public key
8479             # for native charms we have already waited for the VM to be up
8480 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
8481 pub_key = None
8482 user = None
8483 # self.logger.debug("get ssh key block")
8484 if deep_get(
8485 config_descriptor, ("config-access", "ssh-access", "required")
8486 ):
8487 # self.logger.debug("ssh key needed")
8488 # Needed to inject a ssh key
8489 user = deep_get(
8490 config_descriptor,
8491 ("config-access", "ssh-access", "default-user"),
8492 )
8493 step = "Install configuration Software, getting public ssh key"
8494 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
8495 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
8496 )
8497
8498 step = "Insert public key into VM user={} ssh_key={}".format(
8499 user, pub_key
8500 )
8501 else:
8502 # self.logger.debug("no need to get ssh key")
8503 step = "Waiting to VM being up and getting IP address"
8504 self.logger.debug(logging_text + step)
8505
8506 # n2vc_redesign STEP 5.1
8507 # wait for RO (ip-address) Insert pub_key into VM
8508             # IMPORTANT: we need to wait for RO to complete the healing operation.
8509 await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
8510 if vnfr_id:
8511 if kdu_name:
8512 rw_mgmt_ip = await self.wait_kdu_up(
8513 logging_text, nsr_id, vnfr_id, kdu_name
8514 )
8515 else:
8516 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
8517 logging_text,
8518 nsr_id,
8519 vnfr_id,
8520 vdu_id,
8521 vdu_index,
8522 user=user,
8523 pub_key=pub_key,
8524 )
8525 else:
8526 rw_mgmt_ip = None # This is for a NS configuration
8527
8528 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
8529
8530 # store rw_mgmt_ip in deploy params for later replacement
8531 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
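            # rw_mgmt_ip is now available to _map_primitive_params for parameter
            # substitution (OSM descriptors typically reference it as <rw_mgmt_ip>;
            # that placeholder name is the usual convention, noted here as an
            # assumption rather than confirmed by this file)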
8532
8533 # Day1 operations.
8534 # get run-day1 operation parameter
8535 runDay1 = deploy_params.get("run-day1", False)
8536 self.logger.debug(
8537 "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
8538 )
8539 if runDay1:
8540 # n2vc_redesign STEP 6 Execute initial config primitive
8541 step = "execute initial config primitive"
8542
8543 # wait for dependent primitives execution (NS -> VNF -> VDU)
8544 if initial_config_primitive_list:
8545 await self._wait_dependent_n2vc(
8546 nsr_id, vca_deployed_list, vca_index
8547 )
8548
8549 # stage, in function of element type: vdu, kdu, vnf or ns
8550 my_vca = vca_deployed_list[vca_index]
8551 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
8552 # VDU or KDU
8553 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
8554 elif my_vca.get("member-vnf-index"):
8555 # VNF
8556 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
8557 else:
8558 # NS
8559 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
8560
8561 self._write_configuration_status(
8562 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
8563 )
8564
8565 self._write_op_status(op_id=nslcmop_id, stage=stage)
8566
8567 check_if_terminated_needed = True
8568 for initial_config_primitive in initial_config_primitive_list:
8569                     # add ns_config_info to the deploy params if this is an NS execution environment
8570 if not vca_deployed["member-vnf-index"]:
8571 deploy_params["ns_config_info"] = json.dumps(
8572 self._get_ns_config_info(nsr_id)
8573 )
8574 # TODO check if already done
8575 primitive_params_ = self._map_primitive_params(
8576 initial_config_primitive, {}, deploy_params
8577 )
8578
8579 step = "execute primitive '{}' params '{}'".format(
8580 initial_config_primitive["name"], primitive_params_
8581 )
8582 self.logger.debug(logging_text + step)
8583 await self.vca_map[vca_type].exec_primitive(
8584 ee_id=ee_id,
8585 primitive_name=initial_config_primitive["name"],
8586 params_dict=primitive_params_,
8587 db_dict=db_dict,
8588 vca_id=vca_id,
8589 vca_type=vca_type,
8590 )
8591                     # Once a primitive has been executed, record in the db whether terminate primitives need to be executed later
8592 if check_if_terminated_needed:
8593 if config_descriptor.get("terminate-config-primitive"):
8594 self.update_db_2(
8595 "nsrs",
8596 nsr_id,
8597 {db_update_entry + "needed_terminate": True},
8598 )
8599 check_if_terminated_needed = False
8600
8601 # TODO register in database that primitive is done
8602
8603 # STEP 7 Configure metrics
8604             # Not sure if this needs to be done when healing
8605 """
8606 if vca_type == "helm" or vca_type == "helm-v3":
8607 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
8608 ee_id=ee_id,
8609 artifact_path=artifact_path,
8610 ee_config_descriptor=ee_config_descriptor,
8611 vnfr_id=vnfr_id,
8612 nsr_id=nsr_id,
8613 target_ip=rw_mgmt_ip,
8614 )
8615 if prometheus_jobs:
8616 self.update_db_2(
8617 "nsrs",
8618 nsr_id,
8619 {db_update_entry + "prometheus_jobs": prometheus_jobs},
8620 )
8621
8622 for job in prometheus_jobs:
8623 self.db.set_one(
8624 "prometheus_jobs",
8625 {"job_name": job["job_name"]},
8626 job,
8627 upsert=True,
8628 fail_on_empty=False,
8629 )
8630
8631 """
8632 step = "instantiated at VCA"
8633 self.logger.debug(logging_text + step)
8634
8635 self._write_configuration_status(
8636 nsr_id=nsr_id, vca_index=vca_index, status="READY"
8637 )
8638
8639 except Exception as e: # TODO not use Exception but N2VC exception
8640 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
8641 if not isinstance(
8642 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
8643 ):
8644 self.logger.error(
8645 "Exception while {} : {}".format(step, e), exc_info=True
8646 )
8647 self._write_configuration_status(
8648 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
8649 )
8650 raise LcmException("{} {}".format(step, e)) from e
8651
8652 async def _wait_heal_ro(
8653 self,
8654 nsr_id,
8655 timeout=600,
8656 ):
8657 start_time = time()
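        # poll the RO operational-status every 15 seconds until it leaves 'healing';
        # the while/else below raises only when the timeout expires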
8658 while time() <= start_time + timeout:
8659 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8660 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8661 "operational-status"
8662 ]
8663 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8664 if operational_status_ro != "healing":
8665 break
8666 await asyncio.sleep(15)
8667         else:  # timeout reached
8668             raise NgRoException("Timeout waiting for ns to heal")
8669
8670 async def vertical_scale(self, nsr_id, nslcmop_id):
8671 """
8672         Vertically scale the VDUs in a NS
8673
8674         :param: nsr_id: NS Instance ID
8675         :param: nslcmop_id: nslcmop ID of the vertical scale operation
8676
8677 """
8678 # Try to lock HA task here
8679 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8680 if not task_is_locked_by_me:
8681 return
8682 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8683 self.logger.debug(logging_text + "Enter")
8684 # get all needed from database
8685 db_nslcmop = None
8686 db_nslcmop_update = {}
8687 nslcmop_operation_state = None
8688 db_nsr_update = {}
8689 target = {}
8690 exc = None
8691         # operation start time, used to compute the RO timeout
8692 start_deploy = time()
8693
8694 try:
8695 # wait for any previous tasks in process
8696 step = "Waiting for previous operations to terminate"
8697 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8698
8699 self._write_ns_status(
8700 nsr_id=nsr_id,
8701 ns_state=None,
8702 current_operation="VerticalScale",
8703 current_operation_id=nslcmop_id,
8704 )
8705 step = "Getting nslcmop from database"
8706 self.logger.debug(
8707 step + " after having waited for previous tasks to be completed"
8708 )
8709 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8710 operationParams = db_nslcmop.get("operationParams")
8711 target = {}
8712 target.update(operationParams)
8713 desc = await self.RO.vertical_scale(nsr_id, target)
8714 self.logger.debug("RO return > {}".format(desc))
8715 action_id = desc["action_id"]
8716 await self._wait_ng_ro(
8717 nsr_id,
8718 action_id,
8719 nslcmop_id,
8720 start_deploy,
8721 self.timeout.verticalscale,
8722 operation="verticalscale",
8723 )
8724 except (ROclient.ROClientException, DbException, LcmException) as e:
8725 self.logger.error("Exit Exception {}".format(e))
8726 exc = e
8727 except asyncio.CancelledError:
8728 self.logger.error("Cancelled Exception while '{}'".format(step))
8729 exc = "Operation was cancelled"
8730 except Exception as e:
8731 exc = traceback.format_exc()
8732 self.logger.critical(
8733 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8734 )
8735 finally:
8736 self._write_ns_status(
8737 nsr_id=nsr_id,
8738 ns_state=None,
8739 current_operation="IDLE",
8740 current_operation_id=None,
8741 )
8742 if exc:
8743 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8744 nslcmop_operation_state = "FAILED"
8745 else:
8746 nslcmop_operation_state = "COMPLETED"
8747 db_nslcmop_update["detailed-status"] = "Done"
8748 db_nsr_update["detailed-status"] = "Done"
8749
8750 self._write_op_status(
8751 op_id=nslcmop_id,
8752 stage="",
8753 error_message="",
8754 operation_state=nslcmop_operation_state,
8755 other_update=db_nslcmop_update,
8756 )
8757 if nslcmop_operation_state:
8758 try:
8759 msg = {
8760 "nsr_id": nsr_id,
8761 "nslcmop_id": nslcmop_id,
8762 "operationState": nslcmop_operation_state,
8763 }
8764 await self.msg.aiowrite("ns", "verticalscaled", msg)
8765 except Exception as e:
8766 self.logger.error(
8767 logging_text + "kafka_write notification Exception {}".format(e)
8768 )
8769 self.logger.debug(logging_text + "Exit")
8770 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")