Fix multiple minor security vulnerabilities
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmBase,
57 deep_get,
58 get_iterable,
59 populate_dict,
60 check_juju_bundle_existence,
61 get_charm_artifact_path,
62 get_ee_id_parts,
63 vld_to_ro_ip_profile,
64 )
65 from osm_lcm.data_utils.nsd import (
66 get_ns_configuration_relation_list,
67 get_vnf_profile,
68 get_vnf_profiles,
69 )
70 from osm_lcm.data_utils.vnfd import (
71 get_kdu,
72 get_kdu_services,
73 get_relation_list,
74 get_vdu_list,
75 get_vdu_profile,
76 get_ee_sorted_initial_config_primitive_list,
77 get_ee_sorted_terminate_config_primitive_list,
78 get_kdu_list,
79 get_virtual_link_profiles,
80 get_vdu,
81 get_configuration,
82 get_vdu_index,
83 get_scaling_aspect,
84 get_number_of_instances,
85 get_juju_ee_ref,
86 get_kdu_resource_profile,
87 find_software_version,
88 check_helm_ee_in_ns,
89 )
90 from osm_lcm.data_utils.list_utils import find_in_list
91 from osm_lcm.data_utils.vnfr import (
92 get_osm_params,
93 get_vdur_index,
94 get_kdur,
95 get_volumes_from_instantiation_params,
96 )
97 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
98 from osm_lcm.data_utils.database.vim_account import VimAccountDB
99 from n2vc.definitions import RelationEndpoint
100 from n2vc.k8s_helm_conn import K8sHelmConnector
101 from n2vc.k8s_helm3_conn import K8sHelm3Connector
102 from n2vc.k8s_juju_conn import K8sJujuConnector
103
104 from osm_common.dbbase import DbException
105 from osm_common.fsbase import FsException
106
107 from osm_lcm.data_utils.database.database import Database
108 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
109 from osm_lcm.data_utils.wim import (
110 get_sdn_ports,
111 get_target_wim_attrs,
112 select_feasible_wim_account,
113 )
114
115 from n2vc.n2vc_juju_conn import N2VCJujuConnector
116 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
117
118 from osm_lcm.lcm_helm_conn import LCMHelmConn
119 from osm_lcm.osm_config import OsmConfigBuilder
120 from osm_lcm.prometheus import parse_job
121
122 from copy import copy, deepcopy
123 from time import time
124 from uuid import uuid4
125
126 from random import SystemRandom
127
128 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
129
130
131 class NsLcm(LcmBase):
132 SUBOPERATION_STATUS_NOT_FOUND = -1
133 SUBOPERATION_STATUS_NEW = -2
134 SUBOPERATION_STATUS_SKIP = -3
135 EE_TLS_NAME = "ee-tls"
136 task_name_deploy_vca = "Deploying VCA"
137 rel_operation_types = {
138 "GE": ">=",
139 "LE": "<=",
140 "GT": ">",
141 "LT": "<",
142 "EQ": "==",
143 "NE": "!=",
144 }
145
    def __init__(self, msg, lcm_tasks, config: LcmCfg):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # database and filesystem handles come from process-wide singletons
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm execution-environment connector; shares the n2vc db-update callback
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # helm v2 k8s connector (no db-update callback)
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helmpath,
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # helm v3 k8s connector (no db-update callback)
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # juju-bundle k8s connector; updates nsr status via _on_update_k8s_db
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # KDU deployment type -> k8s connector that handles it
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # VCA (execution environment) type -> connector that handles it
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(**self.ro_config.to_dict())

        # LCM operation type -> RO status-polling coroutine
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
229
230 @staticmethod
231 def increment_ip_mac(ip_mac, vm_index=1):
232 if not isinstance(ip_mac, str):
233 return ip_mac
234 try:
235 # try with ipv4 look for last dot
236 i = ip_mac.rfind(".")
237 if i > 0:
238 i += 1
239 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
240 # try with ipv6 or mac look for last colon. Operate in hex
241 i = ip_mac.rfind(":")
242 if i > 0:
243 i += 1
244 # format in hex, len can be 2 for mac or 4 for ipv6
245 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
246 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
247 )
248 except Exception:
249 pass
250 return None
251
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Refresh NS status in the nsr record after an N2VC (juju) change.

        Queries the full VCA status for the NS, stores it under "vcaStatus",
        reconciles the configurationStatus entry addressed by the last
        component of *path*, and toggles nsState between READY and DEGRADED
        based on juju machine/application health.

        :param table: db table name of the change notification
        :param filter: db filter; its "_id" carries the nsr id
        :param path: dotted db path; last component is the VCA index
        :param updated_data: changed data (not used directly here)
        :param vca_id: optional VCA account id for the status query
        :return: None. Errors other than cancel/timeout are only logged.
        """
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:
            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # VCA index is the last dotted component of the path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so these item-assignments look like they raise KeyError,
                # which the except below silently logs — confirm intent.
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            # cancellation and timeouts must propagate to the caller
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
351
352 async def _on_update_k8s_db(
353 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
354 ):
355 """
356 Updating vca status in NSR record
357 :param cluster_uuid: UUID of a k8s cluster
358 :param kdu_instance: The unique name of the KDU instance
359 :param filter: To get nsr_id
360 :cluster_type: The cluster type (juju, k8s)
361 :return: none
362 """
363
364 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
365 # .format(cluster_uuid, kdu_instance, filter))
366
367 nsr_id = filter.get("_id")
368 try:
369 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
370 cluster_uuid=cluster_uuid,
371 kdu_instance=kdu_instance,
372 yaml_format=False,
373 complete_status=True,
374 vca_id=vca_id,
375 )
376
377 # vcaStatus
378 db_dict = dict()
379 db_dict["vcaStatus"] = {nsr_id: vca_status}
380
381 self.logger.debug(
382 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
383 )
384
385 # write to database
386 self.update_db_2("nsrs", nsr_id, db_dict)
387 except (asyncio.CancelledError, asyncio.TimeoutError):
388 raise
389 except Exception as e:
390 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
391
392 @staticmethod
393 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
394 try:
395 env = Environment(
396 undefined=StrictUndefined,
397 autoescape=select_autoescape(default_for_string=True, default=True),
398 )
399 template = env.from_string(cloud_init_text)
400 return template.render(additional_params or {})
401 except UndefinedError as e:
402 raise LcmException(
403 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
404 "file, must be provided in the instantiation parameters inside the "
405 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
406 )
407 except (TemplateError, TemplateNotFound) as e:
408 raise LcmException(
409 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
410 vnfd_id, vdu_id, e
411 )
412 )
413
414 def _get_vdu_cloud_init_content(self, vdu, vnfd):
415 cloud_init_content = cloud_init_file = None
416 try:
417 if vdu.get("cloud-init-file"):
418 base_folder = vnfd["_admin"]["storage"]
419 if base_folder["pkg-dir"]:
420 cloud_init_file = "{}/{}/cloud_init/{}".format(
421 base_folder["folder"],
422 base_folder["pkg-dir"],
423 vdu["cloud-init-file"],
424 )
425 else:
426 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
427 base_folder["folder"],
428 vdu["cloud-init-file"],
429 )
430 with self.fs.file_open(cloud_init_file, "r") as ci_file:
431 cloud_init_content = ci_file.read()
432 elif vdu.get("cloud-init"):
433 cloud_init_content = vdu["cloud-init"]
434
435 return cloud_init_content
436 except FsException as e:
437 raise LcmException(
438 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
439 vnfd["id"], vdu["id"], cloud_init_file, e
440 )
441 )
442
443 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
444 vdur = next(
445 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
446 )
447 additional_params = vdur.get("additionalParams")
448 return parse_yaml_strings(additional_params)
449
450 @staticmethod
451 def ip_profile_2_RO(ip_profile):
452 RO_ip_profile = deepcopy(ip_profile)
453 if "dns-server" in RO_ip_profile:
454 if isinstance(RO_ip_profile["dns-server"], list):
455 RO_ip_profile["dns-address"] = []
456 for ds in RO_ip_profile.pop("dns-server"):
457 RO_ip_profile["dns-address"].append(ds["address"])
458 else:
459 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
460 if RO_ip_profile.get("ip-version") == "ipv4":
461 RO_ip_profile["ip-version"] = "IPv4"
462 if RO_ip_profile.get("ip-version") == "ipv6":
463 RO_ip_profile["ip-version"] = "IPv6"
464 if "dhcp-params" in RO_ip_profile:
465 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
466 return RO_ip_profile
467
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Apply a scale-out/scale-in to the vnfr's vdur list in the database.

        :param db_vnfr: vnfr content; its "vdur" list is refreshed from the
            database before returning
        :param vdu_create: dict vdu-id -> number of instances to add
        :param vdu_delete: dict vdu-id -> number of instances to remove
        :param mark_delete: when True, vdurs are only marked "DELETING"
            instead of being pulled from the database
        :return: None; raises LcmException when scaling out with neither a
            vdur nor a saved vdur-template
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # use the newest existing vdur of this vdu as the clone source
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # clone the source vdur and reset per-instance fields
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per replica; dynamic
                        # ones are cleared so the VIM assigns new values
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be management of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the newest vdu_count replicas of this vdu DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
578
579 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
580 """
581 Updates database nsr with the RO info for the created vld
582 :param ns_update_nsr: dictionary to be filled with the updated info
583 :param db_nsr: content of db_nsr. This is also modified
584 :param nsr_desc_RO: nsr descriptor from RO
585 :return: Nothing, LcmException is raised on errors
586 """
587
588 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
589 for net_RO in get_iterable(nsr_desc_RO, "nets"):
590 if vld["id"] != net_RO.get("ns_net_osm_id"):
591 continue
592 vld["vim-id"] = net_RO.get("vim_net_id")
593 vld["name"] = net_RO.get("vim_name")
594 vld["status"] = net_RO.get("status")
595 vld["status-detailed"] = net_RO.get("error_msg")
596 ns_update_nsr["vld.{}".format(vld_index)] = vld
597 break
598 else:
599 raise LcmException(
600 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
601 )
602
    def set_vnfr_at_error(self, db_vnfrs, error_text):
        """Mark every vnfr in *db_vnfrs* (and its status-less vdurs) as ERROR.

        :param db_vnfrs: dict of vnfr records keyed by member index
        :param error_text: optional detail stored on each affected vdur
        :return: None; database errors are logged and swallowed so error
            reporting never masks the original failure
        """
        try:
            for db_vnfr in db_vnfrs.values():
                vnfr_update = {"status": "ERROR"}
                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    # only vdurs without a status are overwritten
                    if "status" not in vdur:
                        vdur["status"] = "ERROR"
                        vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
                        if error_text:
                            vdur["status-detailed"] = str(error_text)
                            # NOTE(review): the db update stores the literal
                            # "ERROR" while the in-memory vdur gets error_text —
                            # confirm whether error_text was intended here too
                            vnfr_update[
                                "vdur.{}.status-detailed".format(vdu_index)
                            ] = "ERROR"
                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
        except DbException as e:
            self.logger.error("Cannot update vnf. {}".format(e))
619
620 def _get_ns_config_info(self, nsr_id):
621 """
622 Generates a mapping between vnf,vdu elements and the N2VC id
623 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
624 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
625 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
626 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
627 """
628 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
629 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
630 mapping = {}
631 ns_config_info = {"osm-config-mapping": mapping}
632 for vca in vca_deployed_list:
633 if not vca["member-vnf-index"]:
634 continue
635 if not vca["vdu_id"]:
636 mapping[vca["member-vnf-index"]] = vca["application"]
637 else:
638 mapping[
639 "{}.{}.{}".format(
640 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
641 )
642 ] = vca["application"]
643 return ns_config_info
644
645 async def _instantiate_ng_ro(
646 self,
647 logging_text,
648 nsr_id,
649 nsd,
650 db_nsr,
651 db_nslcmop,
652 db_vnfrs,
653 db_vnfds,
654 n2vc_key_list,
655 stage,
656 start_deploy,
657 timeout_ns_deploy,
658 ):
659 db_vims = {}
660
661 def get_vim_account(vim_account_id):
662 nonlocal db_vims
663 if vim_account_id in db_vims:
664 return db_vims[vim_account_id]
665 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
666 db_vims[vim_account_id] = db_vim
667 return db_vim
668
669 # modify target_vld info with instantiation parameters
670 def parse_vld_instantiation_params(
671 target_vim, target_vld, vld_params, target_sdn
672 ):
673 if vld_params.get("ip-profile"):
674 target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
675 vld_params["ip-profile"]
676 )
677 if vld_params.get("provider-network"):
678 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
679 "provider-network"
680 ]
681 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
682 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
683 "provider-network"
684 ]["sdn-ports"]
685
686 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
687 # if wim_account_id is specified in vld_params, validate if it is feasible.
688 wim_account_id, db_wim = select_feasible_wim_account(
689 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
690 )
691
692 if wim_account_id:
693 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
694 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
695 # update vld_params with correct WIM account Id
696 vld_params["wimAccountId"] = wim_account_id
697
698 target_wim = "wim:{}".format(wim_account_id)
699 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
700 sdn_ports = get_sdn_ports(vld_params, db_wim)
701 if len(sdn_ports) > 0:
702 target_vld["vim_info"][target_wim] = target_wim_attrs
703 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
704
705 self.logger.debug(
706 "Target VLD with WIM data: {:s}".format(str(target_vld))
707 )
708
709 for param in ("vim-network-name", "vim-network-id"):
710 if vld_params.get(param):
711 if isinstance(vld_params[param], dict):
712 for vim, vim_net in vld_params[param].items():
713 other_target_vim = "vim:" + vim
714 populate_dict(
715 target_vld["vim_info"],
716 (other_target_vim, param.replace("-", "_")),
717 vim_net,
718 )
719 else: # isinstance str
720 target_vld["vim_info"][target_vim][
721 param.replace("-", "_")
722 ] = vld_params[param]
723 if vld_params.get("common_id"):
724 target_vld["common_id"] = vld_params.get("common_id")
725
726 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
727 def update_ns_vld_target(target, ns_params):
728 for vnf_params in ns_params.get("vnf", ()):
729 if vnf_params.get("vimAccountId"):
730 target_vnf = next(
731 (
732 vnfr
733 for vnfr in db_vnfrs.values()
734 if vnf_params["member-vnf-index"]
735 == vnfr["member-vnf-index-ref"]
736 ),
737 None,
738 )
739 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
740 if not vdur:
741 continue
742 for a_index, a_vld in enumerate(target["ns"]["vld"]):
743 target_vld = find_in_list(
744 get_iterable(vdur, "interfaces"),
745 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
746 )
747
748 vld_params = find_in_list(
749 get_iterable(ns_params, "vld"),
750 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
751 )
752 if target_vld:
753 if vnf_params.get("vimAccountId") not in a_vld.get(
754 "vim_info", {}
755 ):
756 target_vim_network_list = [
757 v for _, v in a_vld.get("vim_info").items()
758 ]
759 target_vim_network_name = next(
760 (
761 item.get("vim_network_name", "")
762 for item in target_vim_network_list
763 ),
764 "",
765 )
766
767 target["ns"]["vld"][a_index].get("vim_info").update(
768 {
769 "vim:{}".format(vnf_params["vimAccountId"]): {
770 "vim_network_name": target_vim_network_name,
771 }
772 }
773 )
774
775 if vld_params:
776 for param in ("vim-network-name", "vim-network-id"):
777 if vld_params.get(param) and isinstance(
778 vld_params[param], dict
779 ):
780 for vim, vim_net in vld_params[
781 param
782 ].items():
783 other_target_vim = "vim:" + vim
784 populate_dict(
785 target["ns"]["vld"][a_index].get(
786 "vim_info"
787 ),
788 (
789 other_target_vim,
790 param.replace("-", "_"),
791 ),
792 vim_net,
793 )
794
795 nslcmop_id = db_nslcmop["_id"]
796 target = {
797 "name": db_nsr["name"],
798 "ns": {"vld": []},
799 "vnf": [],
800 "image": deepcopy(db_nsr["image"]),
801 "flavor": deepcopy(db_nsr["flavor"]),
802 "action_id": nslcmop_id,
803 "cloud_init_content": {},
804 }
805 for image in target["image"]:
806 image["vim_info"] = {}
807 for flavor in target["flavor"]:
808 flavor["vim_info"] = {}
809 if db_nsr.get("shared-volumes"):
810 target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
811 for shared_volumes in target["shared-volumes"]:
812 shared_volumes["vim_info"] = {}
813 if db_nsr.get("affinity-or-anti-affinity-group"):
814 target["affinity-or-anti-affinity-group"] = deepcopy(
815 db_nsr["affinity-or-anti-affinity-group"]
816 )
817 for affinity_or_anti_affinity_group in target[
818 "affinity-or-anti-affinity-group"
819 ]:
820 affinity_or_anti_affinity_group["vim_info"] = {}
821
822 if db_nslcmop.get("lcmOperationType") != "instantiate":
823 # get parameters of instantiation:
824 db_nslcmop_instantiate = self.db.get_list(
825 "nslcmops",
826 {
827 "nsInstanceId": db_nslcmop["nsInstanceId"],
828 "lcmOperationType": "instantiate",
829 },
830 )[-1]
831 ns_params = db_nslcmop_instantiate.get("operationParams")
832 else:
833 ns_params = db_nslcmop.get("operationParams")
834 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
835 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
836
837 cp2target = {}
838 for vld_index, vld in enumerate(db_nsr.get("vld")):
839 target_vim = "vim:{}".format(ns_params["vimAccountId"])
840 target_vld = {
841 "id": vld["id"],
842 "name": vld["name"],
843 "mgmt-network": vld.get("mgmt-network", False),
844 "type": vld.get("type"),
845 "vim_info": {
846 target_vim: {
847 "vim_network_name": vld.get("vim-network-name"),
848 "vim_account_id": ns_params["vimAccountId"],
849 }
850 },
851 }
852 # check if this network needs SDN assist
853 if vld.get("pci-interfaces"):
854 db_vim = get_vim_account(ns_params["vimAccountId"])
855 if vim_config := db_vim.get("config"):
856 if sdnc_id := vim_config.get("sdn-controller"):
857 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
858 target_sdn = "sdn:{}".format(sdnc_id)
859 target_vld["vim_info"][target_sdn] = {
860 "sdn": True,
861 "target_vim": target_vim,
862 "vlds": [sdn_vld],
863 "type": vld.get("type"),
864 }
865
866 nsd_vnf_profiles = get_vnf_profiles(nsd)
867 for nsd_vnf_profile in nsd_vnf_profiles:
868 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
869 if cp["virtual-link-profile-id"] == vld["id"]:
870 cp2target[
871 "member_vnf:{}.{}".format(
872 cp["constituent-cpd-id"][0][
873 "constituent-base-element-id"
874 ],
875 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
876 )
877 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
878
879 # check at nsd descriptor, if there is an ip-profile
880 vld_params = {}
881 nsd_vlp = find_in_list(
882 get_virtual_link_profiles(nsd),
883 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
884 == vld["id"],
885 )
886 if (
887 nsd_vlp
888 and nsd_vlp.get("virtual-link-protocol-data")
889 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
890 ):
891 vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
892 "l3-protocol-data"
893 ]
894
895 # update vld_params with instantiation params
896 vld_instantiation_params = find_in_list(
897 get_iterable(ns_params, "vld"),
898 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
899 )
900 if vld_instantiation_params:
901 vld_params.update(vld_instantiation_params)
902 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
903 target["ns"]["vld"].append(target_vld)
904 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
905 update_ns_vld_target(target, ns_params)
906
907 for vnfr in db_vnfrs.values():
908 vnfd = find_in_list(
909 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
910 )
911 vnf_params = find_in_list(
912 get_iterable(ns_params, "vnf"),
913 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
914 )
915 target_vnf = deepcopy(vnfr)
916 target_vim = "vim:{}".format(vnfr["vim-account-id"])
917 for vld in target_vnf.get("vld", ()):
918 # check if connected to a ns.vld, to fill target'
919 vnf_cp = find_in_list(
920 vnfd.get("int-virtual-link-desc", ()),
921 lambda cpd: cpd.get("id") == vld["id"],
922 )
923 if vnf_cp:
924 ns_cp = "member_vnf:{}.{}".format(
925 vnfr["member-vnf-index-ref"], vnf_cp["id"]
926 )
927 if cp2target.get(ns_cp):
928 vld["target"] = cp2target[ns_cp]
929
930 vld["vim_info"] = {
931 target_vim: {"vim_network_name": vld.get("vim-network-name")}
932 }
933 # check if this network needs SDN assist
934 target_sdn = None
935 if vld.get("pci-interfaces"):
936 db_vim = get_vim_account(vnfr["vim-account-id"])
937 sdnc_id = db_vim["config"].get("sdn-controller")
938 if sdnc_id:
939 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
940 target_sdn = "sdn:{}".format(sdnc_id)
941 vld["vim_info"][target_sdn] = {
942 "sdn": True,
943 "target_vim": target_vim,
944 "vlds": [sdn_vld],
945 "type": vld.get("type"),
946 }
947
948 # check at vnfd descriptor, if there is an ip-profile
949 vld_params = {}
950 vnfd_vlp = find_in_list(
951 get_virtual_link_profiles(vnfd),
952 lambda a_link_profile: a_link_profile["id"] == vld["id"],
953 )
954 if (
955 vnfd_vlp
956 and vnfd_vlp.get("virtual-link-protocol-data")
957 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
958 ):
959 vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
960 "l3-protocol-data"
961 ]
962 # update vld_params with instantiation params
963 if vnf_params:
964 vld_instantiation_params = find_in_list(
965 get_iterable(vnf_params, "internal-vld"),
966 lambda i_vld: i_vld["name"] == vld["id"],
967 )
968 if vld_instantiation_params:
969 vld_params.update(vld_instantiation_params)
970 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
971
972 vdur_list = []
973 for vdur in target_vnf.get("vdur", ()):
974 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
975 continue # This vdu must not be created
976 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
977
978 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
979
980 if ssh_keys_all:
981 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
982 vnf_configuration = get_configuration(vnfd, vnfd["id"])
983 if (
984 vdu_configuration
985 and vdu_configuration.get("config-access")
986 and vdu_configuration.get("config-access").get("ssh-access")
987 ):
988 vdur["ssh-keys"] = ssh_keys_all
989 vdur["ssh-access-required"] = vdu_configuration[
990 "config-access"
991 ]["ssh-access"]["required"]
992 elif (
993 vnf_configuration
994 and vnf_configuration.get("config-access")
995 and vnf_configuration.get("config-access").get("ssh-access")
996 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
997 ):
998 vdur["ssh-keys"] = ssh_keys_all
999 vdur["ssh-access-required"] = vnf_configuration[
1000 "config-access"
1001 ]["ssh-access"]["required"]
1002 elif ssh_keys_instantiation and find_in_list(
1003 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1004 ):
1005 vdur["ssh-keys"] = ssh_keys_instantiation
1006
1007 self.logger.debug("NS > vdur > {}".format(vdur))
1008
1009 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1010 # cloud-init
1011 if vdud.get("cloud-init-file"):
1012 vdur["cloud-init"] = "{}:file:{}".format(
1013 vnfd["_id"], vdud.get("cloud-init-file")
1014 )
1015 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1016 if vdur["cloud-init"] not in target["cloud_init_content"]:
1017 base_folder = vnfd["_admin"]["storage"]
1018 if base_folder["pkg-dir"]:
1019 cloud_init_file = "{}/{}/cloud_init/{}".format(
1020 base_folder["folder"],
1021 base_folder["pkg-dir"],
1022 vdud.get("cloud-init-file"),
1023 )
1024 else:
1025 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1026 base_folder["folder"],
1027 vdud.get("cloud-init-file"),
1028 )
1029 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1030 target["cloud_init_content"][
1031 vdur["cloud-init"]
1032 ] = ci_file.read()
1033 elif vdud.get("cloud-init"):
1034 vdur["cloud-init"] = "{}:vdu:{}".format(
1035 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1036 )
1037 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1038 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1039 "cloud-init"
1040 ]
1041 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1042 deploy_params_vdu = self._format_additional_params(
1043 vdur.get("additionalParams") or {}
1044 )
1045 deploy_params_vdu["OSM"] = get_osm_params(
1046 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1047 )
1048 vdur["additionalParams"] = deploy_params_vdu
1049
1050 # flavor
1051 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1052 if target_vim not in ns_flavor["vim_info"]:
1053 ns_flavor["vim_info"][target_vim] = {}
1054
1055 # deal with images
1056 # in case alternative images are provided we must check if they should be applied
1057 # for the vim_type, modify the vim_type taking into account
1058 ns_image_id = int(vdur["ns-image-id"])
1059 if vdur.get("alt-image-ids"):
1060 db_vim = get_vim_account(vnfr["vim-account-id"])
1061 vim_type = db_vim["vim_type"]
1062 for alt_image_id in vdur.get("alt-image-ids"):
1063 ns_alt_image = target["image"][int(alt_image_id)]
1064 if vim_type == ns_alt_image.get("vim-type"):
1065 # must use alternative image
1066 self.logger.debug(
1067 "use alternative image id: {}".format(alt_image_id)
1068 )
1069 ns_image_id = alt_image_id
1070 vdur["ns-image-id"] = ns_image_id
1071 break
1072 ns_image = target["image"][int(ns_image_id)]
1073 if target_vim not in ns_image["vim_info"]:
1074 ns_image["vim_info"][target_vim] = {}
1075
1076 # Affinity groups
1077 if vdur.get("affinity-or-anti-affinity-group-id"):
1078 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1079 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1080 if target_vim not in ns_ags["vim_info"]:
1081 ns_ags["vim_info"][target_vim] = {}
1082
1083 # shared-volumes
1084 if vdur.get("shared-volumes-id"):
1085 for sv_id in vdur["shared-volumes-id"]:
1086 ns_sv = find_in_list(
1087 target["shared-volumes"], lambda sv: sv_id in sv["id"]
1088 )
1089 if ns_sv:
1090 ns_sv["vim_info"][target_vim] = {}
1091
1092 vdur["vim_info"] = {target_vim: {}}
1093 # instantiation parameters
1094 if vnf_params:
1095 vdu_instantiation_params = find_in_list(
1096 get_iterable(vnf_params, "vdu"),
1097 lambda i_vdu: i_vdu["id"] == vdud["id"],
1098 )
1099 if vdu_instantiation_params:
1100 # Parse the vdu_volumes from the instantiation params
1101 vdu_volumes = get_volumes_from_instantiation_params(
1102 vdu_instantiation_params, vdud
1103 )
1104 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1105 vdur["additionalParams"]["OSM"][
1106 "vim_flavor_id"
1107 ] = vdu_instantiation_params.get("vim-flavor-id")
1108 vdur_list.append(vdur)
1109 target_vnf["vdur"] = vdur_list
1110 target["vnf"].append(target_vnf)
1111
1112 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1113 desc = await self.RO.deploy(nsr_id, target)
1114 self.logger.debug("RO return > {}".format(desc))
1115 action_id = desc["action_id"]
1116 await self._wait_ng_ro(
1117 nsr_id,
1118 action_id,
1119 nslcmop_id,
1120 start_deploy,
1121 timeout_ns_deploy,
1122 stage,
1123 operation="instantiation",
1124 )
1125
1126 # Updating NSR
1127 db_nsr_update = {
1128 "_admin.deployed.RO.operational-status": "running",
1129 "detailed-status": " ".join(stage),
1130 }
1131 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1132 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1133 self._write_op_status(nslcmop_id, stage)
1134 self.logger.debug(
1135 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1136 )
1137 return
1138
1139 async def _wait_ng_ro(
1140 self,
1141 nsr_id,
1142 action_id,
1143 nslcmop_id=None,
1144 start_time=None,
1145 timeout=600,
1146 stage=None,
1147 operation=None,
1148 ):
1149 detailed_status_old = None
1150 db_nsr_update = {}
1151 start_time = start_time or time()
1152 while time() <= start_time + timeout:
1153 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1154 self.logger.debug("Wait NG RO > {}".format(desc_status))
1155 if desc_status["status"] == "FAILED":
1156 raise NgRoException(desc_status["details"])
1157 elif desc_status["status"] == "BUILD":
1158 if stage:
1159 stage[2] = "VIM: ({})".format(desc_status["details"])
1160 elif desc_status["status"] == "DONE":
1161 if stage:
1162 stage[2] = "Deployed at VIM"
1163 break
1164 else:
1165 assert False, "ROclient.check_ns_status returns unknown {}".format(
1166 desc_status["status"]
1167 )
1168 if stage and nslcmop_id and stage[2] != detailed_status_old:
1169 detailed_status_old = stage[2]
1170 db_nsr_update["detailed-status"] = " ".join(stage)
1171 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1172 self._write_op_status(nslcmop_id, stage)
1173 await asyncio.sleep(15)
1174 else: # timeout_ns_deploy
1175 raise NgRoException("Timeout waiting ns to deploy")
1176
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminate a NS at NG-RO: deploy an empty target (which removes everything
        deployed at VIM) and then delete the nsr itself at RO.

        :param logging_text: prefix used for logging
        :param nsr_deployed: deployed info of the NS record (not used in this body)
        :param nsr_id: NS record id to terminate
        :param nslcmop_id: current operation id, also used as RO action_id
        :param stage: 3-item status list; stage[2] is overwritten with the result
        :raises LcmException: when deletion at RO fails; accumulated failure details
            are joined in the exception message
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # an empty target makes NG-RO undeploy every item of this NS
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except NgRoException as e:
            if e.http_code == 404:  # not found
                # already gone at RO: treat as successfully deleted
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )
        except Exception as e:
            failed_detail.append("delete error: {}".format(e))
            self.logger.error(
                logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
            )

        # status is persisted even on failure, before raising
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1250
1251 async def instantiate_RO(
1252 self,
1253 logging_text,
1254 nsr_id,
1255 nsd,
1256 db_nsr,
1257 db_nslcmop,
1258 db_vnfrs,
1259 db_vnfds,
1260 n2vc_key_list,
1261 stage,
1262 ):
1263 """
1264 Instantiate at RO
1265 :param logging_text: preffix text to use at logging
1266 :param nsr_id: nsr identity
1267 :param nsd: database content of ns descriptor
1268 :param db_nsr: database content of ns record
1269 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1270 :param db_vnfrs:
1271 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1272 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1273 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1274 :return: None or exception
1275 """
1276 try:
1277 start_deploy = time()
1278 ns_params = db_nslcmop.get("operationParams")
1279 if ns_params and ns_params.get("timeout_ns_deploy"):
1280 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1281 else:
1282 timeout_ns_deploy = self.timeout.ns_deploy
1283
1284 # Check for and optionally request placement optimization. Database will be updated if placement activated
1285 stage[2] = "Waiting for Placement."
1286 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1287 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1288 for vnfr in db_vnfrs.values():
1289 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1290 break
1291 else:
1292 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1293
1294 return await self._instantiate_ng_ro(
1295 logging_text,
1296 nsr_id,
1297 nsd,
1298 db_nsr,
1299 db_nslcmop,
1300 db_vnfrs,
1301 db_vnfds,
1302 n2vc_key_list,
1303 stage,
1304 start_deploy,
1305 timeout_ns_deploy,
1306 )
1307 except Exception as e:
1308 stage[2] = "ERROR deploying at VIM"
1309 self.set_vnfr_at_error(db_vnfrs, str(e))
1310 self.logger.error(
1311 "Error deploying at VIM {}".format(e),
1312 exc_info=not isinstance(
1313 e,
1314 (
1315 ROclient.ROClientException,
1316 LcmException,
1317 DbException,
1318 NgRoException,
1319 ),
1320 ),
1321 )
1322 raise
1323
1324 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1325 """
1326 Wait for kdu to be up, get ip address
1327 :param logging_text: prefix use for logging
1328 :param nsr_id:
1329 :param vnfr_id:
1330 :param kdu_name:
1331 :return: IP address, K8s services
1332 """
1333
1334 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1335 nb_tries = 0
1336
1337 while nb_tries < 360:
1338 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1339 kdur = next(
1340 (
1341 x
1342 for x in get_iterable(db_vnfr, "kdur")
1343 if x.get("kdu-name") == kdu_name
1344 ),
1345 None,
1346 )
1347 if not kdur:
1348 raise LcmException(
1349 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1350 )
1351 if kdur.get("status"):
1352 if kdur["status"] in ("READY", "ENABLED"):
1353 return kdur.get("ip-address"), kdur.get("services")
1354 else:
1355 raise LcmException(
1356 "target KDU={} is in error state".format(kdu_name)
1357 )
1358
1359 await asyncio.sleep(10)
1360 nb_tries += 1
1361 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1362
1363 async def wait_vm_up_insert_key_ro(
1364 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1365 ):
1366 """
1367 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1368 :param logging_text: prefix use for logging
1369 :param nsr_id:
1370 :param vnfr_id:
1371 :param vdu_id:
1372 :param vdu_index:
1373 :param pub_key: public ssh key to inject, None to skip
1374 :param user: user to apply the public ssh key
1375 :return: IP address
1376 """
1377
1378 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1379 ip_address = None
1380 target_vdu_id = None
1381 ro_retries = 0
1382
1383 while True:
1384 ro_retries += 1
1385 if ro_retries >= 360: # 1 hour
1386 raise LcmException(
1387 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1388 )
1389
1390 await asyncio.sleep(10)
1391
1392 # get ip address
1393 if not target_vdu_id:
1394 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1395
1396 if not vdu_id: # for the VNF case
1397 if db_vnfr.get("status") == "ERROR":
1398 raise LcmException(
1399 "Cannot inject ssh-key because target VNF is in error state"
1400 )
1401 ip_address = db_vnfr.get("ip-address")
1402 if not ip_address:
1403 continue
1404 vdur = next(
1405 (
1406 x
1407 for x in get_iterable(db_vnfr, "vdur")
1408 if x.get("ip-address") == ip_address
1409 ),
1410 None,
1411 )
1412 else: # VDU case
1413 vdur = next(
1414 (
1415 x
1416 for x in get_iterable(db_vnfr, "vdur")
1417 if x.get("vdu-id-ref") == vdu_id
1418 and x.get("count-index") == vdu_index
1419 ),
1420 None,
1421 )
1422
1423 if (
1424 not vdur and len(db_vnfr.get("vdur", ())) == 1
1425 ): # If only one, this should be the target vdu
1426 vdur = db_vnfr["vdur"][0]
1427 if not vdur:
1428 raise LcmException(
1429 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1430 vnfr_id, vdu_id, vdu_index
1431 )
1432 )
1433 # New generation RO stores information at "vim_info"
1434 ng_ro_status = None
1435 target_vim = None
1436 if vdur.get("vim_info"):
1437 target_vim = next(
1438 t for t in vdur["vim_info"]
1439 ) # there should be only one key
1440 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1441 if (
1442 vdur.get("pdu-type")
1443 or vdur.get("status") == "ACTIVE"
1444 or ng_ro_status == "ACTIVE"
1445 ):
1446 ip_address = vdur.get("ip-address")
1447 if not ip_address:
1448 continue
1449 target_vdu_id = vdur["vdu-id-ref"]
1450 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1451 raise LcmException(
1452 "Cannot inject ssh-key because target VM is in error state"
1453 )
1454
1455 if not target_vdu_id:
1456 continue
1457
1458 # inject public key into machine
1459 if pub_key and user:
1460 self.logger.debug(logging_text + "Inserting RO key")
1461 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1462 if vdur.get("pdu-type"):
1463 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1464 return ip_address
1465 try:
1466 target = {
1467 "action": {
1468 "action": "inject_ssh_key",
1469 "key": pub_key,
1470 "user": user,
1471 },
1472 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1473 }
1474 desc = await self.RO.deploy(nsr_id, target)
1475 action_id = desc["action_id"]
1476 await self._wait_ng_ro(
1477 nsr_id, action_id, timeout=600, operation="instantiation"
1478 )
1479 break
1480 except NgRoException as e:
1481 raise LcmException(
1482 "Reaching max tries injecting key. Error: {}".format(e)
1483 )
1484 else:
1485 break
1486
1487 return ip_address
1488
1489 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1490 """
1491 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1492 """
1493 my_vca = vca_deployed_list[vca_index]
1494 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1495 # vdu or kdu: no dependencies
1496 return
1497 timeout = 300
1498 while timeout >= 0:
1499 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1500 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1501 configuration_status_list = db_nsr["configurationStatus"]
1502 for index, vca_deployed in enumerate(configuration_status_list):
1503 if index == vca_index:
1504 # myself
1505 continue
1506 if not my_vca.get("member-vnf-index") or (
1507 vca_deployed.get("member-vnf-index")
1508 == my_vca.get("member-vnf-index")
1509 ):
1510 internal_status = configuration_status_list[index].get("status")
1511 if internal_status == "READY":
1512 continue
1513 elif internal_status == "BROKEN":
1514 raise LcmException(
1515 "Configuration aborted because dependent charm/s has failed"
1516 )
1517 else:
1518 break
1519 else:
1520 # no dependencies, return
1521 return
1522 await asyncio.sleep(10)
1523 timeout -= 1
1524
1525 raise LcmException("Configuration aborted because dependent charm/s timeout")
1526
1527 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1528 vca_id = None
1529 if db_vnfr:
1530 vca_id = deep_get(db_vnfr, ("vca-id",))
1531 elif db_nsr:
1532 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1533 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1534 return vca_id
1535
    async def instantiate_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        kdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Deploy one VCA (execution environment) for a NS/VNF/VDU/KDU element and
        run its Day-1 configuration.

        Creates or registers the execution environment at N2VC, installs the
        configuration software, adds relations, optionally injects an ssh key
        into the target VM, executes the initial config primitives and, for
        helm-based EEs, configures prometheus scrape jobs. Progress and status
        are written to the nsrs/nslcmops collections along the way.

        :param logging_text: prefix used for logging
        :param vca_index: index of this VCA inside db_nsr["_admin"]["deployed"]["VCA"]
        :param nsi_id: netslice instance id, may be None
        :param db_nsr: database content of the NS record
        :param db_vnfr: database content of the VNF record; falsy for a NS-level VCA
        :param vdu_id: VDU id when the charm targets a VDU, else None
        :param kdu_name: KDU name when the charm targets a KDU, else None
        :param vdu_index: count-index of the target VDU
        :param kdu_index: count-index of the target KDU
        :param config_descriptor: descriptor section with config-access and primitives
        :param deploy_params: params used when mapping primitive parameters
        :param base_folder: storage info used to locate the charm/chart artifact
        :param nslcmop_id: current operation id
        :param stage: 3-item status list written to the operation record
        :param vca_type: native_charm | lxc_proxy_charm | k8s_proxy_charm | helm | helm-v3
        :param vca_name: charm or chart name
        :param ee_config_descriptor: execution-environment descriptor section
        :raises LcmException: wrapping any failure, prefixed with the failing step
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # "step" tracks the current stage so the final exception message can say
        # exactly where the flow failed
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace down to VNF, then VDU or KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="CREATING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)

                ee_id = None
                credentials = None
                if vca_type == "k8s_proxy_charm":
                    ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
                        charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
                        namespace=namespace,
                        artifact_path=artifact_path,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )
                elif vca_type == "helm" or vca_type == "helm-v3":
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=nsr_id,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=artifact_path,
                        chart_model=vca_name,
                        vca_type=vca_type,
                    )
                else:
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )

            elif vca_type == "native_charm":
                # native charm runs inside the target VM, so wait for its IP first
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username
                # n2vc_redesign STEP 3.2

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name

            # n2vc_redesign STEP 3.3
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                other_update=db_nsr_update,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # a primitive literally named "config" provides the charm config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
            # number of units taken from the element record, defaulting to 1
            num_units = 1
            if vca_type == "lxc_proxy_charm":
                if element_type == "NS":
                    num_units = db_nsr.get("config-units") or 1
                elif element_type == "VNF":
                    num_units = db_vnfr.get("config-units") or 1
                elif element_type == "VDU":
                    for v in db_vnfr["vdur"]:
                        if vdu_id == v["vdu-id-ref"]:
                            num_units = v.get("config-units") or 1
                            break
            if vca_type != "k8s_proxy_charm":
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=num_units,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # add relations for this VCA (wait for other peers related with this VCA)
            is_relation_added = await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )

            if not is_relation_added:
                raise LcmException("Relations could not be added to VCA.")

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # default rw_mgmt_ip to None, avoiding the non definition of the variable
                rw_mgmt_ip = None

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip, services = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                        vnfd = self.db.get_one(
                            "vnfds_revisions",
                            {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
                        )
                        kdu = get_kdu(vnfd, kdu_name)
                        kdu_services = [
                            service["name"] for service in get_kdu_services(kdu)
                        ]
                        # only pass to the charm the k8s services declared in the KDU
                        exposed_services = []
                        for service in services:
                            if any(s in service["name"] for s in kdu_services):
                                exposed_services.append(service)
                        await self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name="config",
                            params_dict={
                                "osm-config": json.dumps(
                                    OsmConfigBuilder(
                                        k8s={"services": exposed_services}
                                    ).build()
                                )
                            },
                            vca_id=vca_id,
                        )

                    # This verification is needed in order to avoid trying to add a public key
                    # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
                    # for a KNF and not for its KDUs, the previous verification gives False, and the code
                    # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
                    # or it is a KNF)
                    elif db_vnfr.get("vdur"):
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # n2vc_redesign STEP 6  Execute initial config primitive
            step = "execute initial config primitive"

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
            else:
                # NS
                stage[0] = "Stage 5/5: running Day-1 primitives for NS."

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
            )

            self._write_op_status(op_id=nslcmop_id, stage=stage)

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(
                        self._get_ns_config_info(nsr_id)
                    )
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(
                    initial_config_primitive, {}, deploy_params
                )

                step = "execute primitive '{}' params '{}'".format(
                    initial_config_primitive["name"], primitive_params_
                )
                self.logger.debug(logging_text + step)
                await self.vca_map[vca_type].exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get("terminate-config-primitive"):
                        self.update_db_2(
                            "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                        )
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            if vca_type == "helm" or vca_type == "helm-v3":
                # TODO: review for those cases where the helm chart is a reference and
                # is not part of the NF package
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                    element_type=element_type,
                    vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
                    vdu_id=vdu_id,
                    vdu_index=vdu_index,
                    kdu_name=kdu_name,
                    kdu_index=kdu_index,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{}. {}".format(step, e)) from e
2012
2013 def _write_ns_status(
2014 self,
2015 nsr_id: str,
2016 ns_state: str,
2017 current_operation: str,
2018 current_operation_id: str,
2019 error_description: str = None,
2020 error_detail: str = None,
2021 other_update: dict = None,
2022 ):
2023 """
2024 Update db_nsr fields.
2025 :param nsr_id:
2026 :param ns_state:
2027 :param current_operation:
2028 :param current_operation_id:
2029 :param error_description:
2030 :param error_detail:
2031 :param other_update: Other required changes at database if provided, will be cleared
2032 :return:
2033 """
2034 try:
2035 db_dict = other_update or {}
2036 db_dict[
2037 "_admin.nslcmop"
2038 ] = current_operation_id # for backward compatibility
2039 db_dict["_admin.current-operation"] = current_operation_id
2040 db_dict["_admin.operation-type"] = (
2041 current_operation if current_operation != "IDLE" else None
2042 )
2043 db_dict["currentOperation"] = current_operation
2044 db_dict["currentOperationID"] = current_operation_id
2045 db_dict["errorDescription"] = error_description
2046 db_dict["errorDetail"] = error_detail
2047
2048 if ns_state:
2049 db_dict["nsState"] = ns_state
2050 self.update_db_2("nsrs", nsr_id, db_dict)
2051 except DbException as e:
2052 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2053
2054 def _write_op_status(
2055 self,
2056 op_id: str,
2057 stage: list = None,
2058 error_message: str = None,
2059 queuePosition: int = 0,
2060 operation_state: str = None,
2061 other_update: dict = None,
2062 ):
2063 try:
2064 db_dict = other_update or {}
2065 db_dict["queuePosition"] = queuePosition
2066 if isinstance(stage, list):
2067 db_dict["stage"] = stage[0]
2068 db_dict["detailed-status"] = " ".join(stage)
2069 elif stage is not None:
2070 db_dict["stage"] = str(stage)
2071
2072 if error_message is not None:
2073 db_dict["errorMessage"] = error_message
2074 if operation_state is not None:
2075 db_dict["operationState"] = operation_state
2076 db_dict["statusEnteredTime"] = time()
2077 self.update_db_2("nslcmops", op_id, db_dict)
2078 except DbException as e:
2079 self.logger.warn(
2080 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2081 )
2082
2083 def _write_all_config_status(self, db_nsr: dict, status: str):
2084 try:
2085 nsr_id = db_nsr["_id"]
2086 # configurationStatus
2087 config_status = db_nsr.get("configurationStatus")
2088 if config_status:
2089 db_nsr_update = {
2090 "configurationStatus.{}.status".format(index): status
2091 for index, v in enumerate(config_status)
2092 if v
2093 }
2094 # update status
2095 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2096
2097 except DbException as e:
2098 self.logger.warn(
2099 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2100 )
2101
2102 def _write_configuration_status(
2103 self,
2104 nsr_id: str,
2105 vca_index: int,
2106 status: str = None,
2107 element_under_configuration: str = None,
2108 element_type: str = None,
2109 other_update: dict = None,
2110 ):
2111 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2112 # .format(vca_index, status))
2113
2114 try:
2115 db_path = "configurationStatus.{}.".format(vca_index)
2116 db_dict = other_update or {}
2117 if status:
2118 db_dict[db_path + "status"] = status
2119 if element_under_configuration:
2120 db_dict[
2121 db_path + "elementUnderConfiguration"
2122 ] = element_under_configuration
2123 if element_type:
2124 db_dict[db_path + "elementType"] = element_type
2125 self.update_db_2("nsrs", nsr_id, db_dict)
2126 except DbException as e:
2127 self.logger.warn(
2128 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2129 status, nsr_id, vca_index, e
2130 )
2131 )
2132
2133 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2134 """
2135 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2136 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2137 Database is used because the result can be obtained from a different LCM worker in case of HA.
2138 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2139 :param db_nslcmop: database content of nslcmop
2140 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2141 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2142 computed 'vim-account-id'
2143 """
2144 modified = False
2145 nslcmop_id = db_nslcmop["_id"]
2146 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2147 if placement_engine == "PLA":
2148 self.logger.debug(
2149 logging_text + "Invoke and wait for placement optimization"
2150 )
2151 await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
2152 db_poll_interval = 5
2153 wait = db_poll_interval * 10
2154 pla_result = None
2155 while not pla_result and wait >= 0:
2156 await asyncio.sleep(db_poll_interval)
2157 wait -= db_poll_interval
2158 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2159 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2160
2161 if not pla_result:
2162 raise LcmException(
2163 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2164 )
2165
2166 for pla_vnf in pla_result["vnf"]:
2167 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2168 if not pla_vnf.get("vimAccountId") or not vnfr:
2169 continue
2170 modified = True
2171 self.db.set_one(
2172 "vnfrs",
2173 {"_id": vnfr["_id"]},
2174 {"vim-account-id": pla_vnf["vimAccountId"]},
2175 )
2176 # Modifies db_vnfrs
2177 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2178 return modified
2179
2180 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2181 alerts = []
2182 nsr_id = vnfr["nsr-id-ref"]
2183 df = vnfd.get("df", [{}])[0]
2184 # Checking for auto-healing configuration
2185 if "healing-aspect" in df:
2186 healing_aspects = df["healing-aspect"]
2187 for healing in healing_aspects:
2188 for healing_policy in healing.get("healing-policy", ()):
2189 vdu_id = healing_policy["vdu-id"]
2190 vdur = next(
2191 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2192 {},
2193 )
2194 if not vdur:
2195 continue
2196 metric_name = "vm_status"
2197 vdu_name = vdur.get("name")
2198 vnf_member_index = vnfr["member-vnf-index-ref"]
2199 uuid = str(uuid4())
2200 name = f"healing_{uuid}"
2201 action = healing_policy
2202 # action_on_recovery = healing.get("action-on-recovery")
2203 # cooldown_time = healing.get("cooldown-time")
2204 # day1 = healing.get("day1")
2205 alert = {
2206 "uuid": uuid,
2207 "name": name,
2208 "metric": metric_name,
2209 "tags": {
2210 "ns_id": nsr_id,
2211 "vnf_member_index": vnf_member_index,
2212 "vdu_name": vdu_name,
2213 },
2214 "alarm_status": "ok",
2215 "action_type": "healing",
2216 "action": action,
2217 }
2218 alerts.append(alert)
2219 return alerts
2220
2221 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2222 alerts = []
2223 nsr_id = vnfr["nsr-id-ref"]
2224 df = vnfd.get("df", [{}])[0]
2225 # Checking for auto-scaling configuration
2226 if "scaling-aspect" in df:
2227 scaling_aspects = df["scaling-aspect"]
2228 all_vnfd_monitoring_params = {}
2229 for ivld in vnfd.get("int-virtual-link-desc", ()):
2230 for mp in ivld.get("monitoring-parameters", ()):
2231 all_vnfd_monitoring_params[mp.get("id")] = mp
2232 for vdu in vnfd.get("vdu", ()):
2233 for mp in vdu.get("monitoring-parameter", ()):
2234 all_vnfd_monitoring_params[mp.get("id")] = mp
2235 for df in vnfd.get("df", ()):
2236 for mp in df.get("monitoring-parameter", ()):
2237 all_vnfd_monitoring_params[mp.get("id")] = mp
2238 for scaling_aspect in scaling_aspects:
2239 scaling_group_name = scaling_aspect.get("name", "")
2240 # Get monitored VDUs
2241 all_monitored_vdus = set()
2242 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2243 "deltas", ()
2244 ):
2245 for vdu_delta in delta.get("vdu-delta", ()):
2246 all_monitored_vdus.add(vdu_delta.get("id"))
2247 monitored_vdurs = list(
2248 filter(
2249 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2250 vnfr["vdur"],
2251 )
2252 )
2253 if not monitored_vdurs:
2254 self.logger.error(
2255 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2256 )
2257 continue
2258 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2259 if scaling_policy["scaling-type"] != "automatic":
2260 continue
2261 threshold_time = scaling_policy.get("threshold-time", "1")
2262 cooldown_time = scaling_policy.get("cooldown-time", "0")
2263 for scaling_criteria in scaling_policy["scaling-criteria"]:
2264 monitoring_param_ref = scaling_criteria.get(
2265 "vnf-monitoring-param-ref"
2266 )
2267 vnf_monitoring_param = all_vnfd_monitoring_params[
2268 monitoring_param_ref
2269 ]
2270 for vdur in monitored_vdurs:
2271 vdu_id = vdur["vdu-id-ref"]
2272 metric_name = vnf_monitoring_param.get("performance-metric")
2273 metric_name = f"osm_{metric_name}"
2274 vnf_member_index = vnfr["member-vnf-index-ref"]
2275 scalein_threshold = scaling_criteria.get(
2276 "scale-in-threshold"
2277 )
2278 scaleout_threshold = scaling_criteria.get(
2279 "scale-out-threshold"
2280 )
2281 # Looking for min/max-number-of-instances
2282 instances_min_number = 1
2283 instances_max_number = 1
2284 vdu_profile = df["vdu-profile"]
2285 if vdu_profile:
2286 profile = next(
2287 item for item in vdu_profile if item["id"] == vdu_id
2288 )
2289 instances_min_number = profile.get(
2290 "min-number-of-instances", 1
2291 )
2292 instances_max_number = profile.get(
2293 "max-number-of-instances", 1
2294 )
2295
2296 if scalein_threshold:
2297 uuid = str(uuid4())
2298 name = f"scalein_{uuid}"
2299 operation = scaling_criteria[
2300 "scale-in-relational-operation"
2301 ]
2302 rel_operator = self.rel_operation_types.get(
2303 operation, "<="
2304 )
2305 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2306 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2307 labels = {
2308 "ns_id": nsr_id,
2309 "vnf_member_index": vnf_member_index,
2310 "vdu_id": vdu_id,
2311 }
2312 prom_cfg = {
2313 "alert": name,
2314 "expr": expression,
2315 "for": str(threshold_time) + "m",
2316 "labels": labels,
2317 }
2318 action = scaling_policy
2319 action = {
2320 "scaling-group": scaling_group_name,
2321 "cooldown-time": cooldown_time,
2322 }
2323 alert = {
2324 "uuid": uuid,
2325 "name": name,
2326 "metric": metric_name,
2327 "tags": {
2328 "ns_id": nsr_id,
2329 "vnf_member_index": vnf_member_index,
2330 "vdu_id": vdu_id,
2331 },
2332 "alarm_status": "ok",
2333 "action_type": "scale_in",
2334 "action": action,
2335 "prometheus_config": prom_cfg,
2336 }
2337 alerts.append(alert)
2338
2339 if scaleout_threshold:
2340 uuid = str(uuid4())
2341 name = f"scaleout_{uuid}"
2342 operation = scaling_criteria[
2343 "scale-out-relational-operation"
2344 ]
2345 rel_operator = self.rel_operation_types.get(
2346 operation, "<="
2347 )
2348 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2349 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2350 labels = {
2351 "ns_id": nsr_id,
2352 "vnf_member_index": vnf_member_index,
2353 "vdu_id": vdu_id,
2354 }
2355 prom_cfg = {
2356 "alert": name,
2357 "expr": expression,
2358 "for": str(threshold_time) + "m",
2359 "labels": labels,
2360 }
2361 action = scaling_policy
2362 action = {
2363 "scaling-group": scaling_group_name,
2364 "cooldown-time": cooldown_time,
2365 }
2366 alert = {
2367 "uuid": uuid,
2368 "name": name,
2369 "metric": metric_name,
2370 "tags": {
2371 "ns_id": nsr_id,
2372 "vnf_member_index": vnf_member_index,
2373 "vdu_id": vdu_id,
2374 },
2375 "alarm_status": "ok",
2376 "action_type": "scale_out",
2377 "action": action,
2378 "prometheus_config": prom_cfg,
2379 }
2380 alerts.append(alert)
2381 return alerts
2382
2383 def _gather_vnfr_alarm_alerts(self, vnfr, vnfd):
2384 alerts = []
2385 nsr_id = vnfr["nsr-id-ref"]
2386 vnf_member_index = vnfr["member-vnf-index-ref"]
2387
2388 # Checking for VNF alarm configuration
2389 for vdur in vnfr["vdur"]:
2390 vdu_id = vdur["vdu-id-ref"]
2391 vdu = next(filter(lambda vdu: vdu["id"] == vdu_id, vnfd["vdu"]))
2392 if "alarm" in vdu:
2393 # Get VDU monitoring params, since alerts are based on them
2394 vdu_monitoring_params = {}
2395 for mp in vdu.get("monitoring-parameter", []):
2396 vdu_monitoring_params[mp.get("id")] = mp
2397 if not vdu_monitoring_params:
2398 self.logger.error(
2399 "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
2400 )
2401 continue
2402 # Get alarms in the VDU
2403 alarm_descriptors = vdu["alarm"]
2404 # Create VDU alarms for each alarm in the VDU
2405 for alarm_descriptor in alarm_descriptors:
2406 # Check that the VDU alarm refers to a proper monitoring param
2407 alarm_monitoring_param = alarm_descriptor.get(
2408 "vnf-monitoring-param-ref", ""
2409 )
2410 vdu_specific_monitoring_param = vdu_monitoring_params.get(
2411 alarm_monitoring_param, {}
2412 )
2413 if not vdu_specific_monitoring_param:
2414 self.logger.error(
2415 "VDU alarm refers to a VDU monitoring param not present in the VDU"
2416 )
2417 continue
2418 metric_name = vdu_specific_monitoring_param.get(
2419 "performance-metric"
2420 )
2421 if not metric_name:
2422 self.logger.error(
2423 "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
2424 )
2425 continue
2426 # Set params of the alarm to be created in Prometheus
2427 metric_name = f"osm_{metric_name}"
2428 metric_threshold = alarm_descriptor.get("value")
2429 uuid = str(uuid4())
2430 alert_name = f"vdu_alarm_{uuid}"
2431 operation = alarm_descriptor["operation"]
2432 rel_operator = self.rel_operation_types.get(operation, "<=")
2433 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2434 expression = f"{metric_selector} {rel_operator} {metric_threshold}"
2435 labels = {
2436 "ns_id": nsr_id,
2437 "vnf_member_index": vnf_member_index,
2438 "vdu_id": vdu_id,
2439 "vdu_name": "{{ $labels.vdu_name }}",
2440 }
2441 prom_cfg = {
2442 "alert": alert_name,
2443 "expr": expression,
2444 "for": "1m", # default value. Ideally, this should be related to an IM param, but there is not such param
2445 "labels": labels,
2446 }
2447 alarm_action = dict()
2448 for action_type in ["ok", "insufficient-data", "alarm"]:
2449 if (
2450 "actions" in alarm_descriptor
2451 and action_type in alarm_descriptor["actions"]
2452 ):
2453 alarm_action[action_type] = alarm_descriptor["actions"][
2454 action_type
2455 ]
2456 alert = {
2457 "uuid": uuid,
2458 "name": alert_name,
2459 "metric": metric_name,
2460 "tags": {
2461 "ns_id": nsr_id,
2462 "vnf_member_index": vnf_member_index,
2463 "vdu_id": vdu_id,
2464 },
2465 "alarm_status": "ok",
2466 "action_type": "vdu_alarm",
2467 "action": alarm_action,
2468 "prometheus_config": prom_cfg,
2469 }
2470 alerts.append(alert)
2471 return alerts
2472
2473 def update_nsrs_with_pla_result(self, params):
2474 try:
2475 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2476 self.update_db_2(
2477 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2478 )
2479 except Exception as e:
2480 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2481
2482 async def instantiate(self, nsr_id, nslcmop_id):
2483 """
2484
2485 :param nsr_id: ns instance to deploy
2486 :param nslcmop_id: operation to run
2487 :return:
2488 """
2489
2490 # Try to lock HA task here
2491 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2492 if not task_is_locked_by_me:
2493 self.logger.debug(
2494 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2495 )
2496 return
2497
2498 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2499 self.logger.debug(logging_text + "Enter")
2500
2501 # get all needed from database
2502
2503 # database nsrs record
2504 db_nsr = None
2505
2506 # database nslcmops record
2507 db_nslcmop = None
2508
2509 # update operation on nsrs
2510 db_nsr_update = {}
2511 # update operation on nslcmops
2512 db_nslcmop_update = {}
2513
2514 timeout_ns_deploy = self.timeout.ns_deploy
2515
2516 nslcmop_operation_state = None
2517 db_vnfrs = {} # vnf's info indexed by member-index
2518 # n2vc_info = {}
2519 tasks_dict_info = {} # from task to info text
2520 exc = None
2521 error_list = []
2522 stage = [
2523 "Stage 1/5: preparation of the environment.",
2524 "Waiting for previous operations to terminate.",
2525 "",
2526 ]
2527 # ^ stage, step, VIM progress
2528 try:
2529 # wait for any previous tasks in process
2530 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2531
2532 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2533 stage[1] = "Reading from database."
2534 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2535 db_nsr_update["detailed-status"] = "creating"
2536 db_nsr_update["operational-status"] = "init"
2537 self._write_ns_status(
2538 nsr_id=nsr_id,
2539 ns_state="BUILDING",
2540 current_operation="INSTANTIATING",
2541 current_operation_id=nslcmop_id,
2542 other_update=db_nsr_update,
2543 )
2544 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2545
2546 # read from db: operation
2547 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2548 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2549 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2550 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2551 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2552 )
2553 ns_params = db_nslcmop.get("operationParams")
2554 if ns_params and ns_params.get("timeout_ns_deploy"):
2555 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2556
2557 # read from db: ns
2558 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2559 self.logger.debug(logging_text + stage[1])
2560 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2561 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2562 self.logger.debug(logging_text + stage[1])
2563 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2564 self.fs.sync(db_nsr["nsd-id"])
2565 db_nsr["nsd"] = nsd
2566 # nsr_name = db_nsr["name"] # TODO short-name??
2567
2568 # read from db: vnf's of this ns
2569 stage[1] = "Getting vnfrs from db."
2570 self.logger.debug(logging_text + stage[1])
2571 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2572
2573 # read from db: vnfd's for every vnf
2574 db_vnfds = [] # every vnfd data
2575
2576 # for each vnf in ns, read vnfd
2577 for vnfr in db_vnfrs_list:
2578 if vnfr.get("kdur"):
2579 kdur_list = []
2580 for kdur in vnfr["kdur"]:
2581 if kdur.get("additionalParams"):
2582 kdur["additionalParams"] = json.loads(
2583 kdur["additionalParams"]
2584 )
2585 kdur_list.append(kdur)
2586 vnfr["kdur"] = kdur_list
2587
2588 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2589 vnfd_id = vnfr["vnfd-id"]
2590 vnfd_ref = vnfr["vnfd-ref"]
2591 self.fs.sync(vnfd_id)
2592
2593 # if we haven't this vnfd, read it from db
2594 if vnfd_id not in db_vnfds:
2595 # read from db
2596 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2597 vnfd_id, vnfd_ref
2598 )
2599 self.logger.debug(logging_text + stage[1])
2600 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2601
2602 # store vnfd
2603 db_vnfds.append(vnfd)
2604
2605 # Get or generates the _admin.deployed.VCA list
2606 vca_deployed_list = None
2607 if db_nsr["_admin"].get("deployed"):
2608 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2609 if vca_deployed_list is None:
2610 vca_deployed_list = []
2611 configuration_status_list = []
2612 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2613 db_nsr_update["configurationStatus"] = configuration_status_list
2614 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2615 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2616 elif isinstance(vca_deployed_list, dict):
2617 # maintain backward compatibility. Change a dict to list at database
2618 vca_deployed_list = list(vca_deployed_list.values())
2619 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2620 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2621
2622 if not isinstance(
2623 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2624 ):
2625 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2626 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2627
2628 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2629 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2630 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2631 self.db.set_list(
2632 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2633 )
2634
2635 # n2vc_redesign STEP 2 Deploy Network Scenario
2636 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2637 self._write_op_status(op_id=nslcmop_id, stage=stage)
2638
2639 stage[1] = "Deploying KDUs."
2640 # self.logger.debug(logging_text + "Before deploy_kdus")
2641 # Call to deploy_kdus in case exists the "vdu:kdu" param
2642 await self.deploy_kdus(
2643 logging_text=logging_text,
2644 nsr_id=nsr_id,
2645 nslcmop_id=nslcmop_id,
2646 db_vnfrs=db_vnfrs,
2647 db_vnfds=db_vnfds,
2648 task_instantiation_info=tasks_dict_info,
2649 )
2650
2651 stage[1] = "Getting VCA public key."
2652 # n2vc_redesign STEP 1 Get VCA public ssh-key
2653 # feature 1429. Add n2vc public key to needed VMs
2654 n2vc_key = self.n2vc.get_public_key()
2655 n2vc_key_list = [n2vc_key]
2656 if self.vca_config.public_key:
2657 n2vc_key_list.append(self.vca_config.public_key)
2658
2659 stage[1] = "Deploying NS at VIM."
2660 task_ro = asyncio.ensure_future(
2661 self.instantiate_RO(
2662 logging_text=logging_text,
2663 nsr_id=nsr_id,
2664 nsd=nsd,
2665 db_nsr=db_nsr,
2666 db_nslcmop=db_nslcmop,
2667 db_vnfrs=db_vnfrs,
2668 db_vnfds=db_vnfds,
2669 n2vc_key_list=n2vc_key_list,
2670 stage=stage,
2671 )
2672 )
2673 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2674 tasks_dict_info[task_ro] = "Deploying at VIM"
2675
2676 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2677 stage[1] = "Deploying Execution Environments."
2678 self.logger.debug(logging_text + stage[1])
2679
2680 # create namespace and certificate if any helm based EE is present in the NS
2681 if check_helm_ee_in_ns(db_vnfds):
2682 await self.vca_map["helm-v3"].setup_ns_namespace(
2683 name=nsr_id,
2684 )
2685 # create TLS certificates
2686 await self.vca_map["helm-v3"].create_tls_certificate(
2687 secret_name=self.EE_TLS_NAME,
2688 dns_prefix="*",
2689 nsr_id=nsr_id,
2690 usage="server auth",
2691 namespace=nsr_id,
2692 )
2693
2694 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2695 for vnf_profile in get_vnf_profiles(nsd):
2696 vnfd_id = vnf_profile["vnfd-id"]
2697 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2698 member_vnf_index = str(vnf_profile["id"])
2699 db_vnfr = db_vnfrs[member_vnf_index]
2700 base_folder = vnfd["_admin"]["storage"]
2701 vdu_id = None
2702 vdu_index = 0
2703 vdu_name = None
2704 kdu_name = None
2705 kdu_index = None
2706
2707 # Get additional parameters
2708 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2709 if db_vnfr.get("additionalParamsForVnf"):
2710 deploy_params.update(
2711 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2712 )
2713
2714 descriptor_config = get_configuration(vnfd, vnfd["id"])
2715 if descriptor_config:
2716 self._deploy_n2vc(
2717 logging_text=logging_text
2718 + "member_vnf_index={} ".format(member_vnf_index),
2719 db_nsr=db_nsr,
2720 db_vnfr=db_vnfr,
2721 nslcmop_id=nslcmop_id,
2722 nsr_id=nsr_id,
2723 nsi_id=nsi_id,
2724 vnfd_id=vnfd_id,
2725 vdu_id=vdu_id,
2726 kdu_name=kdu_name,
2727 member_vnf_index=member_vnf_index,
2728 vdu_index=vdu_index,
2729 kdu_index=kdu_index,
2730 vdu_name=vdu_name,
2731 deploy_params=deploy_params,
2732 descriptor_config=descriptor_config,
2733 base_folder=base_folder,
2734 task_instantiation_info=tasks_dict_info,
2735 stage=stage,
2736 )
2737
2738 # Deploy charms for each VDU that supports one.
2739 for vdud in get_vdu_list(vnfd):
2740 vdu_id = vdud["id"]
2741 descriptor_config = get_configuration(vnfd, vdu_id)
2742 vdur = find_in_list(
2743 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2744 )
2745
2746 if vdur.get("additionalParams"):
2747 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2748 else:
2749 deploy_params_vdu = deploy_params
2750 deploy_params_vdu["OSM"] = get_osm_params(
2751 db_vnfr, vdu_id, vdu_count_index=0
2752 )
2753 vdud_count = get_number_of_instances(vnfd, vdu_id)
2754
2755 self.logger.debug("VDUD > {}".format(vdud))
2756 self.logger.debug(
2757 "Descriptor config > {}".format(descriptor_config)
2758 )
2759 if descriptor_config:
2760 vdu_name = None
2761 kdu_name = None
2762 kdu_index = None
2763 for vdu_index in range(vdud_count):
2764 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2765 self._deploy_n2vc(
2766 logging_text=logging_text
2767 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2768 member_vnf_index, vdu_id, vdu_index
2769 ),
2770 db_nsr=db_nsr,
2771 db_vnfr=db_vnfr,
2772 nslcmop_id=nslcmop_id,
2773 nsr_id=nsr_id,
2774 nsi_id=nsi_id,
2775 vnfd_id=vnfd_id,
2776 vdu_id=vdu_id,
2777 kdu_name=kdu_name,
2778 kdu_index=kdu_index,
2779 member_vnf_index=member_vnf_index,
2780 vdu_index=vdu_index,
2781 vdu_name=vdu_name,
2782 deploy_params=deploy_params_vdu,
2783 descriptor_config=descriptor_config,
2784 base_folder=base_folder,
2785 task_instantiation_info=tasks_dict_info,
2786 stage=stage,
2787 )
2788 for kdud in get_kdu_list(vnfd):
2789 kdu_name = kdud["name"]
2790 descriptor_config = get_configuration(vnfd, kdu_name)
2791 if descriptor_config:
2792 vdu_id = None
2793 vdu_index = 0
2794 vdu_name = None
2795 kdu_index, kdur = next(
2796 x
2797 for x in enumerate(db_vnfr["kdur"])
2798 if x[1]["kdu-name"] == kdu_name
2799 )
2800 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2801 if kdur.get("additionalParams"):
2802 deploy_params_kdu.update(
2803 parse_yaml_strings(kdur["additionalParams"].copy())
2804 )
2805
2806 self._deploy_n2vc(
2807 logging_text=logging_text,
2808 db_nsr=db_nsr,
2809 db_vnfr=db_vnfr,
2810 nslcmop_id=nslcmop_id,
2811 nsr_id=nsr_id,
2812 nsi_id=nsi_id,
2813 vnfd_id=vnfd_id,
2814 vdu_id=vdu_id,
2815 kdu_name=kdu_name,
2816 member_vnf_index=member_vnf_index,
2817 vdu_index=vdu_index,
2818 kdu_index=kdu_index,
2819 vdu_name=vdu_name,
2820 deploy_params=deploy_params_kdu,
2821 descriptor_config=descriptor_config,
2822 base_folder=base_folder,
2823 task_instantiation_info=tasks_dict_info,
2824 stage=stage,
2825 )
2826
2827 # Check if each vnf has exporter for metric collection if so update prometheus job records
2828 if "exporters-endpoints" in vnfd.get("df")[0]:
2829 exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
2830 self.logger.debug("exporter config :{}".format(exporter_config))
2831 artifact_path = "{}/{}/{}".format(
2832 base_folder["folder"],
2833 base_folder["pkg-dir"],
2834 "exporter-endpoint",
2835 )
2836 ee_id = None
2837 ee_config_descriptor = exporter_config
2838 vnfr_id = db_vnfr["id"]
2839 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2840 logging_text,
2841 nsr_id,
2842 vnfr_id,
2843 vdu_id=None,
2844 vdu_index=None,
2845 user=None,
2846 pub_key=None,
2847 )
2848 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
2849 self.logger.debug("Artifact_path:{}".format(artifact_path))
2850 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
2851 vdu_id_for_prom = None
2852 vdu_index_for_prom = None
2853 for x in get_iterable(db_vnfr, "vdur"):
2854 vdu_id_for_prom = x.get("vdu-id-ref")
2855 vdu_index_for_prom = x.get("count-index")
2856 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2857 ee_id=ee_id,
2858 artifact_path=artifact_path,
2859 ee_config_descriptor=ee_config_descriptor,
2860 vnfr_id=vnfr_id,
2861 nsr_id=nsr_id,
2862 target_ip=rw_mgmt_ip,
2863 element_type="VDU",
2864 vdu_id=vdu_id_for_prom,
2865 vdu_index=vdu_index_for_prom,
2866 )
2867
2868 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
2869 if prometheus_jobs:
2870 db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
2871 self.update_db_2(
2872 "nsrs",
2873 nsr_id,
2874 db_nsr_update,
2875 )
2876
2877 for job in prometheus_jobs:
2878 self.db.set_one(
2879 "prometheus_jobs",
2880 {"job_name": job["job_name"]},
2881 job,
2882 upsert=True,
2883 fail_on_empty=False,
2884 )
2885
2886 # Check if this NS has a charm configuration
2887 descriptor_config = nsd.get("ns-configuration")
2888 if descriptor_config and descriptor_config.get("juju"):
2889 vnfd_id = None
2890 db_vnfr = None
2891 member_vnf_index = None
2892 vdu_id = None
2893 kdu_name = None
2894 kdu_index = None
2895 vdu_index = 0
2896 vdu_name = None
2897
2898 # Get additional parameters
2899 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2900 if db_nsr.get("additionalParamsForNs"):
2901 deploy_params.update(
2902 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2903 )
2904 base_folder = nsd["_admin"]["storage"]
2905 self._deploy_n2vc(
2906 logging_text=logging_text,
2907 db_nsr=db_nsr,
2908 db_vnfr=db_vnfr,
2909 nslcmop_id=nslcmop_id,
2910 nsr_id=nsr_id,
2911 nsi_id=nsi_id,
2912 vnfd_id=vnfd_id,
2913 vdu_id=vdu_id,
2914 kdu_name=kdu_name,
2915 member_vnf_index=member_vnf_index,
2916 vdu_index=vdu_index,
2917 kdu_index=kdu_index,
2918 vdu_name=vdu_name,
2919 deploy_params=deploy_params,
2920 descriptor_config=descriptor_config,
2921 base_folder=base_folder,
2922 task_instantiation_info=tasks_dict_info,
2923 stage=stage,
2924 )
2925
2926 # rest of staff will be done at finally
2927
2928 except (
2929 ROclient.ROClientException,
2930 DbException,
2931 LcmException,
2932 N2VCException,
2933 ) as e:
2934 self.logger.error(
2935 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2936 )
2937 exc = e
2938 except asyncio.CancelledError:
2939 self.logger.error(
2940 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2941 )
2942 exc = "Operation was cancelled"
2943 except Exception as e:
2944 exc = traceback.format_exc()
2945 self.logger.critical(
2946 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2947 exc_info=True,
2948 )
2949 finally:
2950 if exc:
2951 error_list.append(str(exc))
2952 try:
2953 # wait for pending tasks
2954 if tasks_dict_info:
2955 stage[1] = "Waiting for instantiate pending tasks."
2956 self.logger.debug(logging_text + stage[1])
2957 error_list += await self._wait_for_tasks(
2958 logging_text,
2959 tasks_dict_info,
2960 timeout_ns_deploy,
2961 stage,
2962 nslcmop_id,
2963 nsr_id=nsr_id,
2964 )
2965 stage[1] = stage[2] = ""
2966 except asyncio.CancelledError:
2967 error_list.append("Cancelled")
2968 # TODO cancel all tasks
2969 except Exception as exc:
2970 error_list.append(str(exc))
2971
2972 # update operation-status
2973 db_nsr_update["operational-status"] = "running"
2974 # let's begin with VCA 'configured' status (later we can change it)
2975 db_nsr_update["config-status"] = "configured"
2976 for task, task_name in tasks_dict_info.items():
2977 if not task.done() or task.cancelled() or task.exception():
2978 if task_name.startswith(self.task_name_deploy_vca):
2979 # A N2VC task is pending
2980 db_nsr_update["config-status"] = "failed"
2981 else:
2982 # RO or KDU task is pending
2983 db_nsr_update["operational-status"] = "failed"
2984
2985 # update status at database
2986 if error_list:
2987 error_detail = ". ".join(error_list)
2988 self.logger.error(logging_text + error_detail)
2989 error_description_nslcmop = "{} Detail: {}".format(
2990 stage[0], error_detail
2991 )
2992 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2993 nslcmop_id, stage[0]
2994 )
2995
2996 db_nsr_update["detailed-status"] = (
2997 error_description_nsr + " Detail: " + error_detail
2998 )
2999 db_nslcmop_update["detailed-status"] = error_detail
3000 nslcmop_operation_state = "FAILED"
3001 ns_state = "BROKEN"
3002 else:
3003 error_detail = None
3004 error_description_nsr = error_description_nslcmop = None
3005 ns_state = "READY"
3006 db_nsr_update["detailed-status"] = "Done"
3007 db_nslcmop_update["detailed-status"] = "Done"
3008 nslcmop_operation_state = "COMPLETED"
3009 # Gather auto-healing and auto-scaling alerts for each vnfr
3010 healing_alerts = []
3011 scaling_alerts = []
3012 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3013 vnfd = next(
3014 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3015 )
3016 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3017 for alert in healing_alerts:
3018 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3019 self.db.create("alerts", alert)
3020
3021 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3022 for alert in scaling_alerts:
3023 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3024 self.db.create("alerts", alert)
3025
3026 alarm_alerts = self._gather_vnfr_alarm_alerts(vnfr, vnfd)
3027 for alert in alarm_alerts:
3028 self.logger.info(f"Storing VNF alarm alert in MongoDB: {alert}")
3029 self.db.create("alerts", alert)
3030 if db_nsr:
3031 self._write_ns_status(
3032 nsr_id=nsr_id,
3033 ns_state=ns_state,
3034 current_operation="IDLE",
3035 current_operation_id=None,
3036 error_description=error_description_nsr,
3037 error_detail=error_detail,
3038 other_update=db_nsr_update,
3039 )
3040 self._write_op_status(
3041 op_id=nslcmop_id,
3042 stage="",
3043 error_message=error_description_nslcmop,
3044 operation_state=nslcmop_operation_state,
3045 other_update=db_nslcmop_update,
3046 )
3047
3048 if nslcmop_operation_state:
3049 try:
3050 await self.msg.aiowrite(
3051 "ns",
3052 "instantiated",
3053 {
3054 "nsr_id": nsr_id,
3055 "nslcmop_id": nslcmop_id,
3056 "operationState": nslcmop_operation_state,
3057 "startTime": db_nslcmop["startTime"],
3058 "links": db_nslcmop["links"],
3059 "operationParams": {
3060 "nsInstanceId": nsr_id,
3061 "nsdId": db_nsr["nsd-id"],
3062 },
3063 },
3064 )
3065 except Exception as e:
3066 self.logger.error(
3067 logging_text + "kafka_write notification Exception {}".format(e)
3068 )
3069
3070 self.logger.debug(logging_text + "Exit")
3071 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3072
3073 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
3074 if vnfd_id not in cached_vnfds:
3075 cached_vnfds[vnfd_id] = self.db.get_one(
3076 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3077 )
3078 return cached_vnfds[vnfd_id]
3079
3080 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3081 if vnf_profile_id not in cached_vnfrs:
3082 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3083 "vnfrs",
3084 {
3085 "member-vnf-index-ref": vnf_profile_id,
3086 "nsr-id-ref": nsr_id,
3087 },
3088 )
3089 return cached_vnfrs[vnf_profile_id]
3090
3091 def _is_deployed_vca_in_relation(
3092 self, vca: DeployedVCA, relation: Relation
3093 ) -> bool:
3094 found = False
3095 for endpoint in (relation.provider, relation.requirer):
3096 if endpoint["kdu-resource-profile-id"]:
3097 continue
3098 found = (
3099 vca.vnf_profile_id == endpoint.vnf_profile_id
3100 and vca.vdu_profile_id == endpoint.vdu_profile_id
3101 and vca.execution_environment_ref == endpoint.execution_environment_ref
3102 )
3103 if found:
3104 break
3105 return found
3106
3107 def _update_ee_relation_data_with_implicit_data(
3108 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
3109 ):
3110 ee_relation_data = safe_get_ee_relation(
3111 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
3112 )
3113 ee_relation_level = EELevel.get_level(ee_relation_data)
3114 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
3115 "execution-environment-ref"
3116 ]:
3117 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
3118 vnfd_id = vnf_profile["vnfd-id"]
3119 project = nsd["_admin"]["projects_read"][0]
3120 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3121 entity_id = (
3122 vnfd_id
3123 if ee_relation_level == EELevel.VNF
3124 else ee_relation_data["vdu-profile-id"]
3125 )
3126 ee = get_juju_ee_ref(db_vnfd, entity_id)
3127 if not ee:
3128 raise Exception(
3129 f"not execution environments found for ee_relation {ee_relation_data}"
3130 )
3131 ee_relation_data["execution-environment-ref"] = ee["id"]
3132 return ee_relation_data
3133
3134 def _get_ns_relations(
3135 self,
3136 nsr_id: str,
3137 nsd: Dict[str, Any],
3138 vca: DeployedVCA,
3139 cached_vnfds: Dict[str, Any],
3140 ) -> List[Relation]:
3141 relations = []
3142 db_ns_relations = get_ns_configuration_relation_list(nsd)
3143 for r in db_ns_relations:
3144 provider_dict = None
3145 requirer_dict = None
3146 if all(key in r for key in ("provider", "requirer")):
3147 provider_dict = r["provider"]
3148 requirer_dict = r["requirer"]
3149 elif "entities" in r:
3150 provider_id = r["entities"][0]["id"]
3151 provider_dict = {
3152 "nsr-id": nsr_id,
3153 "endpoint": r["entities"][0]["endpoint"],
3154 }
3155 if provider_id != nsd["id"]:
3156 provider_dict["vnf-profile-id"] = provider_id
3157 requirer_id = r["entities"][1]["id"]
3158 requirer_dict = {
3159 "nsr-id": nsr_id,
3160 "endpoint": r["entities"][1]["endpoint"],
3161 }
3162 if requirer_id != nsd["id"]:
3163 requirer_dict["vnf-profile-id"] = requirer_id
3164 else:
3165 raise Exception(
3166 "provider/requirer or entities must be included in the relation."
3167 )
3168 relation_provider = self._update_ee_relation_data_with_implicit_data(
3169 nsr_id, nsd, provider_dict, cached_vnfds
3170 )
3171 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3172 nsr_id, nsd, requirer_dict, cached_vnfds
3173 )
3174 provider = EERelation(relation_provider)
3175 requirer = EERelation(relation_requirer)
3176 relation = Relation(r["name"], provider, requirer)
3177 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3178 if vca_in_relation:
3179 relations.append(relation)
3180 return relations
3181
3182 def _get_vnf_relations(
3183 self,
3184 nsr_id: str,
3185 nsd: Dict[str, Any],
3186 vca: DeployedVCA,
3187 cached_vnfds: Dict[str, Any],
3188 ) -> List[Relation]:
3189 relations = []
3190 if vca.target_element == "ns":
3191 self.logger.debug("VCA is a NS charm, not a VNF.")
3192 return relations
3193 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3194 vnf_profile_id = vnf_profile["id"]
3195 vnfd_id = vnf_profile["vnfd-id"]
3196 project = nsd["_admin"]["projects_read"][0]
3197 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3198 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3199 for r in db_vnf_relations:
3200 provider_dict = None
3201 requirer_dict = None
3202 if all(key in r for key in ("provider", "requirer")):
3203 provider_dict = r["provider"]
3204 requirer_dict = r["requirer"]
3205 elif "entities" in r:
3206 provider_id = r["entities"][0]["id"]
3207 provider_dict = {
3208 "nsr-id": nsr_id,
3209 "vnf-profile-id": vnf_profile_id,
3210 "endpoint": r["entities"][0]["endpoint"],
3211 }
3212 if provider_id != vnfd_id:
3213 provider_dict["vdu-profile-id"] = provider_id
3214 requirer_id = r["entities"][1]["id"]
3215 requirer_dict = {
3216 "nsr-id": nsr_id,
3217 "vnf-profile-id": vnf_profile_id,
3218 "endpoint": r["entities"][1]["endpoint"],
3219 }
3220 if requirer_id != vnfd_id:
3221 requirer_dict["vdu-profile-id"] = requirer_id
3222 else:
3223 raise Exception(
3224 "provider/requirer or entities must be included in the relation."
3225 )
3226 relation_provider = self._update_ee_relation_data_with_implicit_data(
3227 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3228 )
3229 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3230 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3231 )
3232 provider = EERelation(relation_provider)
3233 requirer = EERelation(relation_requirer)
3234 relation = Relation(r["name"], provider, requirer)
3235 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3236 if vca_in_relation:
3237 relations.append(relation)
3238 return relations
3239
3240 def _get_kdu_resource_data(
3241 self,
3242 ee_relation: EERelation,
3243 db_nsr: Dict[str, Any],
3244 cached_vnfds: Dict[str, Any],
3245 ) -> DeployedK8sResource:
3246 nsd = get_nsd(db_nsr)
3247 vnf_profiles = get_vnf_profiles(nsd)
3248 vnfd_id = find_in_list(
3249 vnf_profiles,
3250 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3251 )["vnfd-id"]
3252 project = nsd["_admin"]["projects_read"][0]
3253 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3254 kdu_resource_profile = get_kdu_resource_profile(
3255 db_vnfd, ee_relation.kdu_resource_profile_id
3256 )
3257 kdu_name = kdu_resource_profile["kdu-name"]
3258 deployed_kdu, _ = get_deployed_kdu(
3259 db_nsr.get("_admin", ()).get("deployed", ()),
3260 kdu_name,
3261 ee_relation.vnf_profile_id,
3262 )
3263 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3264 return deployed_kdu
3265
3266 def _get_deployed_component(
3267 self,
3268 ee_relation: EERelation,
3269 db_nsr: Dict[str, Any],
3270 cached_vnfds: Dict[str, Any],
3271 ) -> DeployedComponent:
3272 nsr_id = db_nsr["_id"]
3273 deployed_component = None
3274 ee_level = EELevel.get_level(ee_relation)
3275 if ee_level == EELevel.NS:
3276 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3277 if vca:
3278 deployed_component = DeployedVCA(nsr_id, vca)
3279 elif ee_level == EELevel.VNF:
3280 vca = get_deployed_vca(
3281 db_nsr,
3282 {
3283 "vdu_id": None,
3284 "member-vnf-index": ee_relation.vnf_profile_id,
3285 "ee_descriptor_id": ee_relation.execution_environment_ref,
3286 },
3287 )
3288 if vca:
3289 deployed_component = DeployedVCA(nsr_id, vca)
3290 elif ee_level == EELevel.VDU:
3291 vca = get_deployed_vca(
3292 db_nsr,
3293 {
3294 "vdu_id": ee_relation.vdu_profile_id,
3295 "member-vnf-index": ee_relation.vnf_profile_id,
3296 "ee_descriptor_id": ee_relation.execution_environment_ref,
3297 },
3298 )
3299 if vca:
3300 deployed_component = DeployedVCA(nsr_id, vca)
3301 elif ee_level == EELevel.KDU:
3302 kdu_resource_data = self._get_kdu_resource_data(
3303 ee_relation, db_nsr, cached_vnfds
3304 )
3305 if kdu_resource_data:
3306 deployed_component = DeployedK8sResource(kdu_resource_data)
3307 return deployed_component
3308
    async def _add_relation(
        self,
        relation: Relation,
        vca_type: str,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
        cached_vnfrs: Dict[str, Any],
    ) -> bool:
        """Try to establish one relation between its two deployed endpoints.

        :param relation: relation (provider/requirer endpoints) to establish.
        :param vca_type: key into self.vca_map selecting the connector to use.
        :param db_nsr: NS record (used to locate deployed components).
        :param cached_vnfds: memoization dict for VNFD lookups.
        :param cached_vnfrs: memoization dict for VNFR lookups.
        :return: True if the relation was added; False if either endpoint is
            not yet deployed/configured, so the caller should retry later.
        :raises LcmException: if the connector fails while adding the relation.
        """
        deployed_provider = self._get_deployed_component(
            relation.provider, db_nsr, cached_vnfds
        )
        deployed_requirer = self._get_deployed_component(
            relation.requirer, db_nsr, cached_vnfds
        )
        # Only proceed when both sides exist and have their config sw installed.
        if (
            deployed_provider
            and deployed_requirer
            and deployed_provider.config_sw_installed
            and deployed_requirer.config_sw_installed
        ):
            # VNFRs are only needed for endpoints that belong to a VNF (to
            # resolve a possible per-VNF VCA id); NS-level endpoints pass None.
            provider_db_vnfr = (
                self._get_vnfr(
                    relation.provider.nsr_id,
                    relation.provider.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.provider.vnf_profile_id
                else None
            )
            requirer_db_vnfr = (
                self._get_vnfr(
                    relation.requirer.nsr_id,
                    relation.requirer.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.requirer.vnf_profile_id
                else None
            )
            provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
            requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
            provider_relation_endpoint = RelationEndpoint(
                deployed_provider.ee_id,
                provider_vca_id,
                relation.provider.endpoint,
            )
            requirer_relation_endpoint = RelationEndpoint(
                deployed_requirer.ee_id,
                requirer_vca_id,
                relation.requirer.endpoint,
            )
            try:
                await self.vca_map[vca_type].add_relation(
                    provider=provider_relation_endpoint,
                    requirer=requirer_relation_endpoint,
                )
            except N2VCException as exception:
                self.logger.error(exception)
                raise LcmException(exception)
            return True
        return False
3369
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_type: str,
        vca_index: int,
        timeout: int = 3600,
    ) -> bool:
        """Find and establish all relations involving one deployed VCA.

        Polls every 5 seconds (reloading the NS record each pass) until every
        relation is added or *timeout* seconds have elapsed.

        :param logging_text: prefix for log messages.
        :param nsr_id: NS record id.
        :param vca_type: key into self.vca_map (charm/helm connector).
        :param vca_index: index of this VCA in _admin.deployed.VCA.
        :param timeout: max seconds to wait for all relations to be added.
        :return: True on success (or nothing to do), False on timeout or error.
        """
        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = get_nsd(db_nsr)

            # this VCA data
            deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
            my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

            cached_vnfds = {}
            cached_vnfrs = {}
            relations = []
            relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
            relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

            # if no relations, terminate
            if not relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(logging_text + " adding relations {}".format(relations))

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deployed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each relation, find the VCA's related
                # iterate over a copy: successfully-added relations are removed
                for relation in relations.copy():
                    added = await self._add_relation(
                        relation,
                        vca_type,
                        db_nsr,
                        cached_vnfds,
                        cached_vnfrs,
                    )
                    if added:
                        relations.remove(relation)

                if not relations:
                    self.logger.debug("Relations added")
                    break
                await asyncio.sleep(5.0)

            return True

        except Exception as e:
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
3441
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its k8s cluster and update nsr/vnfr records.

        Generates (or reuses) the kdu-instance name, installs the model through
        the matching connector in self.k8scluster_map, records services and
        management IPs in the VNFR, and runs initial-config-primitives when the
        KDU has no juju execution environment.

        :param nsr_id: NS record id.
        :param nsr_db_path: dot-path inside the NSR where this KDU is tracked.
        :param vnfr_data: VNFR record owning the KDU.
        :param kdu_index: index of the KDU inside the VNFR "kdur" list.
        :param kdud: KDU descriptor from the VNFD.
        :param vnfd: VNFD record.
        :param k8s_instance_info: cluster/model/namespace info for the install.
        :param k8params: parameters passed to the install.
        :param timeout: seconds allowed for install and for each primitive.
        :param vca_id: optional VCA id to use.
        :return: the kdu_instance name.
        :raises Exception: re-raises any failure after marking the KDU as ERROR.
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for-else: no deployed service matched this mgmt service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception as error:
                # ignore to keep original exception
                self.logger.warning(
                    f"An exception occurred while updating DB: {str(error)}"
                )
            # reraise original error
            raise

        return kdu_instance
3640
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch an _install_kdu task for every KDU declared in the VNFRs.

        For each kdur: resolves the kdu model (helm chart or juju bundle),
        maps its k8s cluster id to the connector-internal uuid, synchronizes
        helm repos once per cluster, records the KDU in _admin.deployed.K8s
        and registers the install as an asyncio task.

        :param logging_text: prefix for log messages.
        :param nsr_id: NS record id.
        :param nslcmop_id: operation id used to register the tasks.
        :param db_vnfrs: dict of VNFR records of this NS.
        :param db_vnfds: list of VNFD records of this NS.
        :param task_instantiation_info: dict filled with task -> description.
        :raises LcmException: on any non-cancellation failure.
        """
        # Launch kdus if present in the descriptor

        # cache: cluster_type -> {cluster_id -> connector-internal uuid}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and cache) the connector-internal id of a k8s cluster,
            # initializing helm-v3 on legacy clusters when needed.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception as e:  # it is not a file
                        self.logger.warning(f"An exception occurred: {str(e)}")

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3912
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch an instantiate_N2VC task per execution environment of a descriptor.

        For each juju/helm execution environment in *descriptor_config*: finds
        (or creates) its slot in _admin.deployed.VCA, then schedules
        instantiate_N2VC as an asyncio task registered under *nslcmop_id*.
        Non juju/helm items (e.g. scripts) are skipped.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                # proxy charm by default; "cloud: k8s" or "proxy: false" override it
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            # look for an existing VCA slot matching this deployment target
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
4076
4077 def _format_additional_params(self, params):
4078 params = params or {}
4079 for key, value in params.items():
4080 if str(value).startswith("!!yaml "):
4081 params[key] = yaml.safe_load(value[7:])
4082 return params
4083
4084 def _get_terminate_primitive_params(self, seq, vnf_index):
4085 primitive = seq.get("name")
4086 primitive_params = {}
4087 params = {
4088 "member_vnf_index": vnf_index,
4089 "primitive": primitive,
4090 "primitive_params": primitive_params,
4091 }
4092 desc_params = {}
4093 return self._map_primitive_params(seq, params, desc_params)
4094
4095 # sub-operations
4096
4097 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
4098 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
4099 if op.get("operationState") == "COMPLETED":
4100 # b. Skip sub-operation
4101 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4102 return self.SUBOPERATION_STATUS_SKIP
4103 else:
4104 # c. retry executing sub-operation
4105 # The sub-operation exists, and operationState != 'COMPLETED'
4106 # Update operationState = 'PROCESSING' to indicate a retry.
4107 operationState = "PROCESSING"
4108 detailed_status = "In progress"
4109 self._update_suboperation_status(
4110 db_nslcmop, op_index, operationState, detailed_status
4111 )
4112 # Return the sub-operation index
4113 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4114 # with arguments extracted from the sub-operation
4115 return op_index
4116
4117 # Find a sub-operation where all keys in a matching dictionary must match
4118 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4119 def _find_suboperation(self, db_nslcmop, match):
4120 if db_nslcmop and match:
4121 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4122 for i, op in enumerate(op_list):
4123 if all(op.get(k) == match[k] for k in match):
4124 return i
4125 return self.SUBOPERATION_STATUS_NOT_FOUND
4126
4127 # Update status for a sub-operation given its index
4128 def _update_suboperation_status(
4129 self, db_nslcmop, op_index, operationState, detailed_status
4130 ):
4131 # Update DB for HA tasks
4132 q_filter = {"_id": db_nslcmop["_id"]}
4133 update_dict = {
4134 "_admin.operations.{}.operationState".format(op_index): operationState,
4135 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4136 }
4137 self.db.set_one(
4138 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4139 )
4140
4141 # Add sub-operation, return the index of the added sub-operation
4142 # Optionally, set operationState, detailed-status, and operationType
4143 # Status and type are currently set for 'scale' sub-operations:
4144 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4145 # 'detailed-status' : status message
4146 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4147 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4148 def _add_suboperation(
4149 self,
4150 db_nslcmop,
4151 vnf_index,
4152 vdu_id,
4153 vdu_count_index,
4154 vdu_name,
4155 primitive,
4156 mapped_primitive_params,
4157 operationState=None,
4158 detailed_status=None,
4159 operationType=None,
4160 RO_nsr_id=None,
4161 RO_scaling_info=None,
4162 ):
4163 if not db_nslcmop:
4164 return self.SUBOPERATION_STATUS_NOT_FOUND
4165 # Get the "_admin.operations" list, if it exists
4166 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4167 op_list = db_nslcmop_admin.get("operations")
4168 # Create or append to the "_admin.operations" list
4169 new_op = {
4170 "member_vnf_index": vnf_index,
4171 "vdu_id": vdu_id,
4172 "vdu_count_index": vdu_count_index,
4173 "primitive": primitive,
4174 "primitive_params": mapped_primitive_params,
4175 }
4176 if operationState:
4177 new_op["operationState"] = operationState
4178 if detailed_status:
4179 new_op["detailed-status"] = detailed_status
4180 if operationType:
4181 new_op["lcmOperationType"] = operationType
4182 if RO_nsr_id:
4183 new_op["RO_nsr_id"] = RO_nsr_id
4184 if RO_scaling_info:
4185 new_op["RO_scaling_info"] = RO_scaling_info
4186 if not op_list:
4187 # No existing operations, create key 'operations' with current operation as first list element
4188 db_nslcmop_admin.update({"operations": [new_op]})
4189 op_list = db_nslcmop_admin.get("operations")
4190 else:
4191 # Existing operations, append operation to list
4192 op_list.append(new_op)
4193
4194 db_nslcmop_update = {"_admin.operations": op_list}
4195 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4196 op_index = len(op_list) - 1
4197 return op_index
4198
4199 # Helper methods for scale() sub-operations
4200
4201 # pre-scale/post-scale:
4202 # Check for 3 different cases:
4203 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4204 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4205 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4206 def _check_or_add_scale_suboperation(
4207 self,
4208 db_nslcmop,
4209 vnf_index,
4210 vnf_config_primitive,
4211 primitive_params,
4212 operationType,
4213 RO_nsr_id=None,
4214 RO_scaling_info=None,
4215 ):
4216 # Find this sub-operation
4217 if RO_nsr_id and RO_scaling_info:
4218 operationType = "SCALE-RO"
4219 match = {
4220 "member_vnf_index": vnf_index,
4221 "RO_nsr_id": RO_nsr_id,
4222 "RO_scaling_info": RO_scaling_info,
4223 }
4224 else:
4225 match = {
4226 "member_vnf_index": vnf_index,
4227 "primitive": vnf_config_primitive,
4228 "primitive_params": primitive_params,
4229 "lcmOperationType": operationType,
4230 }
4231 op_index = self._find_suboperation(db_nslcmop, match)
4232 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4233 # a. New sub-operation
4234 # The sub-operation does not exist, add it.
4235 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4236 # The following parameters are set to None for all kind of scaling:
4237 vdu_id = None
4238 vdu_count_index = None
4239 vdu_name = None
4240 if RO_nsr_id and RO_scaling_info:
4241 vnf_config_primitive = None
4242 primitive_params = None
4243 else:
4244 RO_nsr_id = None
4245 RO_scaling_info = None
4246 # Initial status for sub-operation
4247 operationState = "PROCESSING"
4248 detailed_status = "In progress"
4249 # Add sub-operation for pre/post-scaling (zero or more operations)
4250 self._add_suboperation(
4251 db_nslcmop,
4252 vnf_index,
4253 vdu_id,
4254 vdu_count_index,
4255 vdu_name,
4256 vnf_config_primitive,
4257 primitive_params,
4258 operationState,
4259 detailed_status,
4260 operationType,
4261 RO_nsr_id,
4262 RO_scaling_info,
4263 )
4264 return self.SUBOPERATION_STATUS_NEW
4265 else:
4266 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4267 # or op_index (operationState != 'COMPLETED')
4268 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4269
4270 # Function to return execution_environment id
4271
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix for all log messages of this task
        :param db_nslcmop: nslcmop database record (sub-operations are appended to it)
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy the execution environment, because all of them
            will be destroyed at once afterwards
        :param exec_primitives: False to not execute terminate primitives, because the config is
            not completed or has not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (juju controller) id, when the NS uses a dedicated one
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default type kept for backward compatibility with pre-"type" records
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # "needed_terminate" is only set once the initial config completed,
            # so half-configured VCAs do not run terminate primitives
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4377
4378 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4379 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4380 namespace = "." + db_nsr["_id"]
4381 try:
4382 await self.n2vc.delete_namespace(
4383 namespace=namespace,
4384 total_timeout=self.timeout.charm_delete,
4385 vca_id=vca_id,
4386 )
4387 except N2VCNotFound: # already deleted. Skip
4388 pass
4389 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4390
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate a NS instance in three stages: run terminate primitives,
        delete all execution environments/KDUs, and remove the deployment from
        the VIM. Final NS/operation status is always written in the finally block.

        :param nsr_id: id of the NS record to terminate
        :param nslcmop_id: id of the nslcmop record driving this operation
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps asyncio task -> human-readable description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            # operation params may override the configured terminate timeout
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # deep copy: nsr_deployed is read after db_nsr may be modified elsewhere
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was ever deployed; finally block still writes final status
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs; each VNFD is fetched only once and indexed both
            # by its id and by the member-vnf-index that uses it
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the config descriptor matching the VCA scope: ns, vdu, kdu or vnf
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    # abort stage 3; the finally block reports the failure
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    namespace=db_nslcmop["nsInstanceId"],
                    certificate_name=self.EE_TLS_NAME,
                )
                await self.vca_map["helm-v3"].delete_namespace(
                    namespace=db_nslcmop["nsInstanceId"],
                )

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                    stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )
                if ns_state == "NOT_INSTANTIATED":
                    try:
                        # propagate final state to all VNFRs of this NS
                        self.db.set_list(
                            "vnfrs",
                            {"nsr-id-ref": nsr_id},
                            {"_admin.nsState": "NOT_INSTANTIATED"},
                        )
                    except DbException as e:
                        self.logger.warn(
                            logging_text
                            + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                                nsr_id, e
                            )
                        )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify NBI so it can autoremove the NSR when requested
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
            self.db.del_list("alerts", {"tags.ns_id": nsr_id})

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4724
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of asyncio tasks, collecting and reporting errors as
        each one finishes.

        Progress ("done/total") is written to stage[1] and persisted via
        _write_op_status after every batch of completed tasks.

        :param logging_text: prefix for all log messages
        :param created_tasks_info: dict mapping each task to a description string
        :param timeout: global timeout (seconds) for the whole set of tasks
        :param stage: mutable [stage, step, VIM-status] list, updated in place
        :param nslcmop_id: operation id whose status is updated with progress
        :param nsr_id: when provided, errors are also written to the nsr record
        :return: list of error detail strings (empty when all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # every still-pending task is reported as timed out
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/controlled failures are logged briefly; anything
                    # else gets its full traceback logged
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4801
4802 @staticmethod
4803 def _map_primitive_params(primitive_desc, params, instantiation_params):
4804 """
4805 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4806 The default-value is used. If it is between < > it look for a value at instantiation_params
4807 :param primitive_desc: portion of VNFD/NSD that describes primitive
4808 :param params: Params provided by user
4809 :param instantiation_params: Instantiation params provided by user
4810 :return: a dictionary with the calculated params
4811 """
4812 calculated_params = {}
4813 for parameter in primitive_desc.get("parameter", ()):
4814 param_name = parameter["name"]
4815 if param_name in params:
4816 calculated_params[param_name] = params[param_name]
4817 elif "default-value" in parameter or "value" in parameter:
4818 if "value" in parameter:
4819 calculated_params[param_name] = parameter["value"]
4820 else:
4821 calculated_params[param_name] = parameter["default-value"]
4822 if (
4823 isinstance(calculated_params[param_name], str)
4824 and calculated_params[param_name].startswith("<")
4825 and calculated_params[param_name].endswith(">")
4826 ):
4827 if calculated_params[param_name][1:-1] in instantiation_params:
4828 calculated_params[param_name] = instantiation_params[
4829 calculated_params[param_name][1:-1]
4830 ]
4831 else:
4832 raise LcmException(
4833 "Parameter {} needed to execute primitive {} not provided".format(
4834 calculated_params[param_name], primitive_desc["name"]
4835 )
4836 )
4837 else:
4838 raise LcmException(
4839 "Parameter {} needed to execute primitive {} not provided".format(
4840 param_name, primitive_desc["name"]
4841 )
4842 )
4843
4844 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4845 calculated_params[param_name] = yaml.safe_dump(
4846 calculated_params[param_name], default_flow_style=True, width=256
4847 )
4848 elif isinstance(calculated_params[param_name], str) and calculated_params[
4849 param_name
4850 ].startswith("!!yaml "):
4851 calculated_params[param_name] = calculated_params[param_name][7:]
4852 if parameter.get("data-type") == "INTEGER":
4853 try:
4854 calculated_params[param_name] = int(calculated_params[param_name])
4855 except ValueError: # error converting string to int
4856 raise LcmException(
4857 "Parameter {} of primitive {} must be integer".format(
4858 param_name, primitive_desc["name"]
4859 )
4860 )
4861 elif parameter.get("data-type") == "BOOLEAN":
4862 calculated_params[param_name] = not (
4863 (str(calculated_params[param_name])).lower() == "false"
4864 )
4865
4866 # add always ns_config_info if primitive name is config
4867 if primitive_desc["name"] == "config":
4868 if "ns_config_info" in instantiation_params:
4869 calculated_params["ns_config_info"] = instantiation_params[
4870 "ns_config_info"
4871 ]
4872 return calculated_params
4873
4874 def _look_for_deployed_vca(
4875 self,
4876 deployed_vca,
4877 member_vnf_index,
4878 vdu_id,
4879 vdu_count_index,
4880 kdu_name=None,
4881 ee_descriptor_id=None,
4882 ):
4883 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4884 for vca in deployed_vca:
4885 if not vca:
4886 continue
4887 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4888 continue
4889 if (
4890 vdu_count_index is not None
4891 and vdu_count_index != vca["vdu_count_index"]
4892 ):
4893 continue
4894 if kdu_name and kdu_name != vca["kdu_name"]:
4895 continue
4896 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4897 continue
4898 break
4899 else:
4900 # vca_deployed not found
4901 raise LcmException(
4902 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4903 " is not deployed".format(
4904 member_vnf_index,
4905 vdu_id,
4906 vdu_count_index,
4907 kdu_name,
4908 ee_descriptor_id,
4909 )
4910 )
4911 # get ee_id
4912 ee_id = vca.get("ee_id")
4913 vca_type = vca.get(
4914 "type", "lxc_proxy_charm"
4915 ) # default value for backward compatibility - proxy charm
4916 if not ee_id:
4917 raise LcmException(
4918 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4919 "execution environment".format(
4920 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4921 )
4922 )
4923 return ee_id, vca_type
4924
4925 async def _ns_execute_primitive(
4926 self,
4927 ee_id,
4928 primitive,
4929 primitive_params,
4930 retries=0,
4931 retries_interval=30,
4932 timeout=None,
4933 vca_type=None,
4934 db_dict=None,
4935 vca_id: str = None,
4936 ) -> (str, str):
4937 try:
4938 if primitive == "config":
4939 primitive_params = {"params": primitive_params}
4940
4941 vca_type = vca_type or "lxc_proxy_charm"
4942
4943 while retries >= 0:
4944 try:
4945 output = await asyncio.wait_for(
4946 self.vca_map[vca_type].exec_primitive(
4947 ee_id=ee_id,
4948 primitive_name=primitive,
4949 params_dict=primitive_params,
4950 progress_timeout=self.timeout.progress_primitive,
4951 total_timeout=self.timeout.primitive,
4952 db_dict=db_dict,
4953 vca_id=vca_id,
4954 vca_type=vca_type,
4955 ),
4956 timeout=timeout or self.timeout.primitive,
4957 )
4958 # execution was OK
4959 break
4960 except asyncio.CancelledError:
4961 raise
4962 except Exception as e:
4963 retries -= 1
4964 if retries >= 0:
4965 self.logger.debug(
4966 "Error executing action {} on {} -> {}".format(
4967 primitive, ee_id, e
4968 )
4969 )
4970 # wait and retry
4971 await asyncio.sleep(retries_interval)
4972 else:
4973 if isinstance(e, asyncio.TimeoutError):
4974 e = N2VCException(
4975 message="Timed out waiting for action to complete"
4976 )
4977 return "FAILED", getattr(e, "message", repr(e))
4978
4979 return "COMPLETED", output
4980
4981 except (LcmException, asyncio.CancelledError):
4982 raise
4983 except Exception as e:
4984 return "FAIL", "Error executing action {}: {}".format(primitive, e)
4985
4986 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4987 """
4988 Updating the vca_status with latest juju information in nsrs record
4989 :param: nsr_id: Id of the nsr
4990 :param: nslcmop_id: Id of the nslcmop
4991 :return: None
4992 """
4993
4994 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4995 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4996 vca_id = self.get_vca_id({}, db_nsr)
4997 if db_nsr["_admin"]["deployed"]["K8s"]:
4998 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4999 cluster_uuid, kdu_instance, cluster_type = (
5000 k8s["k8scluster-uuid"],
5001 k8s["kdu-instance"],
5002 k8s["k8scluster-type"],
5003 )
5004 await self._on_update_k8s_db(
5005 cluster_uuid=cluster_uuid,
5006 kdu_instance=kdu_instance,
5007 filter={"_id": nsr_id},
5008 vca_id=vca_id,
5009 cluster_type=cluster_type,
5010 )
5011 else:
5012 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5013 table, filter = "nsrs", {"_id": nsr_id}
5014 path = "_admin.deployed.VCA.{}.".format(vca_index)
5015 await self._on_update_n2vc_db(table, filter, path, {})
5016
5017 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5018 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5019
    async def action(self, nsr_id, nslcmop_id):
        """Execute an NS action (primitive) on a deployed VCA or KDU.

        Resolves the primitive named in the nslcmop operationParams against the
        [ns|vnf|vdu]-configuration sections of the descriptors, then runs it
        either through the k8s cluster connector (KDU upgrade/rollback/status
        or a KDU action) or through the VCA execution environment. The result
        is persisted to the nslcmops/nsrs records in the finally block.

        :param nsr_id: Id of the NS record the action applies to
        :param nslcmop_id: Id of the ns lcm operation record describing the action
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        # "step" tracks progress so error messages in the handlers can say
        # where the failure happened
        step = ""
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params are stored JSON-encoded; decode them in place
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout.primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                if db_vnfr.get("kdur"):
                    # kdur additionalParams are stored JSON-encoded; decode each
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only bound inside the "if vnf_index"
            # branch above; on the NS-level path this raises NameError, caught
            # by the generic handler below — confirm whether intended.
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # only KDU-native operations may run without a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # Collect the additional params that act as defaults for the primitive
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): these loops rebind the local "primitive"; the
                # original primitive name is preserved in primitive_name, which
                # is what later code uses.
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        if kdu_model.count("/") < 2:  # helm chart is not embedded
                            parts = kdu_model.split(sep=":")
                            if len(parts) == 2:
                                # strip the ":version" suffix from the chart name
                                kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        # outer timeout slightly above the connector's own timeout
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # generic KDU action declared in the descriptor
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # VCA (charm) primitive execution
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # Persist the final operation state regardless of the outcome
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    # notify other modules of the action result via kafka
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
        return nslcmop_operation_state, detailed_status
5391
5392 async def terminate_vdus(
5393 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5394 ):
5395 """This method terminates VDUs
5396
5397 Args:
5398 db_vnfr: VNF instance record
5399 member_vnf_index: VNF index to identify the VDUs to be removed
5400 db_nsr: NS instance record
5401 update_db_nslcmops: Nslcmop update record
5402 """
5403 vca_scaling_info = []
5404 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5405 scaling_info["scaling_direction"] = "IN"
5406 scaling_info["vdu-delete"] = {}
5407 scaling_info["kdu-delete"] = {}
5408 db_vdur = db_vnfr.get("vdur")
5409 vdur_list = copy(db_vdur)
5410 count_index = 0
5411 for index, vdu in enumerate(vdur_list):
5412 vca_scaling_info.append(
5413 {
5414 "osm_vdu_id": vdu["vdu-id-ref"],
5415 "member-vnf-index": member_vnf_index,
5416 "type": "delete",
5417 "vdu_index": count_index,
5418 }
5419 )
5420 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5421 scaling_info["vdu"].append(
5422 {
5423 "name": vdu.get("name") or vdu.get("vdu-name"),
5424 "vdu_id": vdu["vdu-id-ref"],
5425 "interface": [],
5426 }
5427 )
5428 for interface in vdu["interfaces"]:
5429 scaling_info["vdu"][index]["interface"].append(
5430 {
5431 "name": interface["name"],
5432 "ip_address": interface["ip-address"],
5433 "mac_address": interface.get("mac-address"),
5434 }
5435 )
5436 self.logger.info("NS update scaling info{}".format(scaling_info))
5437 stage[2] = "Terminating VDUs"
5438 if scaling_info.get("vdu-delete"):
5439 # scale_process = "RO"
5440 if self.ro_config.ng:
5441 await self._scale_ng_ro(
5442 logging_text,
5443 db_nsr,
5444 update_db_nslcmops,
5445 db_vnfr,
5446 scaling_info,
5447 stage,
5448 )
5449
5450 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5451 """This method is to Remove VNF instances from NS.
5452
5453 Args:
5454 nsr_id: NS instance id
5455 nslcmop_id: nslcmop id of update
5456 vnf_instance_id: id of the VNF instance to be removed
5457
5458 Returns:
5459 result: (str, str) COMPLETED/FAILED, details
5460 """
5461 try:
5462 db_nsr_update = {}
5463 logging_text = "Task ns={} update ".format(nsr_id)
5464 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5465 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5466 if check_vnfr_count > 1:
5467 stage = ["", "", ""]
5468 step = "Getting nslcmop from database"
5469 self.logger.debug(
5470 step + " after having waited for previous tasks to be completed"
5471 )
5472 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5473 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5474 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5475 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5476 """ db_vnfr = self.db.get_one(
5477 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5478
5479 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5480 await self.terminate_vdus(
5481 db_vnfr,
5482 member_vnf_index,
5483 db_nsr,
5484 update_db_nslcmops,
5485 stage,
5486 logging_text,
5487 )
5488
5489 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5490 constituent_vnfr.remove(db_vnfr.get("_id"))
5491 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5492 "constituent-vnfr-ref"
5493 )
5494 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5495 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5496 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5497 return "COMPLETED", "Done"
5498 else:
5499 step = "Terminate VNF Failed with"
5500 raise LcmException(
5501 "{} Cannot terminate the last VNF in this NS.".format(
5502 vnf_instance_id
5503 )
5504 )
5505 except (LcmException, asyncio.CancelledError):
5506 raise
5507 except Exception as e:
5508 self.logger.debug("Error removing VNF {}".format(e))
5509 return "FAILED", "Error removing VNF {}".format(e)
5510
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the current VDUs of the VNF, rewrites the VNF record
        (connection points, vdur, revision) from the latest descriptor and
        re-instantiates the VDU resources through RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            # NOTE(review): count_index is never incremented, so every VDU
            # gets index 0 in the scaling info — confirm this is intended.
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # Rebuild the connection-point list from the latest descriptor
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur list is provided by the caller in the operation params
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # re-read the record so subsequent steps see the persisted state
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                # NOTE(review): cloud_init_list is rebuilt per VDU and never
                # consumed after the loop; _parse_cloud_init is kept for its
                # validation side effect (it may raise) — confirm.
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5636
5637 async def _ns_charm_upgrade(
5638 self,
5639 ee_id,
5640 charm_id,
5641 charm_type,
5642 path,
5643 timeout: float = None,
5644 ) -> (str, str):
5645 """This method upgrade charms in VNF instances
5646
5647 Args:
5648 ee_id: Execution environment id
5649 path: Local path to the charm
5650 charm_id: charm-id
5651 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5652 timeout: (Float) Timeout for the ns update operation
5653
5654 Returns:
5655 result: (str, str) COMPLETED/FAILED, details
5656 """
5657 try:
5658 charm_type = charm_type or "lxc_proxy_charm"
5659 output = await self.vca_map[charm_type].upgrade_charm(
5660 ee_id=ee_id,
5661 path=path,
5662 charm_id=charm_id,
5663 charm_type=charm_type,
5664 timeout=timeout or self.timeout.ns_update,
5665 )
5666
5667 if output:
5668 return "COMPLETED", output
5669
5670 except (LcmException, asyncio.CancelledError):
5671 raise
5672
5673 except Exception as e:
5674 self.logger.debug("Error upgrading charm {}".format(path))
5675
5676 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5677
5678 async def update(self, nsr_id, nslcmop_id):
5679 """Update NS according to different update types
5680
5681 This method performs upgrade of VNF instances then updates the revision
5682 number in VNF record
5683
5684 Args:
5685 nsr_id: Network service will be updated
5686 nslcmop_id: ns lcm operation id
5687
5688 Returns:
5689 It may raise DbException, LcmException, N2VCException, K8sException
5690
5691 """
5692 # Try to lock HA task here
5693 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5694 if not task_is_locked_by_me:
5695 return
5696
5697 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5698 self.logger.debug(logging_text + "Enter")
5699
5700 # Set the required variables to be filled up later
5701 db_nsr = None
5702 db_nslcmop_update = {}
5703 vnfr_update = {}
5704 nslcmop_operation_state = None
5705 db_nsr_update = {}
5706 error_description_nslcmop = ""
5707 exc = None
5708 change_type = "updated"
5709 detailed_status = ""
5710 member_vnf_index = None
5711
5712 try:
5713 # wait for any previous tasks in process
5714 step = "Waiting for previous operations to terminate"
5715 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5716 self._write_ns_status(
5717 nsr_id=nsr_id,
5718 ns_state=None,
5719 current_operation="UPDATING",
5720 current_operation_id=nslcmop_id,
5721 )
5722
5723 step = "Getting nslcmop from database"
5724 db_nslcmop = self.db.get_one(
5725 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5726 )
5727 update_type = db_nslcmop["operationParams"]["updateType"]
5728
5729 step = "Getting nsr from database"
5730 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5731 old_operational_status = db_nsr["operational-status"]
5732 db_nsr_update["operational-status"] = "updating"
5733 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5734 nsr_deployed = db_nsr["_admin"].get("deployed")
5735
5736 if update_type == "CHANGE_VNFPKG":
5737 # Get the input parameters given through update request
5738 vnf_instance_id = db_nslcmop["operationParams"][
5739 "changeVnfPackageData"
5740 ].get("vnfInstanceId")
5741
5742 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5743 "vnfdId"
5744 )
5745 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5746
5747 step = "Getting vnfr from database"
5748 db_vnfr = self.db.get_one(
5749 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5750 )
5751
5752 step = "Getting vnfds from database"
5753 # Latest VNFD
5754 latest_vnfd = self.db.get_one(
5755 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5756 )
5757 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5758
5759 # Current VNFD
5760 current_vnf_revision = db_vnfr.get("revision", 1)
5761 current_vnfd = self.db.get_one(
5762 "vnfds_revisions",
5763 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5764 fail_on_empty=False,
5765 )
5766 # Charm artifact paths will be filled up later
5767 (
5768 current_charm_artifact_path,
5769 target_charm_artifact_path,
5770 charm_artifact_paths,
5771 helm_artifacts,
5772 ) = ([], [], [], [])
5773
5774 step = "Checking if revision has changed in VNFD"
5775 if current_vnf_revision != latest_vnfd_revision:
5776 change_type = "policy_updated"
5777
5778 # There is new revision of VNFD, update operation is required
5779 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5780 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5781
5782 step = "Removing the VNFD packages if they exist in the local path"
5783 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5784 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5785
5786 step = "Get the VNFD packages from FSMongo"
5787 self.fs.sync(from_path=latest_vnfd_path)
5788 self.fs.sync(from_path=current_vnfd_path)
5789
5790 step = (
5791 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5792 )
5793 current_base_folder = current_vnfd["_admin"]["storage"]
5794 latest_base_folder = latest_vnfd["_admin"]["storage"]
5795
5796 for vca_index, vca_deployed in enumerate(
5797 get_iterable(nsr_deployed, "VCA")
5798 ):
5799 vnf_index = db_vnfr.get("member-vnf-index-ref")
5800
5801 # Getting charm-id and charm-type
5802 if vca_deployed.get("member-vnf-index") == vnf_index:
5803 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5804 vca_type = vca_deployed.get("type")
5805 vdu_count_index = vca_deployed.get("vdu_count_index")
5806
5807 # Getting ee-id
5808 ee_id = vca_deployed.get("ee_id")
5809
5810 step = "Getting descriptor config"
5811 if current_vnfd.get("kdu"):
5812 search_key = "kdu_name"
5813 else:
5814 search_key = "vnfd_id"
5815
5816 entity_id = vca_deployed.get(search_key)
5817
5818 descriptor_config = get_configuration(
5819 current_vnfd, entity_id
5820 )
5821
5822 if "execution-environment-list" in descriptor_config:
5823 ee_list = descriptor_config.get(
5824 "execution-environment-list", []
5825 )
5826 else:
5827 ee_list = []
5828
5829 # There could be several charm used in the same VNF
5830 for ee_item in ee_list:
5831 if ee_item.get("juju"):
5832 step = "Getting charm name"
5833 charm_name = ee_item["juju"].get("charm")
5834
5835 step = "Setting Charm artifact paths"
5836 current_charm_artifact_path.append(
5837 get_charm_artifact_path(
5838 current_base_folder,
5839 charm_name,
5840 vca_type,
5841 current_vnf_revision,
5842 )
5843 )
5844 target_charm_artifact_path.append(
5845 get_charm_artifact_path(
5846 latest_base_folder,
5847 charm_name,
5848 vca_type,
5849 latest_vnfd_revision,
5850 )
5851 )
5852 elif ee_item.get("helm-chart"):
5853 # add chart to list and all parameters
5854 step = "Getting helm chart name"
5855 chart_name = ee_item.get("helm-chart")
5856 if (
5857 ee_item.get("helm-version")
5858 and ee_item.get("helm-version") == "v2"
5859 ):
5860 vca_type = "helm"
5861 else:
5862 vca_type = "helm-v3"
5863 step = "Setting Helm chart artifact paths"
5864
5865 helm_artifacts.append(
5866 {
5867 "current_artifact_path": get_charm_artifact_path(
5868 current_base_folder,
5869 chart_name,
5870 vca_type,
5871 current_vnf_revision,
5872 ),
5873 "target_artifact_path": get_charm_artifact_path(
5874 latest_base_folder,
5875 chart_name,
5876 vca_type,
5877 latest_vnfd_revision,
5878 ),
5879 "ee_id": ee_id,
5880 "vca_index": vca_index,
5881 "vdu_index": vdu_count_index,
5882 }
5883 )
5884
5885 charm_artifact_paths = zip(
5886 current_charm_artifact_path, target_charm_artifact_path
5887 )
5888
5889 step = "Checking if software version has changed in VNFD"
5890 if find_software_version(current_vnfd) != find_software_version(
5891 latest_vnfd
5892 ):
5893 step = "Checking if existing VNF has charm"
5894 for current_charm_path, target_charm_path in list(
5895 charm_artifact_paths
5896 ):
5897 if current_charm_path:
5898 raise LcmException(
5899 "Software version change is not supported as VNF instance {} has charm.".format(
5900 vnf_instance_id
5901 )
5902 )
5903
5904 # There is no change in the charm package, then redeploy the VNF
5905 # based on new descriptor
5906 step = "Redeploying VNF"
5907 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5908 (result, detailed_status) = await self._ns_redeploy_vnf(
5909 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5910 )
5911 if result == "FAILED":
5912 nslcmop_operation_state = result
5913 error_description_nslcmop = detailed_status
5914 db_nslcmop_update["detailed-status"] = detailed_status
5915 self.logger.debug(
5916 logging_text
5917 + " step {} Done with result {} {}".format(
5918 step, nslcmop_operation_state, detailed_status
5919 )
5920 )
5921
5922 else:
5923 step = "Checking if any charm package has changed or not"
5924 for current_charm_path, target_charm_path in list(
5925 charm_artifact_paths
5926 ):
5927 if (
5928 current_charm_path
5929 and target_charm_path
5930 and self.check_charm_hash_changed(
5931 current_charm_path, target_charm_path
5932 )
5933 ):
5934 step = "Checking whether VNF uses juju bundle"
5935 if check_juju_bundle_existence(current_vnfd):
5936 raise LcmException(
5937 "Charm upgrade is not supported for the instance which"
5938 " uses juju-bundle: {}".format(
5939 check_juju_bundle_existence(current_vnfd)
5940 )
5941 )
5942
5943 step = "Upgrading Charm"
5944 (
5945 result,
5946 detailed_status,
5947 ) = await self._ns_charm_upgrade(
5948 ee_id=ee_id,
5949 charm_id=vca_id,
5950 charm_type=vca_type,
5951 path=self.fs.path + target_charm_path,
5952 timeout=timeout_seconds,
5953 )
5954
5955 if result == "FAILED":
5956 nslcmop_operation_state = result
5957 error_description_nslcmop = detailed_status
5958
5959 db_nslcmop_update["detailed-status"] = detailed_status
5960 self.logger.debug(
5961 logging_text
5962 + " step {} Done with result {} {}".format(
5963 step, nslcmop_operation_state, detailed_status
5964 )
5965 )
5966
5967 step = "Updating policies"
5968 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5969 result = "COMPLETED"
5970 detailed_status = "Done"
5971 db_nslcmop_update["detailed-status"] = "Done"
5972
5973 # helm base EE
5974 for item in helm_artifacts:
5975 if not (
5976 item["current_artifact_path"]
5977 and item["target_artifact_path"]
5978 and self.check_charm_hash_changed(
5979 item["current_artifact_path"],
5980 item["target_artifact_path"],
5981 )
5982 ):
5983 continue
5984 db_update_entry = "_admin.deployed.VCA.{}.".format(
5985 item["vca_index"]
5986 )
5987 vnfr_id = db_vnfr["_id"]
5988 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
5989 db_dict = {
5990 "collection": "nsrs",
5991 "filter": {"_id": nsr_id},
5992 "path": db_update_entry,
5993 }
5994 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
5995 await self.vca_map[vca_type].upgrade_execution_environment(
5996 namespace=namespace,
5997 helm_id=helm_id,
5998 db_dict=db_dict,
5999 config=osm_config,
6000 artifact_path=item["target_artifact_path"],
6001 vca_type=vca_type,
6002 )
6003 vnf_id = db_vnfr.get("vnfd-ref")
6004 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6005 self.logger.debug("get ssh key block")
6006 rw_mgmt_ip = None
6007 if deep_get(
6008 config_descriptor,
6009 ("config-access", "ssh-access", "required"),
6010 ):
6011 # Needed to inject a ssh key
6012 user = deep_get(
6013 config_descriptor,
6014 ("config-access", "ssh-access", "default-user"),
6015 )
6016 step = (
6017 "Install configuration Software, getting public ssh key"
6018 )
6019 pub_key = await self.vca_map[
6020 vca_type
6021 ].get_ee_ssh_public__key(
6022 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6023 )
6024
6025 step = (
6026 "Insert public key into VM user={} ssh_key={}".format(
6027 user, pub_key
6028 )
6029 )
6030 self.logger.debug(logging_text + step)
6031
6032 # wait for RO (ip-address) Insert pub_key into VM
6033 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6034 logging_text,
6035 nsr_id,
6036 vnfr_id,
6037 None,
6038 item["vdu_index"],
6039 user=user,
6040 pub_key=pub_key,
6041 )
6042
6043 initial_config_primitive_list = config_descriptor.get(
6044 "initial-config-primitive"
6045 )
6046 config_primitive = next(
6047 (
6048 p
6049 for p in initial_config_primitive_list
6050 if p["name"] == "config"
6051 ),
6052 None,
6053 )
6054 if not config_primitive:
6055 continue
6056
6057 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6058 if rw_mgmt_ip:
6059 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6060 if db_vnfr.get("additionalParamsForVnf"):
6061 deploy_params.update(
6062 parse_yaml_strings(
6063 db_vnfr["additionalParamsForVnf"].copy()
6064 )
6065 )
6066 primitive_params_ = self._map_primitive_params(
6067 config_primitive, {}, deploy_params
6068 )
6069
6070 step = "execute primitive '{}' params '{}'".format(
6071 config_primitive["name"], primitive_params_
6072 )
6073 self.logger.debug(logging_text + step)
6074 await self.vca_map[vca_type].exec_primitive(
6075 ee_id=ee_id,
6076 primitive_name=config_primitive["name"],
6077 params_dict=primitive_params_,
6078 db_dict=db_dict,
6079 vca_id=vca_id,
6080 vca_type=vca_type,
6081 )
6082
6083 step = "Updating policies"
6084 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6085 detailed_status = "Done"
6086 db_nslcmop_update["detailed-status"] = "Done"
6087
6088 # If nslcmop_operation_state is None, so any operation is not failed.
6089 if not nslcmop_operation_state:
6090 nslcmop_operation_state = "COMPLETED"
6091
6092 # If update CHANGE_VNFPKG nslcmop_operation is successful
6093 # vnf revision need to be updated
6094 vnfr_update["revision"] = latest_vnfd_revision
6095 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6096
6097 self.logger.debug(
6098 logging_text
6099 + " task Done with result {} {}".format(
6100 nslcmop_operation_state, detailed_status
6101 )
6102 )
6103 elif update_type == "REMOVE_VNF":
6104 # This part is included in https://osm.etsi.org/gerrit/11876
6105 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6106 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6107 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6108 step = "Removing VNF"
6109 (result, detailed_status) = await self.remove_vnf(
6110 nsr_id, nslcmop_id, vnf_instance_id
6111 )
6112 if result == "FAILED":
6113 nslcmop_operation_state = result
6114 error_description_nslcmop = detailed_status
6115 db_nslcmop_update["detailed-status"] = detailed_status
6116 change_type = "vnf_terminated"
6117 if not nslcmop_operation_state:
6118 nslcmop_operation_state = "COMPLETED"
6119 self.logger.debug(
6120 logging_text
6121 + " task Done with result {} {}".format(
6122 nslcmop_operation_state, detailed_status
6123 )
6124 )
6125
6126 elif update_type == "OPERATE_VNF":
6127 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6128 "vnfInstanceId"
6129 ]
6130 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6131 "changeStateTo"
6132 ]
6133 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6134 "additionalParam"
6135 ]
6136 (result, detailed_status) = await self.rebuild_start_stop(
6137 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6138 )
6139 if result == "FAILED":
6140 nslcmop_operation_state = result
6141 error_description_nslcmop = detailed_status
6142 db_nslcmop_update["detailed-status"] = detailed_status
6143 if not nslcmop_operation_state:
6144 nslcmop_operation_state = "COMPLETED"
6145 self.logger.debug(
6146 logging_text
6147 + " task Done with result {} {}".format(
6148 nslcmop_operation_state, detailed_status
6149 )
6150 )
6151
6152 # If nslcmop_operation_state is None, so any operation is not failed.
6153 # All operations are executed in overall.
6154 if not nslcmop_operation_state:
6155 nslcmop_operation_state = "COMPLETED"
6156 db_nsr_update["operational-status"] = old_operational_status
6157
6158 except (DbException, LcmException, N2VCException, K8sException) as e:
6159 self.logger.error(logging_text + "Exit Exception {}".format(e))
6160 exc = e
6161 except asyncio.CancelledError:
6162 self.logger.error(
6163 logging_text + "Cancelled Exception while '{}'".format(step)
6164 )
6165 exc = "Operation was cancelled"
6166 except asyncio.TimeoutError:
6167 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6168 exc = "Timeout"
6169 except Exception as e:
6170 exc = traceback.format_exc()
6171 self.logger.critical(
6172 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6173 exc_info=True,
6174 )
6175 finally:
6176 if exc:
6177 db_nslcmop_update[
6178 "detailed-status"
6179 ] = (
6180 detailed_status
6181 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6182 nslcmop_operation_state = "FAILED"
6183 db_nsr_update["operational-status"] = old_operational_status
6184 if db_nsr:
6185 self._write_ns_status(
6186 nsr_id=nsr_id,
6187 ns_state=db_nsr["nsState"],
6188 current_operation="IDLE",
6189 current_operation_id=None,
6190 other_update=db_nsr_update,
6191 )
6192
6193 self._write_op_status(
6194 op_id=nslcmop_id,
6195 stage="",
6196 error_message=error_description_nslcmop,
6197 operation_state=nslcmop_operation_state,
6198 other_update=db_nslcmop_update,
6199 )
6200
6201 if nslcmop_operation_state:
6202 try:
6203 msg = {
6204 "nsr_id": nsr_id,
6205 "nslcmop_id": nslcmop_id,
6206 "operationState": nslcmop_operation_state,
6207 }
6208 if (
6209 change_type in ("vnf_terminated", "policy_updated")
6210 and member_vnf_index
6211 ):
6212 msg.update({"vnf_member_index": member_vnf_index})
6213 await self.msg.aiowrite("ns", change_type, msg)
6214 except Exception as e:
6215 self.logger.error(
6216 logging_text + "kafka_write notification Exception {}".format(e)
6217 )
6218 self.logger.debug(logging_text + "Exit")
6219 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6220 return nslcmop_operation_state, detailed_status
6221
6222 async def scale(self, nsr_id, nslcmop_id):
6223 # Try to lock HA task here
6224 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6225 if not task_is_locked_by_me:
6226 return
6227
6228 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6229 stage = ["", "", ""]
6230 tasks_dict_info = {}
6231 # ^ stage, step, VIM progress
6232 self.logger.debug(logging_text + "Enter")
6233 # get all needed from database
6234 db_nsr = None
6235 db_nslcmop_update = {}
6236 db_nsr_update = {}
6237 exc = None
6238 # in case of error, indicates what part of scale was failed to put nsr at error status
6239 scale_process = None
6240 old_operational_status = ""
6241 old_config_status = ""
6242 nsi_id = None
6243 try:
6244 # wait for any previous tasks in process
6245 step = "Waiting for previous operations to terminate"
6246 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6247 self._write_ns_status(
6248 nsr_id=nsr_id,
6249 ns_state=None,
6250 current_operation="SCALING",
6251 current_operation_id=nslcmop_id,
6252 )
6253
6254 step = "Getting nslcmop from database"
6255 self.logger.debug(
6256 step + " after having waited for previous tasks to be completed"
6257 )
6258 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6259
6260 step = "Getting nsr from database"
6261 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6262 old_operational_status = db_nsr["operational-status"]
6263 old_config_status = db_nsr["config-status"]
6264
6265 step = "Parsing scaling parameters"
6266 db_nsr_update["operational-status"] = "scaling"
6267 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6268 nsr_deployed = db_nsr["_admin"].get("deployed")
6269
6270 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6271 "scaleByStepData"
6272 ]["member-vnf-index"]
6273 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6274 "scaleByStepData"
6275 ]["scaling-group-descriptor"]
6276 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6277 # for backward compatibility
6278 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6279 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6280 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6281 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6282
6283 step = "Getting vnfr from database"
6284 db_vnfr = self.db.get_one(
6285 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6286 )
6287
6288 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6289
6290 step = "Getting vnfd from database"
6291 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6292
6293 base_folder = db_vnfd["_admin"]["storage"]
6294
6295 step = "Getting scaling-group-descriptor"
6296 scaling_descriptor = find_in_list(
6297 get_scaling_aspect(db_vnfd),
6298 lambda scale_desc: scale_desc["name"] == scaling_group,
6299 )
6300 if not scaling_descriptor:
6301 raise LcmException(
6302 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6303 "at vnfd:scaling-group-descriptor".format(scaling_group)
6304 )
6305
6306 step = "Sending scale order to VIM"
6307 # TODO check if ns is in a proper status
6308 nb_scale_op = 0
6309 if not db_nsr["_admin"].get("scaling-group"):
6310 self.update_db_2(
6311 "nsrs",
6312 nsr_id,
6313 {
6314 "_admin.scaling-group": [
6315 {"name": scaling_group, "nb-scale-op": 0}
6316 ]
6317 },
6318 )
6319 admin_scale_index = 0
6320 else:
6321 for admin_scale_index, admin_scale_info in enumerate(
6322 db_nsr["_admin"]["scaling-group"]
6323 ):
6324 if admin_scale_info["name"] == scaling_group:
6325 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6326 break
6327 else: # not found, set index one plus last element and add new entry with the name
6328 admin_scale_index += 1
6329 db_nsr_update[
6330 "_admin.scaling-group.{}.name".format(admin_scale_index)
6331 ] = scaling_group
6332
6333 vca_scaling_info = []
6334 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6335 if scaling_type == "SCALE_OUT":
6336 if "aspect-delta-details" not in scaling_descriptor:
6337 raise LcmException(
6338 "Aspect delta details not fount in scaling descriptor {}".format(
6339 scaling_descriptor["name"]
6340 )
6341 )
6342 # count if max-instance-count is reached
6343 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6344
6345 scaling_info["scaling_direction"] = "OUT"
6346 scaling_info["vdu-create"] = {}
6347 scaling_info["kdu-create"] = {}
6348 for delta in deltas:
6349 for vdu_delta in delta.get("vdu-delta", {}):
6350 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6351 # vdu_index also provides the number of instance of the targeted vdu
6352 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6353 cloud_init_text = self._get_vdu_cloud_init_content(
6354 vdud, db_vnfd
6355 )
6356 if cloud_init_text:
6357 additional_params = (
6358 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6359 or {}
6360 )
6361 cloud_init_list = []
6362
6363 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6364 max_instance_count = 10
6365 if vdu_profile and "max-number-of-instances" in vdu_profile:
6366 max_instance_count = vdu_profile.get(
6367 "max-number-of-instances", 10
6368 )
6369
6370 default_instance_num = get_number_of_instances(
6371 db_vnfd, vdud["id"]
6372 )
6373 instances_number = vdu_delta.get("number-of-instances", 1)
6374 nb_scale_op += instances_number
6375
6376 new_instance_count = nb_scale_op + default_instance_num
6377 # Control if new count is over max and vdu count is less than max.
6378 # Then assign new instance count
6379 if new_instance_count > max_instance_count > vdu_count:
6380 instances_number = new_instance_count - max_instance_count
6381 else:
6382 instances_number = instances_number
6383
6384 if new_instance_count > max_instance_count:
6385 raise LcmException(
6386 "reached the limit of {} (max-instance-count) "
6387 "scaling-out operations for the "
6388 "scaling-group-descriptor '{}'".format(
6389 nb_scale_op, scaling_group
6390 )
6391 )
6392 for x in range(vdu_delta.get("number-of-instances", 1)):
6393 if cloud_init_text:
6394 # TODO Information of its own ip is not available because db_vnfr is not updated.
6395 additional_params["OSM"] = get_osm_params(
6396 db_vnfr, vdu_delta["id"], vdu_index + x
6397 )
6398 cloud_init_list.append(
6399 self._parse_cloud_init(
6400 cloud_init_text,
6401 additional_params,
6402 db_vnfd["id"],
6403 vdud["id"],
6404 )
6405 )
6406 vca_scaling_info.append(
6407 {
6408 "osm_vdu_id": vdu_delta["id"],
6409 "member-vnf-index": vnf_index,
6410 "type": "create",
6411 "vdu_index": vdu_index + x,
6412 }
6413 )
6414 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6415 for kdu_delta in delta.get("kdu-resource-delta", {}):
6416 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6417 kdu_name = kdu_profile["kdu-name"]
6418 resource_name = kdu_profile.get("resource-name", "")
6419
6420 # Might have different kdus in the same delta
6421 # Should have list for each kdu
6422 if not scaling_info["kdu-create"].get(kdu_name, None):
6423 scaling_info["kdu-create"][kdu_name] = []
6424
6425 kdur = get_kdur(db_vnfr, kdu_name)
6426 if kdur.get("helm-chart"):
6427 k8s_cluster_type = "helm-chart-v3"
6428 self.logger.debug("kdur: {}".format(kdur))
6429 if (
6430 kdur.get("helm-version")
6431 and kdur.get("helm-version") == "v2"
6432 ):
6433 k8s_cluster_type = "helm-chart"
6434 elif kdur.get("juju-bundle"):
6435 k8s_cluster_type = "juju-bundle"
6436 else:
6437 raise LcmException(
6438 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6439 "juju-bundle. Maybe an old NBI version is running".format(
6440 db_vnfr["member-vnf-index-ref"], kdu_name
6441 )
6442 )
6443
6444 max_instance_count = 10
6445 if kdu_profile and "max-number-of-instances" in kdu_profile:
6446 max_instance_count = kdu_profile.get(
6447 "max-number-of-instances", 10
6448 )
6449
6450 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6451 deployed_kdu, _ = get_deployed_kdu(
6452 nsr_deployed, kdu_name, vnf_index
6453 )
6454 if deployed_kdu is None:
6455 raise LcmException(
6456 "KDU '{}' for vnf '{}' not deployed".format(
6457 kdu_name, vnf_index
6458 )
6459 )
6460 kdu_instance = deployed_kdu.get("kdu-instance")
6461 instance_num = await self.k8scluster_map[
6462 k8s_cluster_type
6463 ].get_scale_count(
6464 resource_name,
6465 kdu_instance,
6466 vca_id=vca_id,
6467 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6468 kdu_model=deployed_kdu.get("kdu-model"),
6469 )
6470 kdu_replica_count = instance_num + kdu_delta.get(
6471 "number-of-instances", 1
6472 )
6473
6474 # Control if new count is over max and instance_num is less than max.
6475 # Then assign max instance number to kdu replica count
6476 if kdu_replica_count > max_instance_count > instance_num:
6477 kdu_replica_count = max_instance_count
6478 if kdu_replica_count > max_instance_count:
6479 raise LcmException(
6480 "reached the limit of {} (max-instance-count) "
6481 "scaling-out operations for the "
6482 "scaling-group-descriptor '{}'".format(
6483 instance_num, scaling_group
6484 )
6485 )
6486
6487 for x in range(kdu_delta.get("number-of-instances", 1)):
6488 vca_scaling_info.append(
6489 {
6490 "osm_kdu_id": kdu_name,
6491 "member-vnf-index": vnf_index,
6492 "type": "create",
6493 "kdu_index": instance_num + x - 1,
6494 }
6495 )
6496 scaling_info["kdu-create"][kdu_name].append(
6497 {
6498 "member-vnf-index": vnf_index,
6499 "type": "create",
6500 "k8s-cluster-type": k8s_cluster_type,
6501 "resource-name": resource_name,
6502 "scale": kdu_replica_count,
6503 }
6504 )
6505 elif scaling_type == "SCALE_IN":
6506 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6507
6508 scaling_info["scaling_direction"] = "IN"
6509 scaling_info["vdu-delete"] = {}
6510 scaling_info["kdu-delete"] = {}
6511
6512 for delta in deltas:
6513 for vdu_delta in delta.get("vdu-delta", {}):
6514 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6515 min_instance_count = 0
6516 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6517 if vdu_profile and "min-number-of-instances" in vdu_profile:
6518 min_instance_count = vdu_profile["min-number-of-instances"]
6519
6520 default_instance_num = get_number_of_instances(
6521 db_vnfd, vdu_delta["id"]
6522 )
6523 instance_num = vdu_delta.get("number-of-instances", 1)
6524 nb_scale_op -= instance_num
6525
6526 new_instance_count = nb_scale_op + default_instance_num
6527
6528 if new_instance_count < min_instance_count < vdu_count:
6529 instances_number = min_instance_count - new_instance_count
6530 else:
6531 instances_number = instance_num
6532
6533 if new_instance_count < min_instance_count:
6534 raise LcmException(
6535 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6536 "scaling-group-descriptor '{}'".format(
6537 nb_scale_op, scaling_group
6538 )
6539 )
6540 for x in range(vdu_delta.get("number-of-instances", 1)):
6541 vca_scaling_info.append(
6542 {
6543 "osm_vdu_id": vdu_delta["id"],
6544 "member-vnf-index": vnf_index,
6545 "type": "delete",
6546 "vdu_index": vdu_index - 1 - x,
6547 }
6548 )
6549 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6550 for kdu_delta in delta.get("kdu-resource-delta", {}):
6551 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6552 kdu_name = kdu_profile["kdu-name"]
6553 resource_name = kdu_profile.get("resource-name", "")
6554
6555 if not scaling_info["kdu-delete"].get(kdu_name, None):
6556 scaling_info["kdu-delete"][kdu_name] = []
6557
6558 kdur = get_kdur(db_vnfr, kdu_name)
6559 if kdur.get("helm-chart"):
6560 k8s_cluster_type = "helm-chart-v3"
6561 self.logger.debug("kdur: {}".format(kdur))
6562 if (
6563 kdur.get("helm-version")
6564 and kdur.get("helm-version") == "v2"
6565 ):
6566 k8s_cluster_type = "helm-chart"
6567 elif kdur.get("juju-bundle"):
6568 k8s_cluster_type = "juju-bundle"
6569 else:
6570 raise LcmException(
6571 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6572 "juju-bundle. Maybe an old NBI version is running".format(
6573 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6574 )
6575 )
6576
6577 min_instance_count = 0
6578 if kdu_profile and "min-number-of-instances" in kdu_profile:
6579 min_instance_count = kdu_profile["min-number-of-instances"]
6580
6581 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6582 deployed_kdu, _ = get_deployed_kdu(
6583 nsr_deployed, kdu_name, vnf_index
6584 )
6585 if deployed_kdu is None:
6586 raise LcmException(
6587 "KDU '{}' for vnf '{}' not deployed".format(
6588 kdu_name, vnf_index
6589 )
6590 )
6591 kdu_instance = deployed_kdu.get("kdu-instance")
6592 instance_num = await self.k8scluster_map[
6593 k8s_cluster_type
6594 ].get_scale_count(
6595 resource_name,
6596 kdu_instance,
6597 vca_id=vca_id,
6598 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6599 kdu_model=deployed_kdu.get("kdu-model"),
6600 )
6601 kdu_replica_count = instance_num - kdu_delta.get(
6602 "number-of-instances", 1
6603 )
6604
6605 if kdu_replica_count < min_instance_count < instance_num:
6606 kdu_replica_count = min_instance_count
6607 if kdu_replica_count < min_instance_count:
6608 raise LcmException(
6609 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6610 "scaling-group-descriptor '{}'".format(
6611 instance_num, scaling_group
6612 )
6613 )
6614
6615 for x in range(kdu_delta.get("number-of-instances", 1)):
6616 vca_scaling_info.append(
6617 {
6618 "osm_kdu_id": kdu_name,
6619 "member-vnf-index": vnf_index,
6620 "type": "delete",
6621 "kdu_index": instance_num - x - 1,
6622 }
6623 )
6624 scaling_info["kdu-delete"][kdu_name].append(
6625 {
6626 "member-vnf-index": vnf_index,
6627 "type": "delete",
6628 "k8s-cluster-type": k8s_cluster_type,
6629 "resource-name": resource_name,
6630 "scale": kdu_replica_count,
6631 }
6632 )
6633
6634 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6635 vdu_delete = copy(scaling_info.get("vdu-delete"))
6636 if scaling_info["scaling_direction"] == "IN":
6637 for vdur in reversed(db_vnfr["vdur"]):
6638 if vdu_delete.get(vdur["vdu-id-ref"]):
6639 vdu_delete[vdur["vdu-id-ref"]] -= 1
6640 scaling_info["vdu"].append(
6641 {
6642 "name": vdur.get("name") or vdur.get("vdu-name"),
6643 "vdu_id": vdur["vdu-id-ref"],
6644 "interface": [],
6645 }
6646 )
6647 for interface in vdur["interfaces"]:
6648 scaling_info["vdu"][-1]["interface"].append(
6649 {
6650 "name": interface["name"],
6651 "ip_address": interface["ip-address"],
6652 "mac_address": interface.get("mac-address"),
6653 }
6654 )
6655 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6656
6657 # PRE-SCALE BEGIN
6658 step = "Executing pre-scale vnf-config-primitive"
6659 if scaling_descriptor.get("scaling-config-action"):
6660 for scaling_config_action in scaling_descriptor[
6661 "scaling-config-action"
6662 ]:
6663 if (
6664 scaling_config_action.get("trigger") == "pre-scale-in"
6665 and scaling_type == "SCALE_IN"
6666 ) or (
6667 scaling_config_action.get("trigger") == "pre-scale-out"
6668 and scaling_type == "SCALE_OUT"
6669 ):
6670 vnf_config_primitive = scaling_config_action[
6671 "vnf-config-primitive-name-ref"
6672 ]
6673 step = db_nslcmop_update[
6674 "detailed-status"
6675 ] = "executing pre-scale scaling-config-action '{}'".format(
6676 vnf_config_primitive
6677 )
6678
6679 # look for primitive
6680 for config_primitive in (
6681 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6682 ).get("config-primitive", ()):
6683 if config_primitive["name"] == vnf_config_primitive:
6684 break
6685 else:
6686 raise LcmException(
6687 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6688 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6689 "primitive".format(scaling_group, vnf_config_primitive)
6690 )
6691
6692 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6693 if db_vnfr.get("additionalParamsForVnf"):
6694 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6695
6696 scale_process = "VCA"
6697 db_nsr_update["config-status"] = "configuring pre-scaling"
6698 primitive_params = self._map_primitive_params(
6699 config_primitive, {}, vnfr_params
6700 )
6701
6702 # Pre-scale retry check: Check if this sub-operation has been executed before
6703 op_index = self._check_or_add_scale_suboperation(
6704 db_nslcmop,
6705 vnf_index,
6706 vnf_config_primitive,
6707 primitive_params,
6708 "PRE-SCALE",
6709 )
6710 if op_index == self.SUBOPERATION_STATUS_SKIP:
6711 # Skip sub-operation
6712 result = "COMPLETED"
6713 result_detail = "Done"
6714 self.logger.debug(
6715 logging_text
6716 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6717 vnf_config_primitive, result, result_detail
6718 )
6719 )
6720 else:
6721 if op_index == self.SUBOPERATION_STATUS_NEW:
6722 # New sub-operation: Get index of this sub-operation
6723 op_index = (
6724 len(db_nslcmop.get("_admin", {}).get("operations"))
6725 - 1
6726 )
6727 self.logger.debug(
6728 logging_text
6729 + "vnf_config_primitive={} New sub-operation".format(
6730 vnf_config_primitive
6731 )
6732 )
6733 else:
6734 # retry: Get registered params for this existing sub-operation
6735 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6736 op_index
6737 ]
6738 vnf_index = op.get("member_vnf_index")
6739 vnf_config_primitive = op.get("primitive")
6740 primitive_params = op.get("primitive_params")
6741 self.logger.debug(
6742 logging_text
6743 + "vnf_config_primitive={} Sub-operation retry".format(
6744 vnf_config_primitive
6745 )
6746 )
6747 # Execute the primitive, either with new (first-time) or registered (reintent) args
6748 ee_descriptor_id = config_primitive.get(
6749 "execution-environment-ref"
6750 )
6751 primitive_name = config_primitive.get(
6752 "execution-environment-primitive", vnf_config_primitive
6753 )
6754 ee_id, vca_type = self._look_for_deployed_vca(
6755 nsr_deployed["VCA"],
6756 member_vnf_index=vnf_index,
6757 vdu_id=None,
6758 vdu_count_index=None,
6759 ee_descriptor_id=ee_descriptor_id,
6760 )
6761 result, result_detail = await self._ns_execute_primitive(
6762 ee_id,
6763 primitive_name,
6764 primitive_params,
6765 vca_type=vca_type,
6766 vca_id=vca_id,
6767 )
6768 self.logger.debug(
6769 logging_text
6770 + "vnf_config_primitive={} Done with result {} {}".format(
6771 vnf_config_primitive, result, result_detail
6772 )
6773 )
6774 # Update operationState = COMPLETED | FAILED
6775 self._update_suboperation_status(
6776 db_nslcmop, op_index, result, result_detail
6777 )
6778
6779 if result == "FAILED":
6780 raise LcmException(result_detail)
6781 db_nsr_update["config-status"] = old_config_status
6782 scale_process = None
6783 # PRE-SCALE END
6784
6785 db_nsr_update[
6786 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6787 ] = nb_scale_op
6788 db_nsr_update[
6789 "_admin.scaling-group.{}.time".format(admin_scale_index)
6790 ] = time()
6791
6792 # SCALE-IN VCA - BEGIN
6793 if vca_scaling_info:
6794 step = db_nslcmop_update[
6795 "detailed-status"
6796 ] = "Deleting the execution environments"
6797 scale_process = "VCA"
6798 for vca_info in vca_scaling_info:
6799 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6800 member_vnf_index = str(vca_info["member-vnf-index"])
6801 self.logger.debug(
6802 logging_text + "vdu info: {}".format(vca_info)
6803 )
6804 if vca_info.get("osm_vdu_id"):
6805 vdu_id = vca_info["osm_vdu_id"]
6806 vdu_index = int(vca_info["vdu_index"])
6807 stage[
6808 1
6809 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6810 member_vnf_index, vdu_id, vdu_index
6811 )
6812 stage[2] = step = "Scaling in VCA"
6813 self._write_op_status(op_id=nslcmop_id, stage=stage)
6814 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6815 config_update = db_nsr["configurationStatus"]
6816 for vca_index, vca in enumerate(vca_update):
6817 if (
6818 (vca or vca.get("ee_id"))
6819 and vca["member-vnf-index"] == member_vnf_index
6820 and vca["vdu_count_index"] == vdu_index
6821 ):
6822 if vca.get("vdu_id"):
6823 config_descriptor = get_configuration(
6824 db_vnfd, vca.get("vdu_id")
6825 )
6826 elif vca.get("kdu_name"):
6827 config_descriptor = get_configuration(
6828 db_vnfd, vca.get("kdu_name")
6829 )
6830 else:
6831 config_descriptor = get_configuration(
6832 db_vnfd, db_vnfd["id"]
6833 )
6834 operation_params = (
6835 db_nslcmop.get("operationParams") or {}
6836 )
6837 exec_terminate_primitives = not operation_params.get(
6838 "skip_terminate_primitives"
6839 ) and vca.get("needed_terminate")
6840 task = asyncio.ensure_future(
6841 asyncio.wait_for(
6842 self.destroy_N2VC(
6843 logging_text,
6844 db_nslcmop,
6845 vca,
6846 config_descriptor,
6847 vca_index,
6848 destroy_ee=True,
6849 exec_primitives=exec_terminate_primitives,
6850 scaling_in=True,
6851 vca_id=vca_id,
6852 ),
6853 timeout=self.timeout.charm_delete,
6854 )
6855 )
6856 tasks_dict_info[task] = "Terminating VCA {}".format(
6857 vca.get("ee_id")
6858 )
6859 del vca_update[vca_index]
6860 del config_update[vca_index]
6861 # wait for pending tasks of terminate primitives
6862 if tasks_dict_info:
6863 self.logger.debug(
6864 logging_text
6865 + "Waiting for tasks {}".format(
6866 list(tasks_dict_info.keys())
6867 )
6868 )
6869 error_list = await self._wait_for_tasks(
6870 logging_text,
6871 tasks_dict_info,
6872 min(
6873 self.timeout.charm_delete, self.timeout.ns_terminate
6874 ),
6875 stage,
6876 nslcmop_id,
6877 )
6878 tasks_dict_info.clear()
6879 if error_list:
6880 raise LcmException("; ".join(error_list))
6881
6882 db_vca_and_config_update = {
6883 "_admin.deployed.VCA": vca_update,
6884 "configurationStatus": config_update,
6885 }
6886 self.update_db_2(
6887 "nsrs", db_nsr["_id"], db_vca_and_config_update
6888 )
6889 scale_process = None
6890 # SCALE-IN VCA - END
6891
6892 # SCALE RO - BEGIN
6893 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6894 scale_process = "RO"
6895 if self.ro_config.ng:
6896 await self._scale_ng_ro(
6897 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6898 )
6899 scaling_info.pop("vdu-create", None)
6900 scaling_info.pop("vdu-delete", None)
6901
6902 scale_process = None
6903 # SCALE RO - END
6904
6905 # SCALE KDU - BEGIN
6906 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6907 scale_process = "KDU"
6908 await self._scale_kdu(
6909 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6910 )
6911 scaling_info.pop("kdu-create", None)
6912 scaling_info.pop("kdu-delete", None)
6913
6914 scale_process = None
6915 # SCALE KDU - END
6916
6917 if db_nsr_update:
6918 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6919
6920 # SCALE-UP VCA - BEGIN
6921 if vca_scaling_info:
6922 step = db_nslcmop_update[
6923 "detailed-status"
6924 ] = "Creating new execution environments"
6925 scale_process = "VCA"
6926 for vca_info in vca_scaling_info:
6927 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6928 member_vnf_index = str(vca_info["member-vnf-index"])
6929 self.logger.debug(
6930 logging_text + "vdu info: {}".format(vca_info)
6931 )
6932 vnfd_id = db_vnfr["vnfd-ref"]
6933 if vca_info.get("osm_vdu_id"):
6934 vdu_index = int(vca_info["vdu_index"])
6935 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6936 if db_vnfr.get("additionalParamsForVnf"):
6937 deploy_params.update(
6938 parse_yaml_strings(
6939 db_vnfr["additionalParamsForVnf"].copy()
6940 )
6941 )
6942 descriptor_config = get_configuration(
6943 db_vnfd, db_vnfd["id"]
6944 )
6945 if descriptor_config:
6946 vdu_id = None
6947 vdu_name = None
6948 kdu_name = None
6949 kdu_index = None
6950 self._deploy_n2vc(
6951 logging_text=logging_text
6952 + "member_vnf_index={} ".format(member_vnf_index),
6953 db_nsr=db_nsr,
6954 db_vnfr=db_vnfr,
6955 nslcmop_id=nslcmop_id,
6956 nsr_id=nsr_id,
6957 nsi_id=nsi_id,
6958 vnfd_id=vnfd_id,
6959 vdu_id=vdu_id,
6960 kdu_name=kdu_name,
6961 kdu_index=kdu_index,
6962 member_vnf_index=member_vnf_index,
6963 vdu_index=vdu_index,
6964 vdu_name=vdu_name,
6965 deploy_params=deploy_params,
6966 descriptor_config=descriptor_config,
6967 base_folder=base_folder,
6968 task_instantiation_info=tasks_dict_info,
6969 stage=stage,
6970 )
6971 vdu_id = vca_info["osm_vdu_id"]
6972 vdur = find_in_list(
6973 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6974 )
6975 descriptor_config = get_configuration(db_vnfd, vdu_id)
6976 if vdur.get("additionalParams"):
6977 deploy_params_vdu = parse_yaml_strings(
6978 vdur["additionalParams"]
6979 )
6980 else:
6981 deploy_params_vdu = deploy_params
6982 deploy_params_vdu["OSM"] = get_osm_params(
6983 db_vnfr, vdu_id, vdu_count_index=vdu_index
6984 )
6985 if descriptor_config:
6986 vdu_name = None
6987 kdu_name = None
6988 kdu_index = None
6989 stage[
6990 1
6991 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6992 member_vnf_index, vdu_id, vdu_index
6993 )
6994 stage[2] = step = "Scaling out VCA"
6995 self._write_op_status(op_id=nslcmop_id, stage=stage)
6996 self._deploy_n2vc(
6997 logging_text=logging_text
6998 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6999 member_vnf_index, vdu_id, vdu_index
7000 ),
7001 db_nsr=db_nsr,
7002 db_vnfr=db_vnfr,
7003 nslcmop_id=nslcmop_id,
7004 nsr_id=nsr_id,
7005 nsi_id=nsi_id,
7006 vnfd_id=vnfd_id,
7007 vdu_id=vdu_id,
7008 kdu_name=kdu_name,
7009 member_vnf_index=member_vnf_index,
7010 vdu_index=vdu_index,
7011 kdu_index=kdu_index,
7012 vdu_name=vdu_name,
7013 deploy_params=deploy_params_vdu,
7014 descriptor_config=descriptor_config,
7015 base_folder=base_folder,
7016 task_instantiation_info=tasks_dict_info,
7017 stage=stage,
7018 )
7019 # SCALE-UP VCA - END
7020 scale_process = None
7021
7022 # POST-SCALE BEGIN
7023 # execute primitive service POST-SCALING
7024 step = "Executing post-scale vnf-config-primitive"
7025 if scaling_descriptor.get("scaling-config-action"):
7026 for scaling_config_action in scaling_descriptor[
7027 "scaling-config-action"
7028 ]:
7029 if (
7030 scaling_config_action.get("trigger") == "post-scale-in"
7031 and scaling_type == "SCALE_IN"
7032 ) or (
7033 scaling_config_action.get("trigger") == "post-scale-out"
7034 and scaling_type == "SCALE_OUT"
7035 ):
7036 vnf_config_primitive = scaling_config_action[
7037 "vnf-config-primitive-name-ref"
7038 ]
7039 step = db_nslcmop_update[
7040 "detailed-status"
7041 ] = "executing post-scale scaling-config-action '{}'".format(
7042 vnf_config_primitive
7043 )
7044
7045 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7046 if db_vnfr.get("additionalParamsForVnf"):
7047 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7048
7049 # look for primitive
7050 for config_primitive in (
7051 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7052 ).get("config-primitive", ()):
7053 if config_primitive["name"] == vnf_config_primitive:
7054 break
7055 else:
7056 raise LcmException(
7057 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7058 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7059 "config-primitive".format(
7060 scaling_group, vnf_config_primitive
7061 )
7062 )
7063 scale_process = "VCA"
7064 db_nsr_update["config-status"] = "configuring post-scaling"
7065 primitive_params = self._map_primitive_params(
7066 config_primitive, {}, vnfr_params
7067 )
7068
7069 # Post-scale retry check: Check if this sub-operation has been executed before
7070 op_index = self._check_or_add_scale_suboperation(
7071 db_nslcmop,
7072 vnf_index,
7073 vnf_config_primitive,
7074 primitive_params,
7075 "POST-SCALE",
7076 )
7077 if op_index == self.SUBOPERATION_STATUS_SKIP:
7078 # Skip sub-operation
7079 result = "COMPLETED"
7080 result_detail = "Done"
7081 self.logger.debug(
7082 logging_text
7083 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7084 vnf_config_primitive, result, result_detail
7085 )
7086 )
7087 else:
7088 if op_index == self.SUBOPERATION_STATUS_NEW:
7089 # New sub-operation: Get index of this sub-operation
7090 op_index = (
7091 len(db_nslcmop.get("_admin", {}).get("operations"))
7092 - 1
7093 )
7094 self.logger.debug(
7095 logging_text
7096 + "vnf_config_primitive={} New sub-operation".format(
7097 vnf_config_primitive
7098 )
7099 )
7100 else:
7101 # retry: Get registered params for this existing sub-operation
7102 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7103 op_index
7104 ]
7105 vnf_index = op.get("member_vnf_index")
7106 vnf_config_primitive = op.get("primitive")
7107 primitive_params = op.get("primitive_params")
7108 self.logger.debug(
7109 logging_text
7110 + "vnf_config_primitive={} Sub-operation retry".format(
7111 vnf_config_primitive
7112 )
7113 )
7114 # Execute the primitive, either with new (first-time) or registered (reintent) args
7115 ee_descriptor_id = config_primitive.get(
7116 "execution-environment-ref"
7117 )
7118 primitive_name = config_primitive.get(
7119 "execution-environment-primitive", vnf_config_primitive
7120 )
7121 ee_id, vca_type = self._look_for_deployed_vca(
7122 nsr_deployed["VCA"],
7123 member_vnf_index=vnf_index,
7124 vdu_id=None,
7125 vdu_count_index=None,
7126 ee_descriptor_id=ee_descriptor_id,
7127 )
7128 result, result_detail = await self._ns_execute_primitive(
7129 ee_id,
7130 primitive_name,
7131 primitive_params,
7132 vca_type=vca_type,
7133 vca_id=vca_id,
7134 )
7135 self.logger.debug(
7136 logging_text
7137 + "vnf_config_primitive={} Done with result {} {}".format(
7138 vnf_config_primitive, result, result_detail
7139 )
7140 )
7141 # Update operationState = COMPLETED | FAILED
7142 self._update_suboperation_status(
7143 db_nslcmop, op_index, result, result_detail
7144 )
7145
7146 if result == "FAILED":
7147 raise LcmException(result_detail)
7148 db_nsr_update["config-status"] = old_config_status
7149 scale_process = None
7150 # POST-SCALE END
7151
7152 db_nsr_update[
7153 "detailed-status"
7154 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7155 db_nsr_update["operational-status"] = (
7156 "running"
7157 if old_operational_status == "failed"
7158 else old_operational_status
7159 )
7160 db_nsr_update["config-status"] = old_config_status
7161 return
7162 except (
7163 ROclient.ROClientException,
7164 DbException,
7165 LcmException,
7166 NgRoException,
7167 ) as e:
7168 self.logger.error(logging_text + "Exit Exception {}".format(e))
7169 exc = e
7170 except asyncio.CancelledError:
7171 self.logger.error(
7172 logging_text + "Cancelled Exception while '{}'".format(step)
7173 )
7174 exc = "Operation was cancelled"
7175 except Exception as e:
7176 exc = traceback.format_exc()
7177 self.logger.critical(
7178 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7179 exc_info=True,
7180 )
7181 finally:
7182 self._write_ns_status(
7183 nsr_id=nsr_id,
7184 ns_state=None,
7185 current_operation="IDLE",
7186 current_operation_id=None,
7187 )
7188 if tasks_dict_info:
7189 stage[1] = "Waiting for instantiate pending tasks."
7190 self.logger.debug(logging_text + stage[1])
7191 exc = await self._wait_for_tasks(
7192 logging_text,
7193 tasks_dict_info,
7194 self.timeout.ns_deploy,
7195 stage,
7196 nslcmop_id,
7197 nsr_id=nsr_id,
7198 )
7199 if exc:
7200 db_nslcmop_update[
7201 "detailed-status"
7202 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7203 nslcmop_operation_state = "FAILED"
7204 if db_nsr:
7205 db_nsr_update["operational-status"] = old_operational_status
7206 db_nsr_update["config-status"] = old_config_status
7207 db_nsr_update["detailed-status"] = ""
7208 if scale_process:
7209 if "VCA" in scale_process:
7210 db_nsr_update["config-status"] = "failed"
7211 if "RO" in scale_process:
7212 db_nsr_update["operational-status"] = "failed"
7213 db_nsr_update[
7214 "detailed-status"
7215 ] = "FAILED scaling nslcmop={} {}: {}".format(
7216 nslcmop_id, step, exc
7217 )
7218 else:
7219 error_description_nslcmop = None
7220 nslcmop_operation_state = "COMPLETED"
7221 db_nslcmop_update["detailed-status"] = "Done"
7222
7223 self._write_op_status(
7224 op_id=nslcmop_id,
7225 stage="",
7226 error_message=error_description_nslcmop,
7227 operation_state=nslcmop_operation_state,
7228 other_update=db_nslcmop_update,
7229 )
7230 if db_nsr:
7231 self._write_ns_status(
7232 nsr_id=nsr_id,
7233 ns_state=None,
7234 current_operation="IDLE",
7235 current_operation_id=None,
7236 other_update=db_nsr_update,
7237 )
7238
7239 if nslcmop_operation_state:
7240 try:
7241 msg = {
7242 "nsr_id": nsr_id,
7243 "nslcmop_id": nslcmop_id,
7244 "operationState": nslcmop_operation_state,
7245 }
7246 await self.msg.aiowrite("ns", "scaled", msg)
7247 except Exception as e:
7248 self.logger.error(
7249 logging_text + "kafka_write notification Exception {}".format(e)
7250 )
7251 self.logger.debug(logging_text + "Exit")
7252 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7253
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale (in or out) the KDUs listed in scaling_info.

        For every KDU entry it runs, in order:
          1. the descriptor's terminate-config-primitives (only when scaling-in
             a Juju-less KDU),
          2. the actual K8s scale operation on the deployed KDU instance,
          3. the descriptor's initial-config-primitives (only when scaling-out
             a Juju-less KDU).

        :param logging_text: prefix for every log line of this task
        :param nsr_id: NS record id, used to address the nsrs document updates
        :param nsr_deployed: "_admin.deployed" content of the NS record
        :param db_vnfd: VNF descriptor that contains the KDU configuration
        :param vca_id: VCA id forwarded to the K8s connector calls
        :param scaling_info: dict with "kdu-create" or "kdu-delete" entries,
            each mapping kdu_name -> list of per-instance scaling dicts
        :raises: whatever the K8s connector raises (including asyncio.TimeoutError
            from the wait_for wrappers)
        """
        # "kdu-create" and "kdu-delete" are mutually exclusive for one operation
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # locate the already-deployed KDU record and its position in
                # "_admin.deployed.K8s" (index is needed to build db_dict below)
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # db_dict tells the K8s connector where to report status updates
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # terminate primitives run here only when the KDU has no
                    # Juju execution environment (otherwise Juju handles them)
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must be executed in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # outer wait_for guards against a hung connector call;
                            # outer timeout = inner total_timeout * outer_factor
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    total_timeout=self.timeout.primitive,
                                    vca_id=vca_id,
                                ),
                                timeout=self.timeout.primitive
                                * self.timeout.primitive_outer_factor,
                            )

                # perform the actual scaling on the K8s cluster (both directions)
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance=kdu_instance,
                        scale=scale,
                        resource_name=kdu_scaling_info["resource-name"],
                        total_timeout=self.timeout.scale_on_error,
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout.scale_on_error
                    * self.timeout.scale_on_error_outer_factor,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # initial primitives run here only for Juju-less KDUs,
                    # mirroring the terminate-primitive handling above
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must be executed in ascending "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            # NOTE(review): this wait uses a hard-coded 600 s
                            # instead of the configured primitive timeouts used
                            # above — presumably intentional; confirm
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7363
7364 async def _scale_ng_ro(
7365 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7366 ):
7367 nsr_id = db_nslcmop["nsInstanceId"]
7368 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7369 db_vnfrs = {}
7370
7371 # read from db: vnfd's for every vnf
7372 db_vnfds = []
7373
7374 # for each vnf in ns, read vnfd
7375 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7376 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7377 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7378 # if we haven't this vnfd, read it from db
7379 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7380 # read from db
7381 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7382 db_vnfds.append(vnfd)
7383 n2vc_key = self.n2vc.get_public_key()
7384 n2vc_key_list = [n2vc_key]
7385 self.scale_vnfr(
7386 db_vnfr,
7387 vdu_scaling_info.get("vdu-create"),
7388 vdu_scaling_info.get("vdu-delete"),
7389 mark_delete=True,
7390 )
7391 # db_vnfr has been updated, update db_vnfrs to use it
7392 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7393 await self._instantiate_ng_ro(
7394 logging_text,
7395 nsr_id,
7396 db_nsd,
7397 db_nsr,
7398 db_nslcmop,
7399 db_vnfrs,
7400 db_vnfds,
7401 n2vc_key_list,
7402 stage=stage,
7403 start_deploy=time(),
7404 timeout_ns_deploy=self.timeout.ns_deploy,
7405 )
7406 if vdu_scaling_info.get("vdu-delete"):
7407 self.scale_vnfr(
7408 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7409 )
7410
7411 async def extract_prometheus_scrape_jobs(
7412 self,
7413 ee_id: str,
7414 artifact_path: str,
7415 ee_config_descriptor: dict,
7416 vnfr_id: str,
7417 nsr_id: str,
7418 target_ip: str,
7419 element_type: str,
7420 vnf_member_index: str = "",
7421 vdu_id: str = "",
7422 vdu_index: int = None,
7423 kdu_name: str = "",
7424 kdu_index: int = None,
7425 ) -> dict:
7426 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7427 This method will wait until the corresponding VDU or KDU is fully instantiated
7428
7429 Args:
7430 ee_id (str): Execution Environment ID
7431 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7432 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7433 vnfr_id (str): VNFR ID where this EE applies
7434 nsr_id (str): NSR ID where this EE applies
7435 target_ip (str): VDU/KDU instance IP address
7436 element_type (str): NS or VNF or VDU or KDU
7437 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7438 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7439 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7440 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7441 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7442
7443 Raises:
7444 LcmException: When the VDU or KDU instance was not found in an hour
7445
7446 Returns:
7447 _type_: Prometheus jobs
7448 """
7449 # default the vdur and kdur names to an empty string, to avoid any later
7450 # problem with Prometheus when the element type is not VDU or KDU
7451 vdur_name = ""
7452 kdur_name = ""
7453
7454 # look if exist a file called 'prometheus*.j2' and
7455 artifact_content = self.fs.dir_ls(artifact_path)
7456 job_file = next(
7457 (
7458 f
7459 for f in artifact_content
7460 if f.startswith("prometheus") and f.endswith(".j2")
7461 ),
7462 None,
7463 )
7464 if not job_file:
7465 return
7466 self.logger.debug("Artifact path{}".format(artifact_path))
7467 self.logger.debug("job file{}".format(job_file))
7468 with self.fs.file_open((artifact_path, job_file), "r") as f:
7469 job_data = f.read()
7470
7471 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7472 if element_type in ("VDU", "KDU"):
7473 for _ in range(360):
7474 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7475 if vdu_id and vdu_index is not None:
7476 vdur = next(
7477 (
7478 x
7479 for x in get_iterable(db_vnfr, "vdur")
7480 if (
7481 x.get("vdu-id-ref") == vdu_id
7482 and x.get("count-index") == vdu_index
7483 )
7484 ),
7485 {},
7486 )
7487 if vdur.get("name"):
7488 vdur_name = vdur.get("name")
7489 break
7490 if kdu_name and kdu_index is not None:
7491 kdur = next(
7492 (
7493 x
7494 for x in get_iterable(db_vnfr, "kdur")
7495 if (
7496 x.get("kdu-name") == kdu_name
7497 and x.get("count-index") == kdu_index
7498 )
7499 ),
7500 {},
7501 )
7502 if kdur.get("name"):
7503 kdur_name = kdur.get("name")
7504 break
7505
7506 await asyncio.sleep(10)
7507 else:
7508 if vdu_id and vdu_index is not None:
7509 raise LcmException(
7510 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7511 )
7512 if kdu_name and kdu_index is not None:
7513 raise LcmException(
7514 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7515 )
7516
7517 if ee_id is not None:
7518 _, namespace, helm_id = get_ee_id_parts(
7519 ee_id
7520 ) # get namespace and EE gRPC service name
7521 host_name = f'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
7522 host_port = "80"
7523 vnfr_id = vnfr_id.replace("-", "")
7524 variables = {
7525 "JOB_NAME": vnfr_id,
7526 "TARGET_IP": target_ip,
7527 "EXPORTER_POD_IP": host_name,
7528 "EXPORTER_POD_PORT": host_port,
7529 "NSR_ID": nsr_id,
7530 "VNF_MEMBER_INDEX": vnf_member_index,
7531 "VDUR_NAME": vdur_name,
7532 "KDUR_NAME": kdur_name,
7533 "ELEMENT_TYPE": element_type,
7534 }
7535 else:
7536 metric_path = ee_config_descriptor["metric-path"]
7537 target_port = ee_config_descriptor["metric-port"]
7538 vnfr_id = vnfr_id.replace("-", "")
7539 variables = {
7540 "JOB_NAME": vnfr_id,
7541 "TARGET_IP": target_ip,
7542 "TARGET_PORT": target_port,
7543 "METRIC_PATH": metric_path,
7544 }
7545
7546 job_list = parse_job(job_data, variables)
7547 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7548 for job in job_list:
7549 if (
7550 not isinstance(job.get("job_name"), str)
7551 or vnfr_id not in job["job_name"]
7552 ):
7553 job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
7554 job["nsr_id"] = nsr_id
7555 job["vnfr_id"] = vnfr_id
7556 return job_list
7557
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """Start, stop or rebuild a single VDU instance through the RO.

        :param nsr_id: NS instance id
        :param nslcmop_id: id of the start/stop/rebuild operation
        :param vnf_id: "_id" of the target VNF record
        :param additional_param: dict with "vdu_id" and "count-index" selecting
            the exact VDU instance to operate on
        :param operation_type: one of the operations accepted by RO.operate
            (e.g. start/stop/rebuild); also written as the nsr operational-status
        :return: tuple (operation_state, detail): ("COMPLETED", "Done") on
            success, ("FAILED", <error text>) otherwise
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            # resolve the target VDU instance (by vdu-id-ref + count-index)
            # and the VIM data needed to address its VM
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # take the first (only) vim_info key as the target VIM name
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # block until the RO reports the action finished (or times out)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected error: keep the full traceback as the failure detail
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # only reached from the except branches, where exc is always bound
        return "FAILED", "Error in operate VNF {}".format(exc)
7644
7645 async def migrate(self, nsr_id, nslcmop_id):
7646 """
7647 Migrate VNFs and VDUs instances in a NS
7648
7649 :param: nsr_id: NS Instance ID
7650 :param: nslcmop_id: nslcmop ID of migrate
7651
7652 """
7653 # Try to lock HA task here
7654 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7655 if not task_is_locked_by_me:
7656 return
7657 logging_text = "Task ns={} migrate ".format(nsr_id)
7658 self.logger.debug(logging_text + "Enter")
7659 # get all needed from database
7660 db_nslcmop = None
7661 db_nslcmop_update = {}
7662 nslcmop_operation_state = None
7663 db_nsr_update = {}
7664 target = {}
7665 exc = None
7666 # in case of error, indicates what part of scale was failed to put nsr at error status
7667 start_deploy = time()
7668
7669 try:
7670 # wait for any previous tasks in process
7671 step = "Waiting for previous operations to terminate"
7672 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7673
7674 self._write_ns_status(
7675 nsr_id=nsr_id,
7676 ns_state=None,
7677 current_operation="MIGRATING",
7678 current_operation_id=nslcmop_id,
7679 )
7680 step = "Getting nslcmop from database"
7681 self.logger.debug(
7682 step + " after having waited for previous tasks to be completed"
7683 )
7684 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7685 migrate_params = db_nslcmop.get("operationParams")
7686
7687 target = {}
7688 target.update(migrate_params)
7689 desc = await self.RO.migrate(nsr_id, target)
7690 self.logger.debug("RO return > {}".format(desc))
7691 action_id = desc["action_id"]
7692 await self._wait_ng_ro(
7693 nsr_id,
7694 action_id,
7695 nslcmop_id,
7696 start_deploy,
7697 self.timeout.migrate,
7698 operation="migrate",
7699 )
7700 except (ROclient.ROClientException, DbException, LcmException) as e:
7701 self.logger.error("Exit Exception {}".format(e))
7702 exc = e
7703 except asyncio.CancelledError:
7704 self.logger.error("Cancelled Exception while '{}'".format(step))
7705 exc = "Operation was cancelled"
7706 except Exception as e:
7707 exc = traceback.format_exc()
7708 self.logger.critical(
7709 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7710 )
7711 finally:
7712 self._write_ns_status(
7713 nsr_id=nsr_id,
7714 ns_state=None,
7715 current_operation="IDLE",
7716 current_operation_id=None,
7717 )
7718 if exc:
7719 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7720 nslcmop_operation_state = "FAILED"
7721 else:
7722 nslcmop_operation_state = "COMPLETED"
7723 db_nslcmop_update["detailed-status"] = "Done"
7724 db_nsr_update["detailed-status"] = "Done"
7725
7726 self._write_op_status(
7727 op_id=nslcmop_id,
7728 stage="",
7729 error_message="",
7730 operation_state=nslcmop_operation_state,
7731 other_update=db_nslcmop_update,
7732 )
7733 if nslcmop_operation_state:
7734 try:
7735 msg = {
7736 "nsr_id": nsr_id,
7737 "nslcmop_id": nslcmop_id,
7738 "operationState": nslcmop_operation_state,
7739 }
7740 await self.msg.aiowrite("ns", "migrated", msg)
7741 except Exception as e:
7742 self.logger.error(
7743 logging_text + "kafka_write notification Exception {}".format(e)
7744 )
7745 self.logger.debug(logging_text + "Exit")
7746 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7747
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS: order the VIM to heal the target VDUs, then redeploy/repair
        the associated execution environments (N2VC) for each healed instance.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return: None; results are written to the nsrs/nslcmops records and
            notified on the kafka "ns"/"healed" topic
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        # previous statuses are kept to restore them if the operation fails
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # delegate the actual VM healing to the RO/VIM
            step = "Sending heal order to VIM"
            await self.heal_RO(
                logging_text=logging_text,
                nsr_id=nsr_id,
                db_nslcmop=db_nslcmop,
                stage=stage,
            )
            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
                        "vdu", []
                    )
                    if not target_vdu_list:
                        # no explicit VDU list: build one covering every
                        # existing VDU instance of this VNF
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
                                "run-day1", False
                            )
                            vdu_to_be_healed = {
                                "vdu-id": vdu_name,
                                "count-index": vdu_index,
                                "run-day1": vdu_run_day1,
                            }
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf.get(
                            "additionalParams", {}
                        ).get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf[
                                "additionalParams"
                            ].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-index is 0
                        vdu_index = target_vdu.get("count-index", 0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if (
                                    instance["vdu-name"] == vdu_name
                                    and instance["count-index"] == vdu_index
                                ):
                                    target_instance = instance
                                    break
                            # NOTE(review): if no vdur matches, target_instance
                            # stays None and .get() below would raise — presumably
                            # a match always exists for a healed VDU; confirm
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected error: keep the full traceback as the failure detail
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the N2VC tasks scheduled by _heal_n2vc before deciding
            # the final operation state
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout.ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore the pre-heal statuses, then mark the part that failed
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # notify the final state on kafka (best effort: failures only logged)
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8033
8034 async def heal_RO(
8035 self,
8036 logging_text,
8037 nsr_id,
8038 db_nslcmop,
8039 stage,
8040 ):
8041 """
8042 Heal at RO
8043 :param logging_text: preffix text to use at logging
8044 :param nsr_id: nsr identity
8045 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8046 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8047 :return: None or exception
8048 """
8049
8050 def get_vim_account(vim_account_id):
8051 nonlocal db_vims
8052 if vim_account_id in db_vims:
8053 return db_vims[vim_account_id]
8054 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8055 db_vims[vim_account_id] = db_vim
8056 return db_vim
8057
8058 try:
8059 start_heal = time()
8060 ns_params = db_nslcmop.get("operationParams")
8061 if ns_params and ns_params.get("timeout_ns_heal"):
8062 timeout_ns_heal = ns_params["timeout_ns_heal"]
8063 else:
8064 timeout_ns_heal = self.timeout.ns_heal
8065
8066 db_vims = {}
8067
8068 nslcmop_id = db_nslcmop["_id"]
8069 target = {
8070 "action_id": nslcmop_id,
8071 }
8072 self.logger.warning(
8073 "db_nslcmop={} and timeout_ns_heal={}".format(
8074 db_nslcmop, timeout_ns_heal
8075 )
8076 )
8077 target.update(db_nslcmop.get("operationParams", {}))
8078
8079 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8080 desc = await self.RO.recreate(nsr_id, target)
8081 self.logger.debug("RO return > {}".format(desc))
8082 action_id = desc["action_id"]
8083 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8084 await self._wait_ng_ro(
8085 nsr_id,
8086 action_id,
8087 nslcmop_id,
8088 start_heal,
8089 timeout_ns_heal,
8090 stage,
8091 operation="healing",
8092 )
8093
8094 # Updating NSR
8095 db_nsr_update = {
8096 "_admin.deployed.RO.operational-status": "running",
8097 "detailed-status": " ".join(stage),
8098 }
8099 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8100 self._write_op_status(nslcmop_id, stage)
8101 self.logger.debug(
8102 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8103 )
8104
8105 except Exception as e:
8106 stage[2] = "ERROR healing at VIM"
8107 # self.set_vnfr_at_error(db_vnfrs, str(e))
8108 self.logger.error(
8109 "Error healing at VIM {}".format(e),
8110 exc_info=not isinstance(
8111 e,
8112 (
8113 ROclient.ROClientException,
8114 LcmException,
8115 DbException,
8116 NgRoException,
8117 ),
8118 ),
8119 )
8120 raise
8121
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """
        Launch one heal_N2VC asyncio task per execution environment found in
        descriptor_config, registering each task in lcm_tasks and in
        task_instantiation_info (keyed by the task object).

        For every execution-environment item it determines the VCA type/name,
        looks up (or creates) the matching entry in
        db_nsr._admin.deployed.VCA, and persists any newly created entry in the
        "nsrs" collection before launching the task.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        # Build the list of execution environments to process from the
        # configuration descriptor; plain "juju" entries are NS charms
        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                # Juju charm: derive the VCA type from charm/cloud/proxy flags
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                # Helm chart: version "v2" selects legacy helm, otherwise helm-v3
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Find an already-deployed VCA entry matching this element; the
            # for/else falls through to creation when no entry matches
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # vca_index was left at the last enumerated position (or -1 on an
                # empty list); the new entry is appended right after it
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            # NOTE(review): dumping the whole db_nsr at debug level is verbose
            # and may include sensitive deployment data — consider trimming
            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            # Record the task so the caller can await/report it later
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8283
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Heal-time counterpart of instantiate_N2VC for a single VCA: for native
        charms it waits for the VM, (re)registers the execution environment and
        reinstalls the configuration software; for proxy/helm types it obtains
        the SSH key when required; then optionally runs the Day-1 initial
        config primitives (controlled by deploy_params["run-day1"]).

        On any failure the configuration status is set to BROKEN and an
        LcmException chained to the original error is raised.
        """
        nsr_id = db_nsr["_id"]
        # Dotted DB path prefix for this VCA entry inside the nsr record
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # "step" tracks the current action; it ends up in status/error messages
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Refine element type/namespace: NS -> VNF -> VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                # NOTE(review): "step" embeds the credentials dict (hostname and
                # username) and may end up in logs/status records — confirm this
                # does not expose sensitive data
                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # The special "config" primitive provides the charm's config options
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

            # n2vc_redesign STEP 5.1
            # wait for RO (ip-address) Insert pub_key into VM
            # IMPORTANT: We need do wait for RO to complete healing operation.
            await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
            if vnfr_id:
                if kdu_name:
                    rw_mgmt_ip = await self.wait_kdu_up(
                        logging_text, nsr_id, vnfr_id, kdu_name
                    )
                else:
                    rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                        logging_text,
                        nsr_id,
                        vnfr_id,
                        vdu_id,
                        vdu_index,
                        user=user,
                        pub_key=pub_key,
                    )
            else:
                rw_mgmt_ip = None  # This is for a NS configuration

            self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                for job in prometheus_jobs:
                    self.db.set_one(
                        "prometheus_jobs",
                        {"job_name": job["job_name"]},
                        job,
                        upsert=True,
                        fail_on_empty=False,
                    )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8692
8693 async def _wait_heal_ro(
8694 self,
8695 nsr_id,
8696 timeout=600,
8697 ):
8698 start_time = time()
8699 while time() <= start_time + timeout:
8700 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8701 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8702 "operational-status"
8703 ]
8704 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8705 if operational_status_ro != "healing":
8706 break
8707 await asyncio.sleep(15)
8708 else: # timeout_ns_deploy
8709 raise NgRoException("Timeout waiting ns to deploy")
8710
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS by delegating the operation to NG-RO
        and waiting for its completion.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of the vertical-scale operation

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is filled below but never persisted to the
        # "nsrs" collection in this method — confirm whether that is intentional
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # RO target is simply the operation parameters of the nslcmop
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # Wait until RO finishes the vertical-scale action (or times out)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.verticalscale,
                operation="verticalscale",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            # Expected failure modes: record the error without a traceback
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # Always release the "current operation" marker on the NS record
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            # NOTE(review): error_message is always "" here, even on failure;
            # other operations pass the failure description — confirm intent
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                # Notify subscribers of the operation result via kafka;
                # notification failure is logged but does not fail the operation
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "verticalscaled", msg)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")